dev: umfangreiches Refactoring, Trakt-Integration und Code-Review-Fixes (0.1.69-dev)
Core & Architektur: - Neues Verzeichnis addon/core/ mit router.py, trakt.py, metadata.py, gui.py, playstate.py, plugin_manager.py, updater.py - Tests-Verzeichnis hinzugefügt (24 Tests, pytest + Coverage) Trakt-Integration: - OAuth Device Flow, Scrobbling, Watchlist, History, Calendar - Upcoming Episodes, Weiterschauen (Continue Watching) - Watched-Status in Episodenlisten - _trakt_find_in_plugins() mit 5-Min-Cache Serienstream-Suche: - API-Ergebnisse werden immer mit Katalog-Cache ergänzt (serverseitiges 10-Treffer-Limit) - Katalog-Cache wird beim Addon-Start im Daemon-Thread vorgewärmt - Notification nach Cache-Load via xbmc.executebuiltin() (thread-sicher) Bugfixes (Code-Review): - Race Condition auf _TRAKT_WATCHED_CACHE: _TRAKT_WATCHED_CACHE_LOCK hinzugefügt - GUI-Dialog aus Daemon-Thread: xbmcgui -> xbmc.executebuiltin() - ValueError in Trakt-Watchlist-Routen abgesichert - Token expires_at==0 Check korrigiert - get_setting_bool() Kontrollfluss in gui.py bereinigt - topstreamfilm_plugin: try-finally um xbmcvfs.File.close() Cleanup: - default.py.bak und refactor_router.py entfernt - .gitignore: /tests/ Eintrag entfernt - Type-Hints vereinheitlicht (Dict/List/Tuple -> dict/list/tuple)
This commit is contained in:
@@ -36,6 +36,8 @@ ADDON_ID = "plugin.video.viewit"
|
||||
# Settings key for the user-configurable base URL of the doku-streams site.
SETTING_BASE_URL = "doku_streams_base_url"

# Fallback base URL used when no setting is configured.
DEFAULT_BASE_URL = "https://doku-streams.com"

# Site path listing the most-viewed documentaries ("meistgesehene").
MOST_VIEWED_PATH = "/meistgesehene/"

# Site path that redirects to a random documentary (used by random_title()).
RANDOM_PATH = "/zufaellige-doku/"

# Base path under which the site's tag listing pages live.
TAGS_BASE_PATH = "/tag/"

# Default timeout passed to requests' ``timeout=`` for all HTTP calls.
DEFAULT_TIMEOUT = 20

# Global debug setting keys: log requested URLs / dump fetched HTML.
GLOBAL_SETTING_LOG_URLS = "debug_log_urls"
GLOBAL_SETTING_DUMP_HTML = "debug_dump_html"
@@ -78,12 +80,12 @@ def _extract_last_page(soup: BeautifulSoupT) -> int:
|
||||
for anchor in soup.select("nav.navigation a[href], nav.pagination a[href], a.page-numbers[href]"):
|
||||
text = (anchor.get_text(" ", strip=True) or "").strip()
|
||||
for candidate in (text, (anchor.get("href") or "").strip()):
|
||||
for value in re.findall(r"/page/(\\d+)/", candidate):
|
||||
for value in re.findall(r"/page/(\d+)/", candidate):
|
||||
try:
|
||||
max_page = max(max_page, int(value))
|
||||
except Exception:
|
||||
continue
|
||||
for value in re.findall(r"(\\d+)", candidate):
|
||||
for value in re.findall(r"(\d+)", candidate):
|
||||
try:
|
||||
max_page = max(max_page, int(value))
|
||||
except Exception:
|
||||
@@ -287,7 +289,7 @@ class DokuStreamsPlugin(BasisPlugin):
|
||||
return _parse_listing_hits(soup, query=query)
|
||||
|
||||
def capabilities(self) -> set[str]:
    """Return the feature set this plugin advertises.

    Besides genre browsing and popular series, the plugin implements
    tag listings (``tags``/``titles_for_tag``) and a random-title
    lookup (``random_title``), so those capabilities are included.
    """
    # The mangled source carried two consecutive return statements (a
    # stale pre-refactor line shadowing the updated one); only the
    # complete set matching the implemented methods is kept.
    return {"genres", "popular_series", "tags", "random"}
|
||||
def _categories_url(self) -> str:
    """Absolute URL of the site's category overview page."""
    categories_path = "/kategorien/"
    return _absolute_url(categories_path)
@@ -465,6 +467,90 @@ class DokuStreamsPlugin(BasisPlugin):
|
||||
return []
|
||||
return [title]
|
||||
|
||||
def tags(self) -> List[str]:
    """Collect tag names linked from the start page, sorted case-insensitively.

    Returns an empty list when the requests library is unavailable or
    fetching/parsing the start page fails (best-effort semantics).
    """
    if not self._requests_available:
        return []
    try:
        soup = _get_soup(
            _absolute_url("/"),
            session=get_requests_session("dokustreams", headers=HEADERS),
        )
    except Exception:
        # Network/parse failures degrade to "no tags" rather than crashing.
        return []
    seen: set[str] = set()
    for anchor in soup.select("a[href*='/tag/']"):
        label = (anchor.get_text(" ", strip=True) or "").strip()
        target = (anchor.get("href") or "").strip()
        # Only count anchors whose href really points under the tag base path.
        if label and TAGS_BASE_PATH in target:
            seen.add(label)
    return sorted(seen, key=str.casefold)
|
||||
def titles_for_tag(self, tag: str, page: int = 1) -> List[str]:
    """Fetch documentary titles for one tag page and cache their URLs/metadata.

    Returns an empty list for a blank tag, when requests is unavailable,
    or when fetching the tag page fails.
    """
    cleaned = (tag or "").strip()
    if not cleaned or not self._requests_available:
        return []
    page_no = max(1, int(page or 1))
    # NOTE(review): slugification only lowercases and hyphenates spaces —
    # assumes site slugs contain no umlauts/special chars; confirm.
    slug = cleaned.lower().replace(" ", "-")
    base = _absolute_url(f"{TAGS_BASE_PATH}{slug}/")
    url = base if page_no == 1 else f"{base}page/{page_no}/"
    try:
        soup = _get_soup(url, session=get_requests_session("dokustreams", headers=HEADERS))
    except Exception:
        return []
    hits = _parse_listing_hits(soup)
    titles: list[str] = []
    for hit in hits:
        if not hit.title:
            continue
        # Cache the detail-page URL and (plot, poster) metadata per title.
        if hit.url:
            self._title_to_url[hit.title] = hit.url
        self._title_meta[hit.title] = (hit.plot, hit.poster)
        titles.append(hit.title)
    return titles
|
||||
def random_title(self) -> Optional[str]:
    """Return a random documentary title via the site's redirect endpoint.

    Follows the /zufaellige-doku/ redirect, then either takes the first
    listing hit or falls back to reading the landing page's H1. Returns
    None on any failure (best-effort semantics).
    """
    if not self._requests_available:
        return None
    try:
        session = get_requests_session("dokustreams", headers=HEADERS)
        resp = session.get(_absolute_url(RANDOM_PATH), headers=HEADERS,
                           timeout=DEFAULT_TIMEOUT, allow_redirects=True)
        resp.raise_for_status()
        final_url = (resp.url or "").strip()
        # No redirect happened (still on the random endpoint) -> give up.
        if not final_url or final_url.rstrip("/").endswith(RANDOM_PATH.rstrip("/")):
            return None
        soup = _get_soup(final_url, session=session)
        hits = _parse_listing_hits(soup)
        if not hits:
            # Single page: read the title from the H1 (or og:title).
            h1 = soup.select_one("h1.entry-title, h1")
            title = (h1.get_text(" ", strip=True) if h1 else "").strip()
            if title:
                self._title_to_url[title] = final_url
                return title
            return None
        hit = hits[0]
        if hit.title:
            self._title_to_url[hit.title] = hit.url
            return hit.title
    except Exception:
        # Best-effort: any network/parse error yields "no title".
        return None
    # First hit had no usable title.
    return None
|
||||
def resolve_stream_link(self, link: str) -> Optional[str]:
    """Resolve a hoster link: try ResolveURL first, then follow redirects.

    Returns None for an empty link; otherwise returns the resolved URL,
    the final redirect target, or the raw link as a last resort.
    """
    if not link:
        return None
    from plugin_helpers import resolve_via_resolveurl

    # Prefer ResolveURL, which knows hoster-specific extraction.
    resolved = resolve_via_resolveurl(link, fallback_to_link=False)
    if resolved:
        return resolved
    # Fallback: follow HTTP redirects and return the final URL.
    if self._requests_available:
        try:
            session = get_requests_session("dokustreams", headers=HEADERS)
            response = session.get(link, headers=HEADERS,
                                   timeout=DEFAULT_TIMEOUT, allow_redirects=True)
            response.raise_for_status()
            final = (response.url or link).strip()
            return final or link
        except Exception:
            # Best-effort: fall through to returning the raw link.
            pass
    return link
|
||||
def stream_link_for(self, title: str, season: str, episode: str) -> Optional[str]:
|
||||
title = (title or "").strip()
|
||||
if not title:
|
||||
|
||||
Reference in New Issue
Block a user