dev: extensive refactoring, Trakt integration, and code-review fixes (0.1.69-dev)

Core & architecture:
- New addon/core/ directory with router.py, trakt.py, metadata.py,
  gui.py, playstate.py, plugin_manager.py, updater.py (a dispatch
  sketch follows this list)
- Added a tests directory (24 tests, pytest + coverage)
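
  For orientation, a minimal sketch of how a route registry in
  addon/core/router.py could look; every name below is an assumption
  for illustration, not the addon's actual API:

    from collections.abc import Callable
    from urllib.parse import parse_qsl

    _ROUTES: dict[str, Callable[[dict], None]] = {}

    def route(action: str):
        """Register a handler for plugin://...?action=<action> calls."""
        def decorator(func: Callable[[dict], None]) -> Callable[[dict], None]:
            _ROUTES[action] = func
            return func
        return decorator

    @route("trakt_watchlist")
    def show_trakt_watchlist(params: dict) -> None:
        ...  # hypothetical handler

    def dispatch(paramstring: str) -> None:
        # Entry point: parse a sys.argv[2]-style query string and dispatch.
        params = dict(parse_qsl(paramstring.lstrip("?")))
        handler = _ROUTES.get(params.get("action", ""))
        if handler is not None:
            handler(params)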

Trakt integration:
- OAuth device flow, scrobbling, watchlist, history, calendar
- Upcoming episodes, Continue Watching
- Watched status in episode lists
- _trakt_find_in_plugins() with a 5-minute cache (see the sketch
  after this list)
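
  A minimal sketch of the 5-minute cache pattern, assuming a
  module-level dict guarded by a lock; the cache shape and the helper
  _search_plugins_for_title are hypothetical:

    import threading
    import time

    _FIND_CACHE: dict[str, tuple[float, object]] = {}
    _FIND_CACHE_TTL = 300  # 5 minutes
    _FIND_CACHE_LOCK = threading.Lock()

    def _trakt_find_in_plugins(title: str):
        now = time.time()
        with _FIND_CACHE_LOCK:
            hit = _FIND_CACHE.get(title)
            if hit is not None and now - hit[0] < _FIND_CACHE_TTL:
                return hit[1]  # fresh cache entry, skip the plugin scan
        result = _search_plugins_for_title(title)  # hypothetical helper
        with _FIND_CACHE_LOCK:
            _FIND_CACHE[title] = (now, result)
        return result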

Serienstream search:
- API results are always supplemented from the catalog cache (the
  server caps search responses at 10 hits)
- The catalog cache is prewarmed in a daemon thread at addon start
- The notification after the cache load goes through
  xbmc.executebuiltin() (thread-safe; sketched below)
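
  Sketched below is why executebuiltin() is the thread-safe choice
  here: Kodi queues the builtin on its own main loop, while xbmcgui
  dialogs must not be created off the main thread. The helper names
  are assumptions:

    import threading
    import xbmc

    def _prewarm_catalog_cache() -> None:
        _load_catalog_cache()  # hypothetical: fill the catalog cache
        # Safe from a worker thread: Kodi dispatches the builtin itself.
        xbmc.executebuiltin("Notification(Serienstream,Katalog-Cache geladen,3000)")

    threading.Thread(target=_prewarm_catalog_cache, daemon=True).start()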

Bug fixes (code review):
- Race condition on _TRAKT_WATCHED_CACHE: added
  _TRAKT_WATCHED_CACHE_LOCK (see the sketch after this list)
- GUI dialog from a daemon thread: xbmcgui -> xbmc.executebuiltin()
- Hardened the Trakt watchlist routes against ValueError
- Fixed the token expires_at == 0 check
- Cleaned up the get_setting_bool() control flow in gui.py
- topstreamfilm_plugin: try-finally around xbmcvfs.File.close()
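
  The race-condition fix follows the usual lock-around-shared-dict
  pattern; the cache shape and accessor names below are assumptions:

    import threading

    _TRAKT_WATCHED_CACHE: dict[str, set] = {}
    _TRAKT_WATCHED_CACHE_LOCK = threading.Lock()

    def _get_watched(show_id: str):  # hypothetical accessor
        with _TRAKT_WATCHED_CACHE_LOCK:
            return _TRAKT_WATCHED_CACHE.get(show_id)

    def _set_watched(show_id: str, episodes: set) -> None:
        with _TRAKT_WATCHED_CACHE_LOCK:
            _TRAKT_WATCHED_CACHE[show_id] = episodes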

Cleanup:
- Removed default.py.bak and refactor_router.py
- .gitignore: removed the /tests/ entry
- Unified type hints (Dict/List/Tuple -> dict/list/tuple)
commit 7b60b00c8b (parent 73f07d20b4)
2026-03-01 18:23:45 +01:00
36 changed files with 4765 additions and 672 deletions

@@ -1024,7 +1024,7 @@ class AniworldPlugin(BasisPlugin):
         _session_cache_set(self._season_episodes_cache_name(season_url), payload)
 
     def capabilities(self) -> set[str]:
-        return {"popular_series", "genres", "latest_episodes"}
+        return {"popular_series", "genres", "latest_episodes", "latest_titles"}
 
     def _find_series_by_title(self, title: str) -> Optional[SeriesResult]:
         title = (title or "").strip()
@@ -1277,6 +1277,100 @@ class AniworldPlugin(BasisPlugin):
         self._save_title_url_cache()
         return [entry.title for entry in entries if entry.title]
 
+    def _genre_slug(self, genre: str) -> str:
+        """Converts a genre name into a URL slug."""
+        slug = (genre or "").strip().lower()
+        slug = re.sub(r"[^a-z0-9]+", "-", slug).strip("-")
+        return slug
+
+    def _genre_page_url(self, genre: str, page: int) -> str:
+        slug = self._genre_slug(genre)
+        base = f"{_get_base_url()}/genre/{slug}"
+        return base if page <= 1 else f"{base}?page={page}"
+
+    def _parse_genre_page_titles(self, soup: BeautifulSoupT) -> List[str]:
+        """Extracts titles from a paginated genre page."""
+        titles: List[str] = []
+        seen: set[str] = set()
+        for anchor in soup.select("div.seriesListContainer a[href], ul.seriesList li a[href], a[href*='/anime/stream/']"):
+            href = (anchor.get("href") or "").strip()
+            # Skip season/episode deep links; only series pages carry titles.
+            if not href or "/staffel-" in href or "/episode-" in href:
+                continue
+            title = (anchor.get_text(" ", strip=True) or "").strip()
+            if not title:
+                continue
+            key = title.casefold()
+            if key in seen:
+                continue
+            seen.add(key)
+            url = _absolute_url(href)
+            self._remember_anime_result(title, url, persist=False)
+            titles.append(title)
+        return titles
+
+    def _extract_genre_last_page(self, soup: BeautifulSoupT) -> int:
+        max_page = 1
+        for anchor in soup.select("a.page-link[href], nav a[href]"):
+            href = (anchor.get("href") or "").strip()
+            for match in re.findall(r"[?&]page=(\d+)", href):
+                try:
+                    max_page = max(max_page, int(match))
+                except Exception:
+                    continue
+        return max_page
+
+    def titles_for_genre_page(self, genre: str, page: int = 1) -> List[str]:
+        """Returns the titles of one (paginated) genre page."""
+        genre = (genre or "").strip()
+        if not genre or not self._requests_available:
+            return []
+        page = max(1, int(page or 1))
+        try:
+            url = self._genre_page_url(genre, page)
+            soup = _get_soup_simple(url)
+            return self._parse_genre_page_titles(soup)
+        except Exception:
+            return []
+
+    def genre_page_count(self, genre: str) -> int:
+        """Returns the number of pages for a genre listing."""
+        genre = (genre or "").strip()
+        if not genre or not self._requests_available:
+            return 1
+        try:
+            url = self._genre_page_url(genre, 1)
+            soup = _get_soup_simple(url)
+            return max(1, self._extract_genre_last_page(soup))
+        except Exception:
+            return 1
+
+    def latest_titles(self, page: int = 1) -> List[str]:
+        """Returns newly added anime from the Animekalender page."""
+        if not self._requests_available:
+            return []
+        page = max(1, int(page or 1))
+        try:
+            url = f"{_get_base_url()}/animekalender"
+            if page > 1:
+                url = f"{url}?page={page}"
+            soup = _get_soup_simple(url)
+            titles: List[str] = []
+            seen: set[str] = set()
+            for anchor in soup.select("a[href*='/anime/stream/']"):
+                title = (anchor.get_text(" ", strip=True) or "").strip()
+                href = (anchor.get("href") or "").strip()
+                if not title or "/staffel-" in href or "/episode-" in href:
+                    continue
+                key = title.casefold()
+                if key in seen:
+                    continue
+                seen.add(key)
+                self._remember_anime_result(title, _absolute_url(href), persist=False)
+                titles.append(title)
+            return titles
+        except Exception:
+            return []
 
     def _season_label(self, number: int) -> str:
         return f"Staffel {number}"