dev: bump to 0.1.84.5-dev — SerienStream Sammlungen mit Poster/Plot, Session-Cache für Sammlungs-URLs

This commit is contained in:
2026-03-31 20:57:51 +02:00
parent 938f6c0e3d
commit e5d93e3af6
4 changed files with 86 additions and 8 deletions

View File

@@ -1027,6 +1027,7 @@ class SerienstreamPlugin(BasisPlugin):
self._latest_hoster_cache: dict[str, list[str]] = {}
self._series_metadata_cache: dict[str, tuple[dict[str, str], dict[str, str]]] = {}
self._series_metadata_full: set[str] = set()
self._collection_url_cache: dict[str, str] = {}
self.is_available = True
self.unavailable_reason: str | None = None
if not self._requests_available: # pragma: no cover - optional dependency
@@ -1251,7 +1252,60 @@ class SerienstreamPlugin(BasisPlugin):
def capabilities(self) -> set[str]:
    """Report the features this plugin supports, for router menu building.

    Returns:
        Set of feature identifiers; includes ``"collections"`` (added in
        0.1.84.5-dev) alongside the previously supported features.
    """
    # Bug fix: the block contained two consecutive return statements
    # (a fused old/new diff pair), making the updated value unreachable.
    # Keep only the new return that advertises "collections".
    return {"popular_series", "genres", "latest_episodes", "alpha", "collections"}
def collections(self) -> list[str]:
    """Return every collection name listed on /sammlungen, across all pages.

    Walks the paginated overview, remembers each collection's absolute URL
    in the session cache (key ``"collection_urls"``), and returns the
    titles in page order. Returns an empty list when the HTTP layer is
    unavailable.
    """
    if not self._requests_available:
        return []
    base = _get_base_url()
    titles: list[str] = []
    links: dict[str, str] = {}
    current = 1
    while True:
        page_url = f"{base}/sammlungen" if current == 1 else f"{base}/sammlungen?page={current}"
        soup = _get_soup_simple(page_url)
        got_any = False
        for anchor in soup.select('a[href*="/sammlung/"]'):
            heading = anchor.find("h2")
            if heading is None:
                continue
            name = heading.get_text(strip=True)
            link = (anchor.get("href") or "").strip()
            if name and link:
                links[name] = _absolute_url(link)
                titles.append(name)
                got_any = True
        # An empty page means we ran past the last one.
        if not got_any:
            break
        # Continue only while the page links to a successor page.
        if not soup.select(f'a[href*="/sammlungen?page={current + 1}"]'):
            break
        current += 1
    if links:
        _session_cache_set("collection_urls", links)
    return titles
def titles_for_collection(self, collection: str, page: int = 1) -> list[str]:
    """Return the series titles of one collection page.

    Args:
        collection: Collection name as returned by :meth:`collections`.
        page: 1-based page number; pages beyond the first get a
            ``?page=N`` query suffix.

    Returns:
        Titles found on that page; empty when the HTTP layer is
        unavailable or the collection URL is unknown.
    """
    if not self._requests_available:
        return []
    # Refresh the local URL map from the session cache, if present.
    cached = _session_cache_get("collection_urls")
    if isinstance(cached, dict):
        self._collection_url_cache.update(cached)
    target = self._collection_url_cache.get(collection)
    if not target:
        return []
    soup = _get_soup_simple(target if page <= 1 else f"{target}?page={page}")
    results: list[str] = []
    for anchor in soup.select('h6 a[href*="/serie/"]'):
        name = anchor.get_text(strip=True)
        link = (anchor.get("href") or "").strip()
        if name and link:
            # Record the hit so later lookups can resolve the series URL.
            self._remember_series_result(name, _absolute_url(link), "")
            results.append(name)
    return results
def popular_series(self) -> list[str]:
"""Liefert die Titel der beliebten Serien (Quelle: `/beliebte-serien`)."""