Compare commits

..

2 Commits

3 changed files with 43 additions and 23 deletions

View File

@@ -1,3 +1,11 @@
## 0.1.85.0-dev - 2026-04-01
- dev: bump to 0.1.84.5-dev settings.xml auf Kodi 19+ Format (version=1) migriert, Level-Filter fuer Expert/Advanced korrigiert
## 0.1.84.5-dev - 2026-03-31
- dev: bump to 0.1.84.0-dev SerienStream Sammlungen mit Poster/Plot, Session-Cache für Sammlungs-URLs
## 0.1.84.0-dev - 2026-03-16
- dev: bump to 0.1.83.5-dev Trakt Weiterschauen via watched/shows, Specials überspringen

View File

@@ -1,5 +1,5 @@
<?xml version='1.0' encoding='utf-8'?>
<addon id="plugin.video.viewit" name="ViewIt" version="0.1.84.5-dev" provider-name="ViewIt">
<addon id="plugin.video.viewit" name="ViewIt" version="0.1.85.0-dev" provider-name="ViewIt">
<requires>
<import addon="xbmc.python" version="3.0.0" />
<import addon="script.module.requests" />

View File

@@ -505,6 +505,14 @@ def _strip_tags(value: str) -> str:
return re.sub(r"<[^>]+>", " ", value or "") return re.sub(r"<[^>]+>", " ", value or "")
def _clean_collection_title(title: str) -> str:
cleaned = "".join(
ch for ch in title
if unicodedata.category(ch) not in ("So", "Sm", "Sk", "Sc", "Cs", "Co", "Cn")
)
return re.sub(r"\s+", " ", cleaned).strip()
def _search_series_api(query: str) -> list[SeriesResult]:
    query = (query or "").strip()
    if not query:
@@ -1028,6 +1036,7 @@ class SerienstreamPlugin(BasisPlugin):
        self._series_metadata_cache: dict[str, tuple[dict[str, str], dict[str, str]]] = {}
        self._series_metadata_full: set[str] = set()
        self._collection_url_cache: dict[str, str] = {}
        self._collection_has_more: bool = False
        self.is_available = True
        self.unavailable_reason: str | None = None
        if not self._requests_available:  # pragma: no cover - optional dependency
@@ -1255,34 +1264,35 @@ class SerienstreamPlugin(BasisPlugin):
        return {"popular_series", "genres", "latest_episodes", "alpha", "collections"}
def collections(self) -> list[str]:
    """Return the collection names from page 1 of /sammlungen (paginated UI)."""
    return self._collections_page(1)
def _collections_page(self, page: int = 1) -> list[str]:
    """Return one page of collection names scraped from /sammlungen.

    Fetches ``/sammlungen`` (page 1) or ``/sammlungen?page=N``, collects
    every anchor pointing at ``/sammlung/...`` that carries an ``h2``
    title, merges the title -> absolute-URL map into the session cache
    (so URLs from earlier pages survive) and returns the titles sorted
    case-insensitively.

    Returns an empty list when the optional ``requests`` dependency is
    not available.
    """
    if not self._requests_available:
        return []
    base = _get_base_url()
    url = f"{base}/sammlungen" if page == 1 else f"{base}/sammlungen?page={page}"
    soup = _get_soup_simple(url)
    names: list[str] = []
    url_map: dict[str, str] = {}
    for a in soup.select('a[href*="/sammlung/"]'):
        h2 = a.find("h2")
        if not h2:
            continue
        title = _clean_collection_title(h2.get_text(strip=True))
        href = (a.get("href") or "").strip()
        # Fix: guard against duplicate anchors for the same collection on
        # one page — previously the title was appended to `names` once per
        # anchor, producing duplicate menu entries.
        if title and href and title not in url_map:
            url_map[title] = _absolute_url(href)
            names.append(title)
    if url_map:
        # Merge with URLs cached from previously fetched pages so that
        # titles_for_collection() can resolve names regardless of which
        # page they came from.
        existing = _session_cache_get("collection_urls")
        if isinstance(existing, dict):
            existing.update(url_map)
            _session_cache_set("collection_urls", existing)
        else:
            _session_cache_set("collection_urls", url_map)
    names.sort(key=lambda t: t.casefold())
    return names
    def titles_for_collection(self, collection: str, page: int = 1) -> list[str]:
@@ -1297,6 +1307,7 @@ class SerienstreamPlugin(BasisPlugin):
            return []
        if page > 1:
            url = f"{url}?page={page}"
base_url = self._collection_url_cache[collection]
        soup = _get_soup_simple(url)
        titles: list[str] = []
        for a in soup.select('h6 a[href*="/serie/"]'):
@@ -1305,6 +1316,7 @@ class SerienstreamPlugin(BasisPlugin):
            if title and href:
                self._remember_series_result(title, _absolute_url(href), "")
                titles.append(title)
self._collection_has_more = bool(soup.select(f'a[href*="?page={page + 1}"]'))
        return titles

    def popular_series(self) -> list[str]: