Compare commits

...

8 Commits

28 changed files with 1820 additions and 1098 deletions

View File

@@ -2,41 +2,37 @@
<img src="addon/resources/logo.png" alt="ViewIT Logo" width="220" /> <img src="addon/resources/logo.png" alt="ViewIT Logo" width="220" />
ViewIT ist ein KodiAddon zum Durchsuchen und Abspielen von Inhalten der unterstützten Anbieter. ViewIT ist ein Kodi Addon.
Es durchsucht Provider und startet Streams.
## Projektstruktur ## Projektstruktur
- `addon/` KodiAddon Quellcode - `addon/` Kodi Addon Quellcode
- `scripts/` BuildScripts (arbeiten mit `addon/` + `dist/`) - `scripts/` Build Scripts
- `dist/` BuildAusgaben (ZIPs) - `dist/` Build Ausgaben
- `docs/`, `tests/` - `docs/` Doku
- `tests/` Tests
## Build & Release ## Build und Release
- AddonOrdner bauen: `./scripts/build_install_addon.sh``dist/<addon_id>/` - Addon Ordner bauen: `./scripts/build_install_addon.sh`
- KodiZIP bauen: `./scripts/build_kodi_zip.sh``dist/<addon_id>-<version>.zip` - Kodi ZIP bauen: `./scripts/build_kodi_zip.sh`
- AddonVersion in `addon/addon.xml` - Version pflegen: `addon/addon.xml`
- Reproduzierbare ZIPs: optional `SOURCE_DATE_EPOCH` setzen - Reproduzierbares ZIP: `SOURCE_DATE_EPOCH` optional setzen
## Lokales Kodi-Repository ## Lokales Kodi Repository
- Repository bauen (inkl. ZIPs + `addons.xml` + `addons.xml.md5`): `./scripts/build_local_kodi_repo.sh` - Repository bauen: `./scripts/build_local_kodi_repo.sh`
- Lokal bereitstellen: `./scripts/serve_local_kodi_repo.sh` - Repository starten: `./scripts/serve_local_kodi_repo.sh`
- Standard-URL: `http://127.0.0.1:8080/repo/addons.xml` - Standard URL: `http://127.0.0.1:8080/repo/addons.xml`
- Optional eigene URL beim Build setzen: `REPO_BASE_URL=http://<host>:<port>/repo ./scripts/build_local_kodi_repo.sh` - Eigene URL beim Build: `REPO_BASE_URL=http://<host>:<port>/repo ./scripts/build_local_kodi_repo.sh`
## Gitea Release-Asset Upload ## Entwicklung
- ZIP bauen: `./scripts/build_kodi_zip.sh` - Router: `addon/default.py`
- Token setzen: `export GITEA_TOKEN=<token>`
- Asset an Tag hochladen (erstellt Release bei Bedarf): `./scripts/publish_gitea_release.sh`
- Optional: `--tag v0.1.50 --asset dist/plugin.video.viewit-0.1.50.zip`
## Entwicklung (kurz)
- Hauptlogik: `addon/default.py`
- Plugins: `addon/plugins/*_plugin.py` - Plugins: `addon/plugins/*_plugin.py`
- Einstellungen: `addon/resources/settings.xml` - Settings: `addon/resources/settings.xml`
## Tests mit Abdeckung ## Tests
- Dev-Abhängigkeiten installieren: `./.venv/bin/pip install -r requirements-dev.txt` - Dev Pakete installieren: `./.venv/bin/pip install -r requirements-dev.txt`
- Tests + Coverage starten: `./.venv/bin/pytest` - Tests starten: `./.venv/bin/pytest`
- Optional (XML-Report): `./.venv/bin/pytest --cov-report=xml` - XML Report: `./.venv/bin/pytest --cov-report=xml`
## Dokumentation ## Dokumentation
Siehe `docs/`. Siehe `docs/`.

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?> <?xml version='1.0' encoding='utf-8'?>
<addon id="plugin.video.viewit" name="ViewIt" version="0.1.52" provider-name="ViewIt"> <addon id="plugin.video.viewit" name="ViewIt" version="0.1.57" provider-name="ViewIt">
<requires> <requires>
<import addon="xbmc.python" version="3.0.0" /> <import addon="xbmc.python" version="3.0.0" />
<import addon="script.module.requests" /> <import addon="script.module.requests" />
@@ -10,12 +10,12 @@
<provides>video</provides> <provides>video</provides>
</extension> </extension>
<extension point="xbmc.addon.metadata"> <extension point="xbmc.addon.metadata">
<summary>ViewIt Kodi Plugin</summary> <summary>Suche und Wiedergabe fuer mehrere Quellen</summary>
<description>Streaming-Addon für Streamingseiten: Suche, Staffeln/Episoden und Wiedergabe.</description> <description>Findet Titel in unterstuetzten Quellen und startet Filme oder Episoden direkt in Kodi.</description>
<assets> <assets>
<icon>icon.png</icon> <icon>icon.png</icon>
</assets> </assets>
<license>GPL-3.0-or-later</license> <license>GPL-3.0-or-later</license>
<platform>all</platform> <platform>all</platform>
</extension> </extension>
</addon> </addon>

File diff suppressed because it is too large Load Diff

View File

@@ -32,3 +32,12 @@ def get_requests_session(key: str, *, headers: Optional[dict[str, str]] = None):
pass pass
return session return session
def close_all_sessions() -> None:
"""Close and clear all pooled sessions."""
for session in list(_SESSIONS.values()):
try:
session.close()
except Exception:
pass
_SESSIONS.clear()

93
addon/metadata_utils.py Normal file
View File

@@ -0,0 +1,93 @@
from __future__ import annotations
import re
from plugin_interface import BasisPlugin
from tmdb import TmdbCastMember
METADATA_MODE_AUTO = 0
METADATA_MODE_SOURCE = 1
METADATA_MODE_TMDB = 2
METADATA_MODE_MIX = 3
def metadata_setting_id(plugin_name: str) -> str:
safe = re.sub(r"[^a-z0-9]+", "_", (plugin_name or "").strip().casefold()).strip("_")
return f"{safe}_metadata_source" if safe else "metadata_source"
def plugin_supports_metadata(plugin: BasisPlugin) -> bool:
try:
return plugin.__class__.metadata_for is not BasisPlugin.metadata_for
except Exception:
return False
def metadata_policy(
plugin_name: str,
plugin: BasisPlugin,
*,
allow_tmdb: bool,
get_setting_int=None,
) -> tuple[bool, bool, bool]:
if not callable(get_setting_int):
return plugin_supports_metadata(plugin), allow_tmdb, bool(getattr(plugin, "prefer_source_metadata", False))
mode = get_setting_int(metadata_setting_id(plugin_name), default=METADATA_MODE_AUTO)
supports_source = plugin_supports_metadata(plugin)
if mode == METADATA_MODE_SOURCE:
return supports_source, False, True
if mode == METADATA_MODE_TMDB:
return False, allow_tmdb, False
if mode == METADATA_MODE_MIX:
return supports_source, allow_tmdb, True
prefer_source = bool(getattr(plugin, "prefer_source_metadata", False))
return supports_source, allow_tmdb, prefer_source
def collect_plugin_metadata(
plugin: BasisPlugin,
titles: list[str],
) -> dict[str, tuple[dict[str, str], dict[str, str], list[TmdbCastMember] | None]]:
getter = getattr(plugin, "metadata_for", None)
if not callable(getter):
return {}
collected: dict[str, tuple[dict[str, str], dict[str, str], list[TmdbCastMember] | None]] = {}
for title in titles:
try:
labels, art, cast = getter(title)
except Exception:
continue
if isinstance(labels, dict) or isinstance(art, dict) or cast:
label_map = {str(k): str(v) for k, v in dict(labels or {}).items() if v}
art_map = {str(k): str(v) for k, v in dict(art or {}).items() if v}
collected[title] = (label_map, art_map, cast if isinstance(cast, list) else None)
return collected
def needs_tmdb(labels: dict[str, str], art: dict[str, str], *, want_plot: bool, want_art: bool) -> bool:
if want_plot and not labels.get("plot"):
return True
if want_art and not (art.get("thumb") or art.get("poster") or art.get("fanart") or art.get("landscape")):
return True
return False
def merge_metadata(
title: str,
tmdb_labels: dict[str, str] | None,
tmdb_art: dict[str, str] | None,
tmdb_cast: list[TmdbCastMember] | None,
plugin_meta: tuple[dict[str, str], dict[str, str], list[TmdbCastMember] | None] | None,
) -> tuple[dict[str, str], dict[str, str], list[TmdbCastMember] | None]:
labels = dict(tmdb_labels or {})
art = dict(tmdb_art or {})
cast = tmdb_cast
if plugin_meta is not None:
meta_labels, meta_art, meta_cast = plugin_meta
labels.update({k: str(v) for k, v in dict(meta_labels or {}).items() if v})
art.update({k: str(v) for k, v in dict(meta_art or {}).items() if v})
if meta_cast is not None:
cast = meta_cast
if "title" not in labels:
labels["title"] = title
return labels, art, cast

View File

@@ -4,7 +4,7 @@
from __future__ import annotations from __future__ import annotations
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, Set, Tuple from typing import Any, Callable, Dict, List, Optional, Set, Tuple
class BasisPlugin(ABC): class BasisPlugin(ABC):
@@ -15,7 +15,11 @@ class BasisPlugin(ABC):
prefer_source_metadata: bool = False prefer_source_metadata: bool = False
@abstractmethod @abstractmethod
async def search_titles(self, query: str) -> List[str]: async def search_titles(
self,
query: str,
progress_callback: Optional[Callable[[str, Optional[int]], Any]] = None,
) -> List[str]:
"""Liefert eine Liste aller Treffer fuer die Suche.""" """Liefert eine Liste aller Treffer fuer die Suche."""
@abstractmethod @abstractmethod

View File

@@ -9,7 +9,7 @@ Zum Verwenden:
from __future__ import annotations from __future__ import annotations
from dataclasses import dataclass from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, List, Optional, TypeAlias from typing import TYPE_CHECKING, Any, Callable, List, Optional
try: # pragma: no cover - optional dependency try: # pragma: no cover - optional dependency
import requests import requests
@@ -34,8 +34,8 @@ if TYPE_CHECKING: # pragma: no cover
from requests import Session as RequestsSession from requests import Session as RequestsSession
from bs4 import BeautifulSoup as BeautifulSoupT # type: ignore[import-not-found] from bs4 import BeautifulSoup as BeautifulSoupT # type: ignore[import-not-found]
else: # pragma: no cover else: # pragma: no cover
RequestsSession: TypeAlias = Any RequestsSession = Any
BeautifulSoupT: TypeAlias = Any BeautifulSoupT = Any
ADDON_ID = "plugin.video.viewit" ADDON_ID = "plugin.video.viewit"
@@ -88,9 +88,13 @@ class TemplatePlugin(BasisPlugin):
self._session = session self._session = session
return self._session return self._session
async def search_titles(self, query: str) -> List[str]: async def search_titles(
self,
query: str,
progress_callback: Optional[Callable[[str, Optional[int]], Any]] = None,
) -> List[str]:
"""TODO: Suche auf der Zielseite implementieren.""" """TODO: Suche auf der Zielseite implementieren."""
_ = query _ = (query, progress_callback)
return [] return []
def seasons_for(self, title: str) -> List[str]: def seasons_for(self, title: str) -> List[str]:

View File

@@ -13,7 +13,8 @@ import hashlib
import json import json
import re import re
import time import time
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, TypeAlias from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple
from urllib.parse import quote
try: # pragma: no cover - optional dependency try: # pragma: no cover - optional dependency
import requests import requests
@@ -43,8 +44,8 @@ if TYPE_CHECKING: # pragma: no cover
from requests import Session as RequestsSession from requests import Session as RequestsSession
from bs4 import BeautifulSoup as BeautifulSoupT # type: ignore[import-not-found] from bs4 import BeautifulSoup as BeautifulSoupT # type: ignore[import-not-found]
else: # pragma: no cover else: # pragma: no cover
RequestsSession: TypeAlias = Any RequestsSession = Any
BeautifulSoupT: TypeAlias = Any BeautifulSoupT = Any
SETTING_BASE_URL = "aniworld_base_url" SETTING_BASE_URL = "aniworld_base_url"
@@ -69,6 +70,16 @@ HEADERS = {
SESSION_CACHE_TTL_SECONDS = 300 SESSION_CACHE_TTL_SECONDS = 300
SESSION_CACHE_PREFIX = "viewit.aniworld" SESSION_CACHE_PREFIX = "viewit.aniworld"
SESSION_CACHE_MAX_TITLE_URLS = 800 SESSION_CACHE_MAX_TITLE_URLS = 800
ProgressCallback = Optional[Callable[[str, Optional[int]], Any]]
def _emit_progress(callback: ProgressCallback, message: str, percent: Optional[int] = None) -> None:
if not callable(callback):
return
try:
callback(str(message or ""), None if percent is None else int(percent))
except Exception:
return
@dataclass @dataclass
@@ -126,7 +137,7 @@ def _latest_episodes_url() -> str:
def _search_url(query: str) -> str: def _search_url(query: str) -> str:
return f"{_get_base_url()}/search?q={query}" return f"{_get_base_url()}/search?q={quote((query or '').strip())}"
def _search_api_url() -> str: def _search_api_url() -> str:
@@ -289,37 +300,56 @@ def _get_soup(url: str, *, session: Optional[RequestsSession] = None) -> Beautif
_ensure_requests() _ensure_requests()
_log_visit(url) _log_visit(url)
sess = session or get_requests_session("aniworld", headers=HEADERS) sess = session or get_requests_session("aniworld", headers=HEADERS)
response = None
try: try:
response = sess.get(url, headers=HEADERS, timeout=DEFAULT_TIMEOUT) response = sess.get(url, headers=HEADERS, timeout=DEFAULT_TIMEOUT)
response.raise_for_status() response.raise_for_status()
except Exception as exc: except Exception as exc:
_log_error(f"GET {url} failed: {exc}") _log_error(f"GET {url} failed: {exc}")
raise raise
if response.url and response.url != url: try:
_log_url(response.url, kind="REDIRECT") final_url = (response.url or url) if response is not None else url
_log_response_html(url, response.text) body = (response.text or "") if response is not None else ""
if _looks_like_cloudflare_challenge(response.text): if final_url != url:
raise RuntimeError("Cloudflare-Schutz erkannt. requests reicht ggf. nicht aus.") _log_url(final_url, kind="REDIRECT")
return BeautifulSoup(response.text, "html.parser") _log_response_html(url, body)
if _looks_like_cloudflare_challenge(body):
raise RuntimeError("Cloudflare-Schutz erkannt. requests reicht ggf. nicht aus.")
return BeautifulSoup(body, "html.parser")
finally:
if response is not None:
try:
response.close()
except Exception:
pass
def _get_html_simple(url: str) -> str: def _get_html_simple(url: str) -> str:
_ensure_requests() _ensure_requests()
_log_visit(url) _log_visit(url)
sess = get_requests_session("aniworld", headers=HEADERS) sess = get_requests_session("aniworld", headers=HEADERS)
response = None
try: try:
response = sess.get(url, headers=HEADERS, timeout=DEFAULT_TIMEOUT) response = sess.get(url, headers=HEADERS, timeout=DEFAULT_TIMEOUT)
response.raise_for_status() response.raise_for_status()
except Exception as exc: except Exception as exc:
_log_error(f"GET {url} failed: {exc}") _log_error(f"GET {url} failed: {exc}")
raise raise
if response.url and response.url != url: try:
_log_url(response.url, kind="REDIRECT") final_url = (response.url or url) if response is not None else url
body = response.text body = (response.text or "") if response is not None else ""
_log_response_html(url, body) if final_url != url:
if _looks_like_cloudflare_challenge(body): _log_url(final_url, kind="REDIRECT")
raise RuntimeError("Cloudflare-Schutz erkannt. requests reicht ggf. nicht aus.") _log_response_html(url, body)
return body if _looks_like_cloudflare_challenge(body):
raise RuntimeError("Cloudflare-Schutz erkannt. requests reicht ggf. nicht aus.")
return body
finally:
if response is not None:
try:
response.close()
except Exception:
pass
def _get_soup_simple(url: str) -> BeautifulSoupT: def _get_soup_simple(url: str) -> BeautifulSoupT:
@@ -351,17 +381,27 @@ def _post_json(url: str, *, payload: Dict[str, str], session: Optional[RequestsS
_ensure_requests() _ensure_requests()
_log_visit(url) _log_visit(url)
sess = session or get_requests_session("aniworld", headers=HEADERS) sess = session or get_requests_session("aniworld", headers=HEADERS)
response = sess.post(url, data=payload, headers=HEADERS, timeout=DEFAULT_TIMEOUT) response = None
response.raise_for_status()
if response.url and response.url != url:
_log_url(response.url, kind="REDIRECT")
_log_response_html(url, response.text)
if _looks_like_cloudflare_challenge(response.text):
raise RuntimeError("Cloudflare-Schutz erkannt. requests reicht ggf. nicht aus.")
try: try:
return response.json() response = sess.post(url, data=payload, headers=HEADERS, timeout=DEFAULT_TIMEOUT)
except Exception: response.raise_for_status()
return None final_url = (response.url or url) if response is not None else url
body = (response.text or "") if response is not None else ""
if final_url != url:
_log_url(final_url, kind="REDIRECT")
_log_response_html(url, body)
if _looks_like_cloudflare_challenge(body):
raise RuntimeError("Cloudflare-Schutz erkannt. requests reicht ggf. nicht aus.")
try:
return response.json()
except Exception:
return None
finally:
if response is not None:
try:
response.close()
except Exception:
pass
def _extract_canonical_url(soup: BeautifulSoupT, fallback: str) -> str: def _extract_canonical_url(soup: BeautifulSoupT, fallback: str) -> str:
@@ -555,10 +595,18 @@ def resolve_redirect(target_url: str) -> Optional[str]:
_log_visit(normalized_url) _log_visit(normalized_url)
session = get_requests_session("aniworld", headers=HEADERS) session = get_requests_session("aniworld", headers=HEADERS)
_get_soup(_get_base_url(), session=session) _get_soup(_get_base_url(), session=session)
response = session.get(normalized_url, headers=HEADERS, timeout=DEFAULT_TIMEOUT, allow_redirects=True) response = None
if response.url: try:
_log_url(response.url, kind="RESOLVED") response = session.get(normalized_url, headers=HEADERS, timeout=DEFAULT_TIMEOUT, allow_redirects=True)
return response.url if response.url else None if response.url:
_log_url(response.url, kind="RESOLVED")
return response.url if response.url else None
finally:
if response is not None:
try:
response.close()
except Exception:
pass
def fetch_episode_hoster_names(episode_url: str) -> List[str]: def fetch_episode_hoster_names(episode_url: str) -> List[str]:
@@ -629,11 +677,12 @@ def fetch_episode_stream_link(
return resolved return resolved
def search_animes(query: str) -> List[SeriesResult]: def search_animes(query: str, *, progress_callback: ProgressCallback = None) -> List[SeriesResult]:
_ensure_requests() _ensure_requests()
query = (query or "").strip() query = (query or "").strip()
if not query: if not query:
return [] return []
_emit_progress(progress_callback, "AniWorld API-Suche", 15)
session = get_requests_session("aniworld", headers=HEADERS) session = get_requests_session("aniworld", headers=HEADERS)
try: try:
session.get(_get_base_url(), headers=HEADERS, timeout=DEFAULT_TIMEOUT) session.get(_get_base_url(), headers=HEADERS, timeout=DEFAULT_TIMEOUT)
@@ -643,7 +692,9 @@ def search_animes(query: str) -> List[SeriesResult]:
results: List[SeriesResult] = [] results: List[SeriesResult] = []
seen: set[str] = set() seen: set[str] = set()
if isinstance(data, list): if isinstance(data, list):
for entry in data: for idx, entry in enumerate(data, start=1):
if idx == 1 or idx % 50 == 0:
_emit_progress(progress_callback, f"API auswerten {idx}/{len(data)}", 35)
if not isinstance(entry, dict): if not isinstance(entry, dict):
continue continue
title = _strip_html((entry.get("title") or "").strip()) title = _strip_html((entry.get("title") or "").strip())
@@ -665,10 +716,16 @@ def search_animes(query: str) -> List[SeriesResult]:
seen.add(key) seen.add(key)
description = (entry.get("description") or "").strip() description = (entry.get("description") or "").strip()
results.append(SeriesResult(title=title, description=description, url=url)) results.append(SeriesResult(title=title, description=description, url=url))
_emit_progress(progress_callback, f"API-Treffer: {len(results)}", 85)
return results return results
soup = _get_soup_simple(_search_url(requests.utils.quote(query))) _emit_progress(progress_callback, "HTML-Suche (Fallback)", 55)
for anchor in soup.select("a[href^='/anime/stream/'][href]"): soup = _get_soup_simple(_search_url(query))
anchors = soup.select("a[href^='/anime/stream/'][href]")
total_anchors = max(1, len(anchors))
for idx, anchor in enumerate(anchors, start=1):
if idx == 1 or idx % 100 == 0:
_emit_progress(progress_callback, f"HTML auswerten {idx}/{total_anchors}", 70)
href = (anchor.get("href") or "").strip() href = (anchor.get("href") or "").strip()
if not href or "/staffel-" in href or "/episode-" in href: if not href or "/staffel-" in href or "/episode-" in href:
continue continue
@@ -686,6 +743,7 @@ def search_animes(query: str) -> List[SeriesResult]:
continue continue
seen.add(key) seen.add(key)
results.append(SeriesResult(title=title, description="", url=url)) results.append(SeriesResult(title=title, description="", url=url))
_emit_progress(progress_callback, f"HTML-Treffer: {len(results)}", 85)
return results return results
@@ -1151,7 +1209,7 @@ class AniworldPlugin(BasisPlugin):
return self._episode_label_cache.get(cache_key, {}).get(episode_label) return self._episode_label_cache.get(cache_key, {}).get(episode_label)
return None return None
async def search_titles(self, query: str) -> List[str]: async def search_titles(self, query: str, progress_callback: ProgressCallback = None) -> List[str]:
query = (query or "").strip() query = (query or "").strip()
if not query: if not query:
self._anime_results.clear() self._anime_results.clear()
@@ -1163,7 +1221,8 @@ class AniworldPlugin(BasisPlugin):
if not self._requests_available: if not self._requests_available:
raise RuntimeError("AniworldPlugin kann ohne requests/bs4 nicht suchen.") raise RuntimeError("AniworldPlugin kann ohne requests/bs4 nicht suchen.")
try: try:
results = search_animes(query) _emit_progress(progress_callback, "AniWorld Suche startet", 10)
results = search_animes(query, progress_callback=progress_callback)
except Exception as exc: # pragma: no cover except Exception as exc: # pragma: no cover
self._anime_results.clear() self._anime_results.clear()
self._season_cache.clear() self._season_cache.clear()
@@ -1178,6 +1237,7 @@ class AniworldPlugin(BasisPlugin):
self._season_cache.clear() self._season_cache.clear()
self._season_links_cache.clear() self._season_links_cache.clear()
self._episode_label_cache.clear() self._episode_label_cache.clear()
_emit_progress(progress_callback, f"Treffer aufbereitet: {len(results)}", 95)
return [result.title for result in results] return [result.title for result in results]
def _ensure_seasons(self, title: str) -> List[SeasonInfo]: def _ensure_seasons(self, title: str) -> List[SeasonInfo]:
@@ -1213,6 +1273,18 @@ class AniworldPlugin(BasisPlugin):
_log_url(link, kind="FOUND") _log_url(link, kind="FOUND")
return link return link
def episode_url_for(self, title: str, season: str, episode: str) -> str:
cache_key = (title, season)
cached = self._episode_label_cache.get(cache_key)
if cached:
info = cached.get(episode)
if info and info.url:
return info.url
episode_info = self._lookup_episode(title, season, episode)
if episode_info and episode_info.url:
return episode_info.url
return ""
def available_hosters_for(self, title: str, season: str, episode: str) -> List[str]: def available_hosters_for(self, title: str, season: str, episode: str) -> List[str]:
if not self._requests_available: if not self._requests_available:
raise RuntimeError("AniworldPlugin kann ohne requests/bs4 keine Hoster laden.") raise RuntimeError("AniworldPlugin kann ohne requests/bs4 keine Hoster laden.")

View File

@@ -5,7 +5,7 @@ from __future__ import annotations
from dataclasses import dataclass from dataclasses import dataclass
import re import re
from urllib.parse import quote from urllib.parse import quote
from typing import TYPE_CHECKING, Any, Dict, List, Optional, TypeAlias from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional
try: # pragma: no cover - optional dependency try: # pragma: no cover - optional dependency
import requests import requests
@@ -27,8 +27,8 @@ if TYPE_CHECKING: # pragma: no cover
from requests import Session as RequestsSession from requests import Session as RequestsSession
from bs4 import BeautifulSoup as BeautifulSoupT # type: ignore[import-not-found] from bs4 import BeautifulSoup as BeautifulSoupT # type: ignore[import-not-found]
else: # pragma: no cover else: # pragma: no cover
RequestsSession: TypeAlias = Any RequestsSession = Any
BeautifulSoupT: TypeAlias = Any BeautifulSoupT = Any
ADDON_ID = "plugin.video.viewit" ADDON_ID = "plugin.video.viewit"
@@ -44,6 +44,16 @@ SETTING_LOG_URLS = "log_urls_dokustreams"
SETTING_DUMP_HTML = "dump_html_dokustreams" SETTING_DUMP_HTML = "dump_html_dokustreams"
SETTING_SHOW_URL_INFO = "show_url_info_dokustreams" SETTING_SHOW_URL_INFO = "show_url_info_dokustreams"
SETTING_LOG_ERRORS = "log_errors_dokustreams" SETTING_LOG_ERRORS = "log_errors_dokustreams"
ProgressCallback = Optional[Callable[[str, Optional[int]], Any]]
def _emit_progress(callback: ProgressCallback, message: str, percent: Optional[int] = None) -> None:
if not callable(callback):
return
try:
callback(str(message or ""), None if percent is None else int(percent))
except Exception:
return
HEADERS = { HEADERS = {
"User-Agent": "Mozilla/5.0 (Kodi; ViewIt) AppleWebKit/537.36 (KHTML, like Gecko)", "User-Agent": "Mozilla/5.0 (Kodi; ViewIt) AppleWebKit/537.36 (KHTML, like Gecko)",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
@@ -213,16 +223,26 @@ def _get_soup(url: str, *, session: Optional[RequestsSession] = None) -> Beautif
raise RuntimeError("requests/bs4 sind nicht verfuegbar.") raise RuntimeError("requests/bs4 sind nicht verfuegbar.")
_log_visit(url) _log_visit(url)
sess = session or get_requests_session("dokustreams", headers=HEADERS) sess = session or get_requests_session("dokustreams", headers=HEADERS)
response = None
try: try:
response = sess.get(url, headers=HEADERS, timeout=DEFAULT_TIMEOUT) response = sess.get(url, headers=HEADERS, timeout=DEFAULT_TIMEOUT)
response.raise_for_status() response.raise_for_status()
except Exception as exc: except Exception as exc:
_log_error_message(f"GET {url} failed: {exc}") _log_error_message(f"GET {url} failed: {exc}")
raise raise
if response.url and response.url != url: try:
_log_url_event(response.url, kind="REDIRECT") final_url = (response.url or url) if response is not None else url
_log_response_html(url, response.text) body = (response.text or "") if response is not None else ""
return BeautifulSoup(response.text, "html.parser") if final_url != url:
_log_url_event(final_url, kind="REDIRECT")
_log_response_html(url, body)
return BeautifulSoup(body, "html.parser")
finally:
if response is not None:
try:
response.close()
except Exception:
pass
class DokuStreamsPlugin(BasisPlugin): class DokuStreamsPlugin(BasisPlugin):
@@ -247,14 +267,17 @@ class DokuStreamsPlugin(BasisPlugin):
if REQUESTS_IMPORT_ERROR: if REQUESTS_IMPORT_ERROR:
print(f"DokuStreamsPlugin Importfehler: {REQUESTS_IMPORT_ERROR}") print(f"DokuStreamsPlugin Importfehler: {REQUESTS_IMPORT_ERROR}")
async def search_titles(self, query: str) -> List[str]: async def search_titles(self, query: str, progress_callback: ProgressCallback = None) -> List[str]:
_emit_progress(progress_callback, "Doku-Streams Suche", 15)
hits = self._search_hits(query) hits = self._search_hits(query)
_emit_progress(progress_callback, f"Treffer verarbeiten ({len(hits)})", 70)
self._title_to_url = {hit.title: hit.url for hit in hits if hit.title and hit.url} self._title_to_url = {hit.title: hit.url for hit in hits if hit.title and hit.url}
for hit in hits: for hit in hits:
if hit.title: if hit.title:
self._title_meta[hit.title] = (hit.plot, hit.poster) self._title_meta[hit.title] = (hit.plot, hit.poster)
titles = [hit.title for hit in hits if hit.title] titles = [hit.title for hit in hits if hit.title]
titles.sort(key=lambda value: value.casefold()) titles.sort(key=lambda value: value.casefold())
_emit_progress(progress_callback, f"Fertig: {len(titles)} Treffer", 95)
return titles return titles
def _search_hits(self, query: str) -> List[SearchHit]: def _search_hits(self, query: str) -> List[SearchHit]:

View File

@@ -11,7 +11,7 @@ from __future__ import annotations
import json import json
import re import re
from dataclasses import dataclass from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Set from typing import Any, Callable, Dict, List, Optional, Set
from urllib.parse import urlencode, urljoin, urlsplit from urllib.parse import urlencode, urljoin, urlsplit
try: # pragma: no cover - optional dependency (Kodi dependency) try: # pragma: no cover - optional dependency (Kodi dependency)
@@ -56,6 +56,16 @@ HEADERS = {
"Accept-Language": "de-DE,de;q=0.9,en;q=0.8", "Accept-Language": "de-DE,de;q=0.9,en;q=0.8",
"Connection": "keep-alive", "Connection": "keep-alive",
} }
ProgressCallback = Optional[Callable[[str, Optional[int]], Any]]
def _emit_progress(callback: ProgressCallback, message: str, percent: Optional[int] = None) -> None:
if not callable(callback):
return
try:
callback(str(message or ""), None if percent is None else int(percent))
except Exception:
return
@dataclass(frozen=True) @dataclass(frozen=True)
@@ -526,6 +536,34 @@ class EinschaltenPlugin(BasisPlugin):
self._session = requests.Session() self._session = requests.Session()
return self._session return self._session
def _http_get_text(self, url: str, *, timeout: int = 20) -> tuple[str, str]:
_log_url(url, kind="GET")
_notify_url(url)
sess = self._get_session()
response = None
try:
response = sess.get(url, headers=HEADERS, timeout=timeout)
response.raise_for_status()
final_url = (response.url or url) if response is not None else url
body = (response.text or "") if response is not None else ""
_log_url(final_url, kind="OK")
_log_response_html(final_url, body)
return final_url, body
finally:
if response is not None:
try:
response.close()
except Exception:
pass
def _http_get_json(self, url: str, *, timeout: int = 20) -> tuple[str, Any]:
final_url, body = self._http_get_text(url, timeout=timeout)
try:
payload = json.loads(body or "{}")
except Exception:
payload = {}
return final_url, payload
def _get_base_url(self) -> str: def _get_base_url(self) -> str:
base = _get_setting_text(SETTING_BASE_URL, default=DEFAULT_BASE_URL).strip() base = _get_setting_text(SETTING_BASE_URL, default=DEFAULT_BASE_URL).strip()
return base.rstrip("/") return base.rstrip("/")
@@ -646,15 +684,9 @@ class EinschaltenPlugin(BasisPlugin):
if not url: if not url:
return "" return ""
try: try:
_log_url(url, kind="GET") _, body = self._http_get_text(url, timeout=20)
_notify_url(url) self._detail_html_by_id[movie_id] = body
sess = self._get_session() return body
resp = sess.get(url, headers=HEADERS, timeout=20)
resp.raise_for_status()
_log_url(resp.url or url, kind="OK")
_log_response_html(resp.url or url, resp.text)
self._detail_html_by_id[movie_id] = resp.text or ""
return resp.text or ""
except Exception as exc: except Exception as exc:
_log_error(f"GET {url} failed: {exc}") _log_error(f"GET {url} failed: {exc}")
return "" return ""
@@ -667,16 +699,8 @@ class EinschaltenPlugin(BasisPlugin):
if not url: if not url:
return {} return {}
try: try:
_log_url(url, kind="GET") _, data = self._http_get_json(url, timeout=20)
_notify_url(url) return data
sess = self._get_session()
resp = sess.get(url, headers=HEADERS, timeout=20)
resp.raise_for_status()
_log_url(resp.url or url, kind="OK")
# Some backends may return JSON with a JSON content-type; for debugging we still dump text.
_log_response_html(resp.url or url, resp.text)
data = resp.json()
return dict(data) if isinstance(data, dict) else {}
except Exception as exc: except Exception as exc:
_log_error(f"GET {url} failed: {exc}") _log_error(f"GET {url} failed: {exc}")
return {} return {}
@@ -741,14 +765,8 @@ class EinschaltenPlugin(BasisPlugin):
if not url: if not url:
return [] return []
try: try:
_log_url(url, kind="GET") _, body = self._http_get_text(url, timeout=20)
_notify_url(url) payload = _extract_ng_state_payload(body)
sess = self._get_session()
resp = sess.get(url, headers=HEADERS, timeout=20)
resp.raise_for_status()
_log_url(resp.url or url, kind="OK")
_log_response_html(resp.url or url, resp.text)
payload = _extract_ng_state_payload(resp.text)
return _parse_ng_state_movies(payload) return _parse_ng_state_movies(payload)
except Exception: except Exception:
return [] return []
@@ -759,14 +777,8 @@ class EinschaltenPlugin(BasisPlugin):
if not url: if not url:
return [] return []
try: try:
_log_url(url, kind="GET") _, body = self._http_get_text(url, timeout=20)
_notify_url(url) payload = _extract_ng_state_payload(body)
sess = self._get_session()
resp = sess.get(url, headers=HEADERS, timeout=20)
resp.raise_for_status()
_log_url(resp.url or url, kind="OK")
_log_response_html(resp.url or url, resp.text)
payload = _extract_ng_state_payload(resp.text)
movies = _parse_ng_state_movies(payload) movies = _parse_ng_state_movies(payload)
_log_debug_line(f"parse_ng_state_movies:count={len(movies)}") _log_debug_line(f"parse_ng_state_movies:count={len(movies)}")
if movies: if movies:
@@ -784,14 +796,8 @@ class EinschaltenPlugin(BasisPlugin):
if page > 1: if page > 1:
url = f"{url}?{urlencode({'page': str(page)})}" url = f"{url}?{urlencode({'page': str(page)})}"
try: try:
_log_url(url, kind="GET") _, body = self._http_get_text(url, timeout=20)
_notify_url(url) payload = _extract_ng_state_payload(body)
sess = self._get_session()
resp = sess.get(url, headers=HEADERS, timeout=20)
resp.raise_for_status()
_log_url(resp.url or url, kind="OK")
_log_response_html(resp.url or url, resp.text)
payload = _extract_ng_state_payload(resp.text)
movies, has_more, current_page = _parse_ng_state_movies_with_pagination(payload) movies, has_more, current_page = _parse_ng_state_movies_with_pagination(payload)
_log_debug_line(f"parse_ng_state_movies_page:page={page} count={len(movies)}") _log_debug_line(f"parse_ng_state_movies_page:page={page} count={len(movies)}")
if has_more is not None: if has_more is not None:
@@ -844,14 +850,8 @@ class EinschaltenPlugin(BasisPlugin):
if not url: if not url:
return [] return []
try: try:
_log_url(url, kind="GET") _, body = self._http_get_text(url, timeout=20)
_notify_url(url) payload = _extract_ng_state_payload(body)
sess = self._get_session()
resp = sess.get(url, headers=HEADERS, timeout=20)
resp.raise_for_status()
_log_url(resp.url or url, kind="OK")
_log_response_html(resp.url or url, resp.text)
payload = _extract_ng_state_payload(resp.text)
results = _parse_ng_state_search_results(payload) results = _parse_ng_state_search_results(payload)
return _filter_movies_by_title(query, results) return _filter_movies_by_title(query, results)
except Exception: except Exception:
@@ -867,13 +867,7 @@ class EinschaltenPlugin(BasisPlugin):
api_url = self._api_genres_url() api_url = self._api_genres_url()
if api_url: if api_url:
try: try:
_log_url(api_url, kind="GET") _, payload = self._http_get_json(api_url, timeout=20)
_notify_url(api_url)
sess = self._get_session()
resp = sess.get(api_url, headers=HEADERS, timeout=20)
resp.raise_for_status()
_log_url(resp.url or api_url, kind="OK")
payload = resp.json()
if isinstance(payload, list): if isinstance(payload, list):
parsed: Dict[str, int] = {} parsed: Dict[str, int] = {}
for item in payload: for item in payload:
@@ -900,14 +894,8 @@ class EinschaltenPlugin(BasisPlugin):
if not url: if not url:
return return
try: try:
_log_url(url, kind="GET") _, body = self._http_get_text(url, timeout=20)
_notify_url(url) payload = _extract_ng_state_payload(body)
sess = self._get_session()
resp = sess.get(url, headers=HEADERS, timeout=20)
resp.raise_for_status()
_log_url(resp.url or url, kind="OK")
_log_response_html(resp.url or url, resp.text)
payload = _extract_ng_state_payload(resp.text)
parsed = _parse_ng_state_genres(payload) parsed = _parse_ng_state_genres(payload)
if parsed: if parsed:
self._genre_id_by_name.clear() self._genre_id_by_name.clear()
@@ -915,7 +903,7 @@ class EinschaltenPlugin(BasisPlugin):
except Exception: except Exception:
return return
async def search_titles(self, query: str) -> List[str]: async def search_titles(self, query: str, progress_callback: ProgressCallback = None) -> List[str]:
if not REQUESTS_AVAILABLE: if not REQUESTS_AVAILABLE:
return [] return []
query = (query or "").strip() query = (query or "").strip()
@@ -924,9 +912,12 @@ class EinschaltenPlugin(BasisPlugin):
if not self._get_base_url(): if not self._get_base_url():
return [] return []
_emit_progress(progress_callback, "Einschalten Suche", 15)
movies = self._fetch_search_movies(query) movies = self._fetch_search_movies(query)
if not movies: if not movies:
_emit_progress(progress_callback, "Fallback: Index filtern", 45)
movies = _filter_movies_by_title(query, self._load_movies()) movies = _filter_movies_by_title(query, self._load_movies())
_emit_progress(progress_callback, f"Treffer verarbeiten ({len(movies)})", 75)
titles: List[str] = [] titles: List[str] = []
seen: set[str] = set() seen: set[str] = set()
for movie in movies: for movie in movies:
@@ -936,6 +927,7 @@ class EinschaltenPlugin(BasisPlugin):
self._id_by_title[movie.title] = movie.id self._id_by_title[movie.title] = movie.id
titles.append(movie.title) titles.append(movie.title)
titles.sort(key=lambda value: value.casefold()) titles.sort(key=lambda value: value.casefold())
_emit_progress(progress_callback, f"Fertig: {len(titles)} Treffer", 95)
return titles return titles
def genres(self) -> List[str]: def genres(self) -> List[str]:
@@ -971,14 +963,8 @@ class EinschaltenPlugin(BasisPlugin):
if not url: if not url:
return [] return []
try: try:
_log_url(url, kind="GET") _, body = self._http_get_text(url, timeout=20)
_notify_url(url) payload = _extract_ng_state_payload(body)
sess = self._get_session()
resp = sess.get(url, headers=HEADERS, timeout=20)
resp.raise_for_status()
_log_url(resp.url or url, kind="OK")
_log_response_html(resp.url or url, resp.text)
payload = _extract_ng_state_payload(resp.text)
except Exception: except Exception:
return [] return []
if not isinstance(payload, dict): if not isinstance(payload, dict):

View File

@@ -11,7 +11,7 @@ from dataclasses import dataclass
import re import re
from urllib.parse import quote, urlencode from urllib.parse import quote, urlencode
from urllib.parse import urljoin from urllib.parse import urljoin
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, TypeAlias from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple
try: # pragma: no cover - optional dependency try: # pragma: no cover - optional dependency
import requests import requests
@@ -33,8 +33,8 @@ if TYPE_CHECKING: # pragma: no cover
from requests import Session as RequestsSession from requests import Session as RequestsSession
from bs4 import BeautifulSoup as BeautifulSoupT # type: ignore[import-not-found] from bs4 import BeautifulSoup as BeautifulSoupT # type: ignore[import-not-found]
else: # pragma: no cover else: # pragma: no cover
RequestsSession: TypeAlias = Any RequestsSession = Any
BeautifulSoupT: TypeAlias = Any BeautifulSoupT = Any
ADDON_ID = "plugin.video.viewit" ADDON_ID = "plugin.video.viewit"
@@ -53,6 +53,16 @@ SETTING_LOG_URLS = "log_urls_filmpalast"
SETTING_DUMP_HTML = "dump_html_filmpalast" SETTING_DUMP_HTML = "dump_html_filmpalast"
SETTING_SHOW_URL_INFO = "show_url_info_filmpalast" SETTING_SHOW_URL_INFO = "show_url_info_filmpalast"
SETTING_LOG_ERRORS = "log_errors_filmpalast" SETTING_LOG_ERRORS = "log_errors_filmpalast"
ProgressCallback = Optional[Callable[[str, Optional[int]], Any]]
def _emit_progress(callback: ProgressCallback, message: str, percent: Optional[int] = None) -> None:
if not callable(callback):
return
try:
callback(str(message or ""), None if percent is None else int(percent))
except Exception:
return
HEADERS = { HEADERS = {
"User-Agent": "Mozilla/5.0 (Kodi; ViewIt) AppleWebKit/537.36 (KHTML, like Gecko)", "User-Agent": "Mozilla/5.0 (Kodi; ViewIt) AppleWebKit/537.36 (KHTML, like Gecko)",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
@@ -206,16 +216,26 @@ def _get_soup(url: str, *, session: Optional[RequestsSession] = None) -> Beautif
raise RuntimeError("requests/bs4 sind nicht verfuegbar.") raise RuntimeError("requests/bs4 sind nicht verfuegbar.")
_log_visit(url) _log_visit(url)
sess = session or get_requests_session("filmpalast", headers=HEADERS) sess = session or get_requests_session("filmpalast", headers=HEADERS)
response = None
try: try:
response = sess.get(url, headers=HEADERS, timeout=DEFAULT_TIMEOUT) response = sess.get(url, headers=HEADERS, timeout=DEFAULT_TIMEOUT)
response.raise_for_status() response.raise_for_status()
except Exception as exc: except Exception as exc:
_log_error_message(f"GET {url} failed: {exc}") _log_error_message(f"GET {url} failed: {exc}")
raise raise
if response.url and response.url != url: try:
_log_url_event(response.url, kind="REDIRECT") final_url = (response.url or url) if response is not None else url
_log_response_html(url, response.text) body = (response.text or "") if response is not None else ""
return BeautifulSoup(response.text, "html.parser") if final_url != url:
_log_url_event(final_url, kind="REDIRECT")
_log_response_html(url, body)
return BeautifulSoup(body, "html.parser")
finally:
if response is not None:
try:
response.close()
except Exception:
pass
class FilmpalastPlugin(BasisPlugin): class FilmpalastPlugin(BasisPlugin):
@@ -352,6 +372,7 @@ class FilmpalastPlugin(BasisPlugin):
seen_titles: set[str] = set() seen_titles: set[str] = set()
seen_urls: set[str] = set() seen_urls: set[str] = set()
for base_url, params in search_requests: for base_url, params in search_requests:
response = None
try: try:
request_url = base_url if not params else f"{base_url}?{urlencode(params)}" request_url = base_url if not params else f"{base_url}?{urlencode(params)}"
_log_url_event(request_url, kind="GET") _log_url_event(request_url, kind="GET")
@@ -365,6 +386,12 @@ class FilmpalastPlugin(BasisPlugin):
except Exception as exc: except Exception as exc:
_log_error_message(f"search request failed ({base_url}): {exc}") _log_error_message(f"search request failed ({base_url}): {exc}")
continue continue
finally:
if response is not None:
try:
response.close()
except Exception:
pass
anchors = soup.select("article.liste h2 a[href], article.liste h3 a[href]") anchors = soup.select("article.liste h2 a[href], article.liste h3 a[href]")
if not anchors: if not anchors:
@@ -466,9 +493,13 @@ class FilmpalastPlugin(BasisPlugin):
titles.sort(key=lambda value: value.casefold()) titles.sort(key=lambda value: value.casefold())
return titles return titles
async def search_titles(self, query: str) -> List[str]: async def search_titles(self, query: str, progress_callback: ProgressCallback = None) -> List[str]:
_emit_progress(progress_callback, "Filmpalast Suche", 15)
hits = self._search_hits(query) hits = self._search_hits(query)
return self._apply_hits_to_title_index(hits) _emit_progress(progress_callback, f"Treffer verarbeiten ({len(hits)})", 70)
titles = self._apply_hits_to_title_index(hits)
_emit_progress(progress_callback, f"Fertig: {len(titles)} Treffer", 95)
return titles
def _parse_genres(self, soup: BeautifulSoupT) -> Dict[str, str]: def _parse_genres(self, soup: BeautifulSoupT) -> Dict[str, str]:
genres: Dict[str, str] = {} genres: Dict[str, str] = {}
@@ -820,11 +851,23 @@ class FilmpalastPlugin(BasisPlugin):
def available_hosters_for(self, title: str, season: str, episode: str) -> List[str]: def available_hosters_for(self, title: str, season: str, episode: str) -> List[str]:
detail_url = self._detail_url_for_selection(title, season, episode) detail_url = self._detail_url_for_selection(title, season, episode)
hosters = self._hosters_for_detail_url(detail_url) return self.available_hosters_for_url(detail_url)
return list(hosters.keys())
def stream_link_for(self, title: str, season: str, episode: str) -> Optional[str]: def stream_link_for(self, title: str, season: str, episode: str) -> Optional[str]:
detail_url = self._detail_url_for_selection(title, season, episode) detail_url = self._detail_url_for_selection(title, season, episode)
return self.stream_link_for_url(detail_url)
def episode_url_for(self, title: str, season: str, episode: str) -> str:
detail_url = self._detail_url_for_selection(title, season, episode)
return (detail_url or "").strip()
def available_hosters_for_url(self, episode_url: str) -> List[str]:
detail_url = (episode_url or "").strip()
hosters = self._hosters_for_detail_url(detail_url)
return list(hosters.keys())
def stream_link_for_url(self, episode_url: str) -> Optional[str]:
detail_url = (episode_url or "").strip()
if not detail_url: if not detail_url:
return None return None
hosters = self._hosters_for_detail_url(detail_url) hosters = self._hosters_for_detail_url(detail_url)
@@ -901,6 +944,7 @@ class FilmpalastPlugin(BasisPlugin):
redirected = link redirected = link
if self._requests_available: if self._requests_available:
response = None
try: try:
session = get_requests_session("filmpalast", headers=HEADERS) session = get_requests_session("filmpalast", headers=HEADERS)
response = session.get(link, headers=HEADERS, timeout=DEFAULT_TIMEOUT, allow_redirects=True) response = session.get(link, headers=HEADERS, timeout=DEFAULT_TIMEOUT, allow_redirects=True)
@@ -908,6 +952,12 @@ class FilmpalastPlugin(BasisPlugin):
redirected = (response.url or link).strip() or link redirected = (response.url or link).strip() or link
except Exception: except Exception:
redirected = link redirected = link
finally:
if response is not None:
try:
response.close()
except Exception:
pass
# 2) Danach optional die Redirect-URL nochmals auflösen. # 2) Danach optional die Redirect-URL nochmals auflösen.
if callable(resolve_with_resolveurl) and redirected and redirected != link: if callable(resolve_with_resolveurl) and redirected and redirected != link:

View File

@@ -17,7 +17,8 @@ import os
import re import re
import time import time
import unicodedata import unicodedata
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, TypeAlias from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple
from urllib.parse import quote
try: # pragma: no cover - optional dependency try: # pragma: no cover - optional dependency
import requests import requests
@@ -49,14 +50,15 @@ if TYPE_CHECKING: # pragma: no cover
from requests import Session as RequestsSession from requests import Session as RequestsSession
from bs4 import BeautifulSoup as BeautifulSoupT # type: ignore[import-not-found] from bs4 import BeautifulSoup as BeautifulSoupT # type: ignore[import-not-found]
else: # pragma: no cover else: # pragma: no cover
RequestsSession: TypeAlias = Any RequestsSession = Any
BeautifulSoupT: TypeAlias = Any BeautifulSoupT = Any
SETTING_BASE_URL = "serienstream_base_url" SETTING_BASE_URL = "serienstream_base_url"
DEFAULT_BASE_URL = "https://s.to" DEFAULT_BASE_URL = "https://s.to"
DEFAULT_PREFERRED_HOSTERS = ["voe"] DEFAULT_PREFERRED_HOSTERS = ["voe"]
DEFAULT_TIMEOUT = 20 DEFAULT_TIMEOUT = 20
SEARCH_TIMEOUT = 8
ADDON_ID = "plugin.video.viewit" ADDON_ID = "plugin.video.viewit"
GLOBAL_SETTING_LOG_URLS = "debug_log_urls" GLOBAL_SETTING_LOG_URLS = "debug_log_urls"
GLOBAL_SETTING_DUMP_HTML = "debug_dump_html" GLOBAL_SETTING_DUMP_HTML = "debug_dump_html"
@@ -75,6 +77,19 @@ HEADERS = {
SESSION_CACHE_TTL_SECONDS = 300 SESSION_CACHE_TTL_SECONDS = 300
SESSION_CACHE_PREFIX = "viewit.serienstream" SESSION_CACHE_PREFIX = "viewit.serienstream"
SESSION_CACHE_MAX_TITLE_URLS = 800 SESSION_CACHE_MAX_TITLE_URLS = 800
CATALOG_SEARCH_TTL_SECONDS = 600
CATALOG_SEARCH_CACHE_KEY = "catalog_index"
_CATALOG_INDEX_MEMORY: tuple[float, List["SeriesResult"]] = (0.0, [])
ProgressCallback = Optional[Callable[[str, Optional[int]], Any]]
def _emit_progress(callback: ProgressCallback, message: str, percent: Optional[int] = None) -> None:
if not callable(callback):
return
try:
callback(str(message or ""), None if percent is None else int(percent))
except Exception:
return
@dataclass @dataclass
@@ -111,6 +126,57 @@ class SeasonInfo:
episodes: List[EpisodeInfo] episodes: List[EpisodeInfo]
def _extract_series_metadata(soup: BeautifulSoupT) -> Tuple[Dict[str, str], Dict[str, str]]:
info: Dict[str, str] = {}
art: Dict[str, str] = {}
if not soup:
return info, art
title_tag = soup.select_one("h1")
title = (title_tag.get_text(" ", strip=True) if title_tag else "").strip()
if title:
info["title"] = title
description = ""
desc_tag = soup.select_one(".series-description .description-text")
if desc_tag:
description = (desc_tag.get_text(" ", strip=True) or "").strip()
if not description:
meta_desc = soup.select_one("meta[property='og:description'], meta[name='description']")
if meta_desc:
description = (meta_desc.get("content") or "").strip()
if description:
info["plot"] = description
poster = ""
poster_tag = soup.select_one(
".show-cover-mobile img[data-src], .show-cover-mobile img[src], .col-3 img[data-src], .col-3 img[src]"
)
if poster_tag:
poster = (poster_tag.get("data-src") or poster_tag.get("src") or "").strip()
if not poster:
for candidate in soup.select("img[data-src], img[src]"):
url = (candidate.get("data-src") or candidate.get("src") or "").strip()
if "/media/images/channel/" in url:
poster = url
break
if poster:
poster = _absolute_url(poster)
art["poster"] = poster
art["thumb"] = poster
fanart = ""
fanart_tag = soup.select_one("meta[property='og:image']")
if fanart_tag:
fanart = (fanart_tag.get("content") or "").strip()
if fanart:
fanart = _absolute_url(fanart)
art["fanart"] = fanart
art["landscape"] = fanart
return info, art
def _get_base_url() -> str: def _get_base_url() -> str:
base = get_setting_string(ADDON_ID, SETTING_BASE_URL, default=DEFAULT_BASE_URL).strip() base = get_setting_string(ADDON_ID, SETTING_BASE_URL, default=DEFAULT_BASE_URL).strip()
if not base: if not base:
@@ -342,37 +408,56 @@ def _get_soup(url: str, *, session: Optional[RequestsSession] = None) -> Beautif
_ensure_requests() _ensure_requests()
_log_visit(url) _log_visit(url)
sess = session or get_requests_session("serienstream", headers=HEADERS) sess = session or get_requests_session("serienstream", headers=HEADERS)
response = None
try: try:
response = sess.get(url, headers=HEADERS, timeout=DEFAULT_TIMEOUT) response = sess.get(url, headers=HEADERS, timeout=DEFAULT_TIMEOUT)
response.raise_for_status() response.raise_for_status()
except Exception as exc: except Exception as exc:
_log_error(f"GET {url} failed: {exc}") _log_error(f"GET {url} failed: {exc}")
raise raise
if response.url and response.url != url: try:
_log_url(response.url, kind="REDIRECT") final_url = (response.url or url) if response is not None else url
_log_response_html(url, response.text) body = (response.text or "") if response is not None else ""
if _looks_like_cloudflare_challenge(response.text): if final_url != url:
raise RuntimeError("Cloudflare-Schutz erkannt. requests reicht ggf. nicht aus.") _log_url(final_url, kind="REDIRECT")
return BeautifulSoup(response.text, "html.parser") _log_response_html(url, body)
if _looks_like_cloudflare_challenge(body):
raise RuntimeError("Cloudflare-Schutz erkannt. requests reicht ggf. nicht aus.")
return BeautifulSoup(body, "html.parser")
finally:
if response is not None:
try:
response.close()
except Exception:
pass
def _get_html_simple(url: str) -> str: def _get_html_simple(url: str) -> str:
_ensure_requests() _ensure_requests()
_log_visit(url) _log_visit(url)
sess = get_requests_session("serienstream", headers=HEADERS) sess = get_requests_session("serienstream", headers=HEADERS)
response = None
try: try:
response = sess.get(url, headers=HEADERS, timeout=DEFAULT_TIMEOUT) response = sess.get(url, headers=HEADERS, timeout=DEFAULT_TIMEOUT)
response.raise_for_status() response.raise_for_status()
except Exception as exc: except Exception as exc:
_log_error(f"GET {url} failed: {exc}") _log_error(f"GET {url} failed: {exc}")
raise raise
if response.url and response.url != url: try:
_log_url(response.url, kind="REDIRECT") final_url = (response.url or url) if response is not None else url
body = response.text body = (response.text or "") if response is not None else ""
_log_response_html(url, body) if final_url != url:
if _looks_like_cloudflare_challenge(body): _log_url(final_url, kind="REDIRECT")
raise RuntimeError("Cloudflare-Schutz erkannt. requests reicht ggf. nicht aus.") _log_response_html(url, body)
return body if _looks_like_cloudflare_challenge(body):
raise RuntimeError("Cloudflare-Schutz erkannt. requests reicht ggf. nicht aus.")
return body
finally:
if response is not None:
try:
response.close()
except Exception:
pass
def _get_soup_simple(url: str) -> BeautifulSoupT: def _get_soup_simple(url: str) -> BeautifulSoupT:
@@ -400,20 +485,238 @@ def _extract_genre_names_from_html(body: str) -> List[str]:
return names return names
def search_series(query: str) -> List[SeriesResult]: def _strip_tags(value: str) -> str:
"""Sucht Serien im (/serien)-Katalog (Genre-liste) nach Titel/Alt-Titel.""" return re.sub(r"<[^>]+>", " ", value or "")
def _search_series_api(query: str) -> List[SeriesResult]:
query = (query or "").strip()
if not query:
return []
_ensure_requests()
sess = get_requests_session("serienstream", headers=HEADERS)
terms = [query]
if " " in query:
# Fallback: einzelne Tokens liefern in der API oft bessere Treffer.
terms.extend([token for token in query.split() if token])
seen_urls: set[str] = set()
for term in terms:
response = None
try:
response = sess.get(
f"{_get_base_url()}/api/search/suggest",
params={"term": term},
headers=HEADERS,
timeout=SEARCH_TIMEOUT,
)
response.raise_for_status()
except Exception:
continue
try:
payload = response.json()
except Exception:
continue
finally:
if response is not None:
try:
response.close()
except Exception:
pass
shows = payload.get("shows") if isinstance(payload, dict) else None
if not isinstance(shows, list):
continue
results: List[SeriesResult] = []
for item in shows:
if not isinstance(item, dict):
continue
title = (item.get("name") or "").strip()
href = (item.get("url") or "").strip()
if not title or not href:
continue
url_abs = _absolute_url(href)
if not url_abs or url_abs in seen_urls:
continue
if "/staffel-" in url_abs or "/episode-" in url_abs:
continue
seen_urls.add(url_abs)
results.append(SeriesResult(title=title, description="", url=url_abs))
if not results:
continue
filtered = [entry for entry in results if _matches_query(query, title=entry.title)]
if filtered:
return filtered
# Falls nur Token-Suche möglich war, zumindest die Ergebnisse liefern.
if term != query:
return results
return []
def _search_series_server(query: str) -> List[SeriesResult]:
if not query:
return []
api_results = _search_series_api(query)
if api_results:
return api_results
base = _get_base_url()
search_url = f"{base}/search?q={quote(query)}"
alt_url = f"{base}/suche?q={quote(query)}"
for url in (search_url, alt_url):
try:
body = _get_html_simple(url)
except Exception:
continue
if not body:
continue
soup = BeautifulSoup(body, "html.parser")
root = soup.select_one(".search-results-list")
if root is None:
continue
seen_urls: set[str] = set()
results: List[SeriesResult] = []
for card in root.select(".cover-card"):
anchor = card.select_one("a[href*='/serie/']")
if not anchor:
continue
href = (anchor.get("href") or "").strip()
url_abs = _absolute_url(href)
if not url_abs or url_abs in seen_urls:
continue
if "/staffel-" in url_abs or "/episode-" in url_abs:
continue
title_tag = card.select_one(".show-title") or card.select_one("h3") or card.select_one("h4")
title = (title_tag.get_text(" ", strip=True) if title_tag else anchor.get_text(" ", strip=True)).strip()
if not title:
continue
seen_urls.add(url_abs)
results.append(SeriesResult(title=title, description="", url=url_abs))
if results:
return results
return []
def _extract_catalog_index_from_html(body: str, *, progress_callback: ProgressCallback = None) -> List[SeriesResult]:
items: List[SeriesResult] = []
if not body:
return items
seen_urls: set[str] = set()
item_re = re.compile(
r"<li[^>]*class=[\"'][^\"']*series-item[^\"']*[\"'][^>]*>(.*?)</li>",
re.IGNORECASE | re.DOTALL,
)
anchor_re = re.compile(r"<a[^>]+href=[\"']([^\"']+)[\"'][^>]*>(.*?)</a>", re.IGNORECASE | re.DOTALL)
data_search_re = re.compile(r"data-search=[\"']([^\"']*)[\"']", re.IGNORECASE)
for idx, match in enumerate(item_re.finditer(body), start=1):
if idx == 1 or idx % 200 == 0:
_emit_progress(progress_callback, f"Katalog parsen {idx}", 62)
block = match.group(0)
inner = match.group(1) or ""
anchor_match = anchor_re.search(inner)
if not anchor_match:
continue
href = (anchor_match.group(1) or "").strip()
url = _absolute_url(href)
if not url or "/serie/" not in url or "/staffel-" in url or "/episode-" in url:
continue
if url in seen_urls:
continue
seen_urls.add(url)
title_raw = anchor_match.group(2) or ""
title = unescape(re.sub(r"\s+", " ", _strip_tags(title_raw))).strip()
if not title:
continue
search_match = data_search_re.search(block)
description = (search_match.group(1) or "").strip() if search_match else ""
items.append(SeriesResult(title=title, description=description, url=url))
return items
def _catalog_index_from_soup(soup: BeautifulSoupT) -> List[SeriesResult]:
items: List[SeriesResult] = []
if not soup:
return items
seen_urls: set[str] = set()
for item in soup.select("li.series-item"):
anchor = item.find("a", href=True)
if not anchor:
continue
href = (anchor.get("href") or "").strip()
url = _absolute_url(href)
if not url or "/serie/" not in url or "/staffel-" in url or "/episode-" in url:
continue
if url in seen_urls:
continue
seen_urls.add(url)
title = (anchor.get_text(" ", strip=True) or "").strip()
if not title:
continue
description = (item.get("data-search") or "").strip()
items.append(SeriesResult(title=title, description=description, url=url))
return items
def _load_catalog_index_from_cache() -> Optional[List[SeriesResult]]:
global _CATALOG_INDEX_MEMORY
expires_at, cached = _CATALOG_INDEX_MEMORY
if cached and expires_at > time.time():
return list(cached)
raw = _session_cache_get(CATALOG_SEARCH_CACHE_KEY)
if not isinstance(raw, list):
return None
items: List[SeriesResult] = []
for entry in raw:
if not isinstance(entry, list) or len(entry) < 2:
continue
title = str(entry[0] or "").strip()
url = str(entry[1] or "").strip()
description = str(entry[2] or "") if len(entry) > 2 else ""
if title and url:
items.append(SeriesResult(title=title, description=description, url=url))
if items:
_CATALOG_INDEX_MEMORY = (time.time() + CATALOG_SEARCH_TTL_SECONDS, list(items))
return items or None
def _store_catalog_index_in_cache(items: List[SeriesResult]) -> None:
global _CATALOG_INDEX_MEMORY
if not items:
return
_CATALOG_INDEX_MEMORY = (time.time() + CATALOG_SEARCH_TTL_SECONDS, list(items))
payload: List[List[str]] = []
for entry in items:
if not entry.title or not entry.url:
continue
payload.append([entry.title, entry.url, entry.description])
_session_cache_set(CATALOG_SEARCH_CACHE_KEY, payload, ttl_seconds=CATALOG_SEARCH_TTL_SECONDS)
def search_series(query: str, *, progress_callback: ProgressCallback = None) -> List[SeriesResult]:
"""Sucht Serien im (/serien)-Katalog nach Titel. Nutzt Cache + Ein-Pass-Filter."""
_ensure_requests() _ensure_requests()
if not _normalize_search_text(query): if not _normalize_search_text(query):
return [] return []
# Direkter Abruf wie in fetch_serien.py. _emit_progress(progress_callback, "Server-Suche", 15)
server_results = _search_series_server(query)
if server_results:
_emit_progress(progress_callback, f"Server-Treffer: {len(server_results)}", 35)
return [entry for entry in server_results if entry.title and _matches_query(query, title=entry.title)]
_emit_progress(progress_callback, "Pruefe Such-Cache", 42)
cached = _load_catalog_index_from_cache()
if cached is not None:
_emit_progress(progress_callback, f"Cache-Treffer: {len(cached)}", 52)
return [entry for entry in cached if entry.title and _matches_query(query, title=entry.title)]
_emit_progress(progress_callback, "Lade Katalogseite", 58)
catalog_url = f"{_get_base_url()}/serien?by=genre" catalog_url = f"{_get_base_url()}/serien?by=genre"
soup = _get_soup_simple(catalog_url) body = _get_html_simple(catalog_url)
results: List[SeriesResult] = [] items = _extract_catalog_index_from_html(body, progress_callback=progress_callback)
for series in parse_series_catalog(soup).values(): if not items:
for entry in series: _emit_progress(progress_callback, "Fallback-Parser", 70)
if entry.title and _matches_query(query, title=entry.title): soup = BeautifulSoup(body, "html.parser")
results.append(entry) items = _catalog_index_from_soup(soup)
return results if items:
_store_catalog_index_in_cache(items)
_emit_progress(progress_callback, f"Filtere Treffer ({len(items)})", 85)
return [entry for entry in items if entry.title and _matches_query(query, title=entry.title)]
def parse_series_catalog(soup: BeautifulSoupT) -> Dict[str, List[SeriesResult]]: def parse_series_catalog(soup: BeautifulSoupT) -> Dict[str, List[SeriesResult]]:
@@ -731,15 +1034,23 @@ def resolve_redirect(target_url: str) -> Optional[str]:
_get_soup(_get_base_url(), session=session) _get_soup(_get_base_url(), session=session)
except Exception: except Exception:
pass pass
response = session.get( response = None
normalized_url, try:
headers=HEADERS, response = session.get(
timeout=DEFAULT_TIMEOUT, normalized_url,
allow_redirects=True, headers=HEADERS,
) timeout=DEFAULT_TIMEOUT,
if response.url: allow_redirects=True,
_log_url(response.url, kind="RESOLVED") )
return response.url if response.url else None if response.url:
_log_url(response.url, kind="RESOLVED")
return response.url if response.url else None
finally:
if response is not None:
try:
response.close()
except Exception:
pass
def scrape_series_detail( def scrape_series_detail(
@@ -805,6 +1116,7 @@ class SerienstreamPlugin(BasisPlugin):
self._hoster_cache: Dict[Tuple[str, str, str], List[str]] = {} self._hoster_cache: Dict[Tuple[str, str, str], List[str]] = {}
self._latest_cache: Dict[int, List[LatestEpisode]] = {} self._latest_cache: Dict[int, List[LatestEpisode]] = {}
self._latest_hoster_cache: Dict[str, List[str]] = {} self._latest_hoster_cache: Dict[str, List[str]] = {}
self._series_metadata_cache: Dict[str, Tuple[Dict[str, str], Dict[str, str]]] = {}
self.is_available = True self.is_available = True
self.unavailable_reason: Optional[str] = None self.unavailable_reason: Optional[str] = None
if not self._requests_available: # pragma: no cover - optional dependency if not self._requests_available: # pragma: no cover - optional dependency
@@ -851,12 +1163,30 @@ class SerienstreamPlugin(BasisPlugin):
cache_key = title.casefold() cache_key = title.casefold()
if self._title_url_cache.get(cache_key) != url: if self._title_url_cache.get(cache_key) != url:
self._title_url_cache[cache_key] = url self._title_url_cache[cache_key] = url
self._save_title_url_cache() self._save_title_url_cache()
if url:
return return
current = self._series_results.get(title) current = self._series_results.get(title)
if current is None: if current is None:
self._series_results[title] = SeriesResult(title=title, description=description, url="") self._series_results[title] = SeriesResult(title=title, description=description, url="")
@staticmethod
def _metadata_cache_key(title: str) -> str:
return (title or "").strip().casefold()
def _series_for_title(self, title: str) -> Optional[SeriesResult]:
direct = self._series_results.get(title)
if direct and direct.url:
return direct
lookup_key = (title or "").strip().casefold()
for item in self._series_results.values():
if item.title.casefold().strip() == lookup_key and item.url:
return item
cached_url = self._title_url_cache.get(lookup_key, "")
if cached_url:
return SeriesResult(title=title, description="", url=cached_url)
return None
@staticmethod @staticmethod
def _season_links_cache_name(series_url: str) -> str: def _season_links_cache_name(series_url: str) -> str:
digest = hashlib.sha1((series_url or "").encode("utf-8")).hexdigest()[:20] digest = hashlib.sha1((series_url or "").encode("utf-8")).hexdigest()[:20]
@@ -1274,7 +1604,28 @@ class SerienstreamPlugin(BasisPlugin):
self._season_links_cache[title] = list(session_links) self._season_links_cache[title] = list(session_links)
return list(session_links) return list(session_links)
try: try:
seasons = scrape_series_detail(series.url, load_episodes=False) series_soup = _get_soup(series.url, session=get_requests_session("serienstream", headers=HEADERS))
info_labels, art = _extract_series_metadata(series_soup)
if series.description and "plot" not in info_labels:
info_labels["plot"] = series.description
cache_key = self._metadata_cache_key(title)
if info_labels or art:
self._series_metadata_cache[cache_key] = (info_labels, art)
base_series_url = _series_root_url(_extract_canonical_url(series_soup, series.url))
season_links = _extract_season_links(series_soup)
season_count = _extract_number_of_seasons(series_soup)
if season_count and (not season_links or len(season_links) < season_count):
existing = {number for number, _ in season_links}
for number in range(1, season_count + 1):
if number in existing:
continue
season_url = f"{base_series_url}/staffel-{number}"
_log_parsed_url(season_url)
season_links.append((number, season_url))
season_links.sort(key=lambda item: item[0])
seasons = [SeasonInfo(number=number, url=url, episodes=[]) for number, url in season_links]
seasons.sort(key=lambda s: s.number)
except Exception as exc: # pragma: no cover - defensive logging except Exception as exc: # pragma: no cover - defensive logging
raise RuntimeError(f"Serienstream-Staffeln konnten nicht geladen werden: {exc}") from exc raise RuntimeError(f"Serienstream-Staffeln konnten nicht geladen werden: {exc}") from exc
self._season_links_cache[title] = list(seasons) self._season_links_cache[title] = list(seasons)
@@ -1288,6 +1639,41 @@ class SerienstreamPlugin(BasisPlugin):
return return
self._remember_series_result(title, series_url) self._remember_series_result(title, series_url)
def metadata_for(self, title: str) -> Tuple[Dict[str, str], Dict[str, str], Optional[List[Any]]]:
title = (title or "").strip()
if not title or not self._requests_available:
return {}, {}, None
cache_key = self._metadata_cache_key(title)
cached = self._series_metadata_cache.get(cache_key)
if cached is not None:
info, art = cached
return dict(info), dict(art), None
series = self._series_for_title(title)
if series is None or not series.url:
info = {"title": title}
self._series_metadata_cache[cache_key] = (dict(info), {})
return info, {}, None
info: Dict[str, str] = {"title": title}
art: Dict[str, str] = {}
if series.description:
info["plot"] = series.description
try:
soup = _get_soup(series.url, session=get_requests_session("serienstream", headers=HEADERS))
parsed_info, parsed_art = _extract_series_metadata(soup)
if parsed_info:
info.update(parsed_info)
if parsed_art:
art.update(parsed_art)
except Exception:
pass
self._series_metadata_cache[cache_key] = (dict(info), dict(art))
return info, art, None
def series_url_for_title(self, title: str) -> str: def series_url_for_title(self, title: str) -> str:
title = (title or "").strip() title = (title or "").strip()
if not title: if not title:
@@ -1348,7 +1734,7 @@ class SerienstreamPlugin(BasisPlugin):
return self._episode_label_cache.get(cache_key, {}).get(episode_label) return self._episode_label_cache.get(cache_key, {}).get(episode_label)
return None return None
async def search_titles(self, query: str) -> List[str]: async def search_titles(self, query: str, progress_callback: ProgressCallback = None) -> List[str]:
query = query.strip() query = query.strip()
if not query: if not query:
self._series_results.clear() self._series_results.clear()
@@ -1362,7 +1748,8 @@ class SerienstreamPlugin(BasisPlugin):
try: try:
# Nutzt den Katalog (/serien), der jetzt nach Genres gruppiert ist. # Nutzt den Katalog (/serien), der jetzt nach Genres gruppiert ist.
# Alternativ gäbe es ein Ajax-Endpoint, aber der ist nicht immer zuverlässig erreichbar. # Alternativ gäbe es ein Ajax-Endpoint, aber der ist nicht immer zuverlässig erreichbar.
results = search_series(query) _emit_progress(progress_callback, "Serienstream Suche startet", 10)
results = search_series(query, progress_callback=progress_callback)
except Exception as exc: # pragma: no cover - defensive logging except Exception as exc: # pragma: no cover - defensive logging
self._series_results.clear() self._series_results.clear()
self._season_cache.clear() self._season_cache.clear()
@@ -1375,6 +1762,7 @@ class SerienstreamPlugin(BasisPlugin):
self._season_cache.clear() self._season_cache.clear()
self._season_links_cache.clear() self._season_links_cache.clear()
self._episode_label_cache.clear() self._episode_label_cache.clear()
_emit_progress(progress_callback, f"Treffer aufbereitet: {len(results)}", 95)
return [result.title for result in results] return [result.title for result in results]
def _ensure_seasons(self, title: str) -> List[SeasonInfo]: def _ensure_seasons(self, title: str) -> List[SeasonInfo]:
@@ -1443,6 +1831,18 @@ class SerienstreamPlugin(BasisPlugin):
except Exception as exc: # pragma: no cover - defensive logging except Exception as exc: # pragma: no cover - defensive logging
raise RuntimeError(f"Stream-Link konnte nicht geladen werden: {exc}") from exc raise RuntimeError(f"Stream-Link konnte nicht geladen werden: {exc}") from exc
def episode_url_for(self, title: str, season: str, episode: str) -> str:
cache_key = (title, season)
cached = self._episode_label_cache.get(cache_key)
if cached:
info = cached.get(episode)
if info and info.url:
return info.url
episode_info = self._lookup_episode(title, season, episode)
if episode_info and episode_info.url:
return episode_info.url
return ""
def available_hosters_for(self, title: str, season: str, episode: str) -> List[str]: def available_hosters_for(self, title: str, season: str, episode: str) -> List[str]:
if not self._requests_available: if not self._requests_available:
raise RuntimeError("SerienstreamPlugin kann ohne requests/bs4 keine Hoster laden.") raise RuntimeError("SerienstreamPlugin kann ohne requests/bs4 keine Hoster laden.")

View File

@@ -19,7 +19,7 @@ import hashlib
import os import os
import re import re
import json import json
from typing import TYPE_CHECKING, Any, Dict, List, Optional, TypeAlias from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional
from urllib.parse import urlencode, urljoin from urllib.parse import urlencode, urljoin
try: # pragma: no cover - optional dependency try: # pragma: no cover - optional dependency
@@ -51,8 +51,8 @@ if TYPE_CHECKING: # pragma: no cover
from requests import Session as RequestsSession from requests import Session as RequestsSession
from bs4 import BeautifulSoup as BeautifulSoupT # type: ignore[import-not-found] from bs4 import BeautifulSoup as BeautifulSoupT # type: ignore[import-not-found]
else: # pragma: no cover else: # pragma: no cover
RequestsSession: TypeAlias = Any RequestsSession = Any
BeautifulSoupT: TypeAlias = Any BeautifulSoupT = Any
ADDON_ID = "plugin.video.viewit" ADDON_ID = "plugin.video.viewit"
@@ -78,6 +78,16 @@ HEADERS = {
"Accept-Language": "de-DE,de;q=0.9,en;q=0.8", "Accept-Language": "de-DE,de;q=0.9,en;q=0.8",
"Connection": "keep-alive", "Connection": "keep-alive",
} }
ProgressCallback = Optional[Callable[[str, Optional[int]], Any]]
def _emit_progress(callback: ProgressCallback, message: str, percent: Optional[int] = None) -> None:
if not callable(callback):
return
try:
callback(str(message or ""), None if percent is None else int(percent))
except Exception:
return
@dataclass(frozen=True) @dataclass(frozen=True)
@@ -584,15 +594,25 @@ class TopstreamfilmPlugin(BasisPlugin):
session = self._get_session() session = self._get_session()
self._log_url(url, kind="VISIT") self._log_url(url, kind="VISIT")
self._notify_url(url) self._notify_url(url)
response = None
try: try:
response = session.get(url, timeout=DEFAULT_TIMEOUT) response = session.get(url, timeout=DEFAULT_TIMEOUT)
response.raise_for_status() response.raise_for_status()
except Exception as exc: except Exception as exc:
self._log_error(f"GET {url} failed: {exc}") self._log_error(f"GET {url} failed: {exc}")
raise raise
self._log_url(response.url, kind="OK") try:
self._log_response_html(response.url, response.text) final_url = (response.url or url) if response is not None else url
return BeautifulSoup(response.text, "html.parser") body = (response.text or "") if response is not None else ""
self._log_url(final_url, kind="OK")
self._log_response_html(final_url, body)
return BeautifulSoup(body, "html.parser")
finally:
if response is not None:
try:
response.close()
except Exception:
pass
def _get_detail_soup(self, title: str) -> Optional[BeautifulSoupT]: def _get_detail_soup(self, title: str) -> Optional[BeautifulSoupT]:
title = (title or "").strip() title = (title or "").strip()
@@ -814,7 +834,7 @@ class TopstreamfilmPlugin(BasisPlugin):
# Sonst: Serie via Streams-Accordion parsen (falls vorhanden). # Sonst: Serie via Streams-Accordion parsen (falls vorhanden).
self._parse_stream_accordion(soup, title=title) self._parse_stream_accordion(soup, title=title)
async def search_titles(self, query: str) -> List[str]: async def search_titles(self, query: str, progress_callback: ProgressCallback = None) -> List[str]:
"""Sucht Titel ueber eine HTML-Suche. """Sucht Titel ueber eine HTML-Suche.
Erwartetes HTML (Snippet): Erwartetes HTML (Snippet):
@@ -827,6 +847,7 @@ class TopstreamfilmPlugin(BasisPlugin):
query = (query or "").strip() query = (query or "").strip()
if not query: if not query:
return [] return []
_emit_progress(progress_callback, "Topstreamfilm Suche", 15)
session = self._get_session() session = self._get_session()
url = self._get_base_url() + "/" url = self._get_base_url() + "/"
@@ -834,6 +855,7 @@ class TopstreamfilmPlugin(BasisPlugin):
request_url = f"{url}?{urlencode(params)}" request_url = f"{url}?{urlencode(params)}"
self._log_url(request_url, kind="GET") self._log_url(request_url, kind="GET")
self._notify_url(request_url) self._notify_url(request_url)
response = None
try: try:
response = session.get( response = session.get(
url, url,
@@ -844,15 +866,28 @@ class TopstreamfilmPlugin(BasisPlugin):
except Exception as exc: except Exception as exc:
self._log_error(f"GET {request_url} failed: {exc}") self._log_error(f"GET {request_url} failed: {exc}")
raise raise
self._log_url(response.url, kind="OK") try:
self._log_response_html(response.url, response.text) final_url = (response.url or request_url) if response is not None else request_url
body = (response.text or "") if response is not None else ""
self._log_url(final_url, kind="OK")
self._log_response_html(final_url, body)
if BeautifulSoup is None: if BeautifulSoup is None:
return [] return []
soup = BeautifulSoup(response.text, "html.parser") soup = BeautifulSoup(body, "html.parser")
finally:
if response is not None:
try:
response.close()
except Exception:
pass
hits: List[SearchHit] = [] hits: List[SearchHit] = []
for item in soup.select("li.TPostMv"): items = soup.select("li.TPostMv")
total_items = max(1, len(items))
for idx, item in enumerate(items, start=1):
if idx == 1 or idx % 20 == 0:
_emit_progress(progress_callback, f"Treffer pruefen {idx}/{total_items}", 55)
anchor = item.select_one("a[href]") anchor = item.select_one("a[href]")
if not anchor: if not anchor:
continue continue
@@ -885,6 +920,7 @@ class TopstreamfilmPlugin(BasisPlugin):
self._title_to_url[hit.title] = hit.url self._title_to_url[hit.title] = hit.url
titles.append(hit.title) titles.append(hit.title)
self._save_title_url_cache() self._save_title_url_cache()
_emit_progress(progress_callback, f"Fertig: {len(titles)} Treffer", 95)
return titles return titles
def genres(self) -> List[str]: def genres(self) -> List[str]:

View File

@@ -1,79 +1,85 @@
<?xml version="1.0" encoding="UTF-8"?> <?xml version="1.0" encoding="UTF-8"?>
<settings> <settings>
<category label="Logging"> <category label="Debug und Logs">
<setting id="debug_log_urls" type="bool" label="URL-Logging aktivieren (global)" default="false" /> <setting id="debug_log_urls" type="bool" label="URLs mitschreiben (global)" default="false" />
<setting id="debug_dump_html" type="bool" label="HTML-Dumps aktivieren (global)" default="false" /> <setting id="debug_dump_html" type="bool" label="HTML speichern (global)" default="false" />
<setting id="debug_show_url_info" type="bool" label="URL-Info anzeigen (global)" default="false" /> <setting id="debug_show_url_info" type="bool" label="Aktuelle URL anzeigen (global)" default="false" />
<setting id="debug_log_errors" type="bool" label="Fehler-Logging aktivieren (global)" default="false" /> <setting id="debug_log_errors" type="bool" label="Fehler mitschreiben (global)" default="false" />
<setting id="log_max_mb" type="number" label="URL-Log: max. Datei-Größe (MB)" default="5" /> <setting id="log_max_mb" type="number" label="URL-Log: maximale Dateigroesse (MB)" default="5" />
<setting id="log_max_files" type="number" label="URL-Log: max. Rotationen" default="3" /> <setting id="log_max_files" type="number" label="URL-Log: Anzahl alter Dateien" default="3" />
<setting id="dump_max_files" type="number" label="HTML-Dumps: max. Dateien pro Plugin" default="200" /> <setting id="dump_max_files" type="number" label="HTML: maximale Dateien pro Plugin" default="200" />
<setting id="log_urls_serienstream" type="bool" label="Serienstream: URL-Logging" default="false" /> <setting id="log_urls_serienstream" type="bool" label="Serienstream: URLs mitschreiben" default="false" />
<setting id="dump_html_serienstream" type="bool" label="Serienstream: HTML-Dumps" default="false" /> <setting id="dump_html_serienstream" type="bool" label="Serienstream: HTML speichern" default="false" />
<setting id="show_url_info_serienstream" type="bool" label="Serienstream: URL-Info anzeigen" default="false" /> <setting id="show_url_info_serienstream" type="bool" label="Serienstream: Aktuelle URL anzeigen" default="false" />
<setting id="log_errors_serienstream" type="bool" label="Serienstream: Fehler loggen" default="false" /> <setting id="log_errors_serienstream" type="bool" label="Serienstream: Fehler mitschreiben" default="false" />
<setting id="log_urls_aniworld" type="bool" label="Aniworld: URL-Logging" default="false" /> <setting id="log_urls_aniworld" type="bool" label="Aniworld: URLs mitschreiben" default="false" />
<setting id="dump_html_aniworld" type="bool" label="Aniworld: HTML-Dumps" default="false" /> <setting id="dump_html_aniworld" type="bool" label="Aniworld: HTML speichern" default="false" />
<setting id="show_url_info_aniworld" type="bool" label="Aniworld: URL-Info anzeigen" default="false" /> <setting id="show_url_info_aniworld" type="bool" label="Aniworld: Aktuelle URL anzeigen" default="false" />
<setting id="log_errors_aniworld" type="bool" label="Aniworld: Fehler loggen" default="false" /> <setting id="log_errors_aniworld" type="bool" label="Aniworld: Fehler mitschreiben" default="false" />
<setting id="log_urls_topstreamfilm" type="bool" label="Topstreamfilm: URL-Logging" default="false" /> <setting id="log_urls_topstreamfilm" type="bool" label="Topstreamfilm: URLs mitschreiben" default="false" />
<setting id="dump_html_topstreamfilm" type="bool" label="Topstreamfilm: HTML-Dumps" default="false" /> <setting id="dump_html_topstreamfilm" type="bool" label="Topstreamfilm: HTML speichern" default="false" />
<setting id="show_url_info_topstreamfilm" type="bool" label="Topstreamfilm: URL-Info anzeigen" default="false" /> <setting id="show_url_info_topstreamfilm" type="bool" label="Topstreamfilm: Aktuelle URL anzeigen" default="false" />
<setting id="log_errors_topstreamfilm" type="bool" label="Topstreamfilm: Fehler loggen" default="false" /> <setting id="log_errors_topstreamfilm" type="bool" label="Topstreamfilm: Fehler mitschreiben" default="false" />
<setting id="log_urls_einschalten" type="bool" label="Einschalten: URL-Logging" default="false" /> <setting id="log_urls_einschalten" type="bool" label="Einschalten: URLs mitschreiben" default="false" />
<setting id="dump_html_einschalten" type="bool" label="Einschalten: HTML-Dumps" default="false" /> <setting id="dump_html_einschalten" type="bool" label="Einschalten: HTML speichern" default="false" />
<setting id="show_url_info_einschalten" type="bool" label="Einschalten: URL-Info anzeigen" default="false" /> <setting id="show_url_info_einschalten" type="bool" label="Einschalten: Aktuelle URL anzeigen" default="false" />
<setting id="log_errors_einschalten" type="bool" label="Einschalten: Fehler loggen" default="false" /> <setting id="log_errors_einschalten" type="bool" label="Einschalten: Fehler mitschreiben" default="false" />
<setting id="log_urls_filmpalast" type="bool" label="Filmpalast: URL-Logging" default="false" /> <setting id="log_urls_filmpalast" type="bool" label="Filmpalast: URLs mitschreiben" default="false" />
<setting id="dump_html_filmpalast" type="bool" label="Filmpalast: HTML-Dumps" default="false" /> <setting id="dump_html_filmpalast" type="bool" label="Filmpalast: HTML speichern" default="false" />
<setting id="show_url_info_filmpalast" type="bool" label="Filmpalast: URL-Info anzeigen" default="false" /> <setting id="show_url_info_filmpalast" type="bool" label="Filmpalast: Aktuelle URL anzeigen" default="false" />
<setting id="log_errors_filmpalast" type="bool" label="Filmpalast: Fehler loggen" default="false" /> <setting id="log_errors_filmpalast" type="bool" label="Filmpalast: Fehler mitschreiben" default="false" />
</category> </category>
<category label="TopStream"> <category label="TopStream">
<setting id="topstream_base_url" type="text" label="Domain (BASE_URL)" default="https://topstreamfilm.live" /> <setting id="topstream_base_url" type="text" label="Basis-URL" default="https://topstreamfilm.live" />
<setting id="topstream_genre_max_pages" type="number" label="Genres: max. Seiten laden (Pagination)" default="20" /> <setting id="topstreamfilm_metadata_source" type="enum" label="Metadatenquelle" default="0" values="Automatisch|Quelle|TMDB|Mischen" />
<setting id="topstream_genre_max_pages" type="number" label="Genres: max. Seiten laden" default="20" />
</category> </category>
<category label="SerienStream"> <category label="SerienStream">
<setting id="serienstream_base_url" type="text" label="Domain (BASE_URL)" default="https://s.to" /> <setting id="serienstream_base_url" type="text" label="Basis-URL" default="https://s.to" />
<setting id="serienstream_metadata_source" type="enum" label="Metadatenquelle" default="0" values="Automatisch|Quelle|TMDB|Mischen" />
</category> </category>
<category label="AniWorld"> <category label="AniWorld">
<setting id="aniworld_base_url" type="text" label="Domain (BASE_URL)" default="https://aniworld.to" /> <setting id="aniworld_base_url" type="text" label="Basis-URL" default="https://aniworld.to" />
<setting id="aniworld_metadata_source" type="enum" label="Metadatenquelle" default="0" values="Automatisch|Quelle|TMDB|Mischen" />
</category> </category>
<category label="Einschalten"> <category label="Einschalten">
<setting id="einschalten_base_url" type="text" label="Domain (BASE_URL)" default="https://einschalten.in" /> <setting id="einschalten_base_url" type="text" label="Basis-URL" default="https://einschalten.in" />
<setting id="einschalten_metadata_source" type="enum" label="Metadatenquelle" default="0" values="Automatisch|Quelle|TMDB|Mischen" />
</category> </category>
<category label="Filmpalast"> <category label="Filmpalast">
<setting id="filmpalast_base_url" type="text" label="Domain (BASE_URL)" default="https://filmpalast.to" /> <setting id="filmpalast_base_url" type="text" label="Basis-URL" default="https://filmpalast.to" />
<setting id="filmpalast_metadata_source" type="enum" label="Metadatenquelle" default="0" values="Automatisch|Quelle|TMDB|Mischen" />
</category> </category>
<category label="Doku-Streams"> <category label="Doku-Streams">
<setting id="doku_streams_base_url" type="text" label="Domain (BASE_URL)" default="https://doku-streams.com" /> <setting id="doku_streams_base_url" type="text" label="Basis-URL" default="https://doku-streams.com" />
<setting id="doku_streams_metadata_source" type="enum" label="Metadatenquelle" default="0" values="Automatisch|Quelle|TMDB|Mischen" />
</category> </category>
<category label="TMDB"> <category label="TMDB">
<setting id="tmdb_enabled" type="bool" label="TMDB aktivieren" default="true" /> <setting id="tmdb_enabled" type="bool" label="TMDB aktivieren" default="true" />
<setting id="tmdb_api_key" type="text" label="TMDB API Key" default="" /> <setting id="tmdb_api_key" type="text" label="TMDB API Key" default="" />
<setting id="tmdb_language" type="text" label="TMDB Sprache (z.B. de-DE)" default="de-DE" /> <setting id="tmdb_language" type="text" label="TMDB Sprache (z. B. de-DE)" default="de-DE" />
<setting id="tmdb_prefetch_concurrency" type="number" label="TMDB: Parallelität (Prefetch, 1-20)" default="6" /> <setting id="tmdb_prefetch_concurrency" type="number" label="TMDB: gleichzeitige Anfragen (1-20)" default="6" />
<setting id="tmdb_show_plot" type="bool" label="TMDB Plot anzeigen" default="true" /> <setting id="tmdb_show_plot" type="bool" label="TMDB Beschreibung anzeigen" default="true" />
<setting id="tmdb_show_art" type="bool" label="TMDB Poster/Thumb anzeigen" default="true" /> <setting id="tmdb_show_art" type="bool" label="TMDB Poster und Vorschaubild anzeigen" default="true" />
<setting id="tmdb_show_fanart" type="bool" label="TMDB Fanart/Backdrop anzeigen" default="true" /> <setting id="tmdb_show_fanart" type="bool" label="TMDB Fanart/Backdrop anzeigen" default="true" />
<setting id="tmdb_show_rating" type="bool" label="TMDB Rating anzeigen" default="true" /> <setting id="tmdb_show_rating" type="bool" label="TMDB Bewertung anzeigen" default="true" />
<setting id="tmdb_show_votes" type="bool" label="TMDB Vote-Count anzeigen" default="false" /> <setting id="tmdb_show_votes" type="bool" label="TMDB Stimmen anzeigen" default="false" />
<setting id="tmdb_show_cast" type="bool" label="TMDB Cast anzeigen" default="false" /> <setting id="tmdb_show_cast" type="bool" label="TMDB Besetzung anzeigen" default="false" />
<setting id="tmdb_show_episode_cast" type="bool" label="TMDB Besetzung pro Episode anzeigen" default="false" /> <setting id="tmdb_show_episode_cast" type="bool" label="TMDB Besetzung pro Episode anzeigen" default="false" />
<setting id="tmdb_genre_metadata" type="bool" label="TMDB Meta in Genre-Liste anzeigen" default="false" /> <setting id="tmdb_genre_metadata" type="bool" label="TMDB Daten in Genre-Listen anzeigen" default="false" />
<setting id="tmdb_log_requests" type="bool" label="TMDB API Requests loggen" default="false" /> <setting id="tmdb_log_requests" type="bool" label="TMDB API-Anfragen loggen" default="false" />
<setting id="tmdb_log_responses" type="bool" label="TMDB API Antworten loggen" default="false" /> <setting id="tmdb_log_responses" type="bool" label="TMDB API-Antworten loggen" default="false" />
</category> </category>
<category label="Update"> <category label="Update">
<setting id="update_repo_url" type="text" label="Update-URL (addons.xml)" default="http://127.0.0.1:8080/repo/addons.xml" /> <setting id="update_repo_url" type="text" label="Update-URL (addons.xml)" default="http://127.0.0.1:8080/repo/addons.xml" />
<setting id="run_update_check" type="action" label="Jetzt auf Updates pruefen" action="RunPlugin(plugin://plugin.video.viewit/?action=check_updates)" option="close" /> <setting id="run_update_check" type="action" label="Jetzt nach Updates suchen" action="RunPlugin(plugin://plugin.video.viewit/?action=check_updates)" option="close" />
<setting id="update_info" type="text" label="Kodi-Repository-Updates werden ueber den Kodi-Update-Mechanismus verarbeitet." default="" enable="false" /> <setting id="update_info" type="text" label="Updates laufen ueber den normalen Kodi-Update-Mechanismus." default="" enable="false" />
<setting id="update_version_addon" type="text" label="ViewIT Addon Version" default="-" enable="false" /> <setting id="update_version_addon" type="text" label="ViewIT Version" default="-" enable="false" />
<setting id="update_version_serienstream" type="text" label="Serienstream Plugin Version" default="-" enable="false" /> <setting id="update_version_serienstream" type="text" label="Serienstream Version" default="-" enable="false" />
<setting id="update_version_aniworld" type="text" label="Aniworld Plugin Version" default="-" enable="false" /> <setting id="update_version_aniworld" type="text" label="Aniworld Version" default="-" enable="false" />
<setting id="update_version_einschalten" type="text" label="Einschalten Plugin Version" default="-" enable="false" /> <setting id="update_version_einschalten" type="text" label="Einschalten Version" default="-" enable="false" />
<setting id="update_version_topstreamfilm" type="text" label="Topstreamfilm Plugin Version" default="-" enable="false" /> <setting id="update_version_topstreamfilm" type="text" label="Topstreamfilm Version" default="-" enable="false" />
<setting id="update_version_filmpalast" type="text" label="Filmpalast Plugin Version" default="-" enable="false" /> <setting id="update_version_filmpalast" type="text" label="Filmpalast Version" default="-" enable="false" />
<setting id="update_version_doku_streams" type="text" label="Doku-Streams Plugin Version" default="-" enable="false" /> <setting id="update_version_doku_streams" type="text" label="Doku-Streams Version" default="-" enable="false" />
</category> </category>
</settings> </settings>

View File

@@ -14,6 +14,7 @@ except ImportError: # pragma: no cover
TMDB_API_BASE = "https://api.themoviedb.org/3" TMDB_API_BASE = "https://api.themoviedb.org/3"
TMDB_IMAGE_BASE = "https://image.tmdb.org/t/p" TMDB_IMAGE_BASE = "https://image.tmdb.org/t/p"
MAX_CAST_MEMBERS = 30
_TMDB_THREAD_LOCAL = threading.local() _TMDB_THREAD_LOCAL = threading.local()
@@ -73,53 +74,17 @@ def _fetch_credits(
return [] return []
params = {"api_key": api_key, "language": (language or "de-DE").strip()} params = {"api_key": api_key, "language": (language or "de-DE").strip()}
url = f"{TMDB_API_BASE}/{kind}/{tmdb_id}/credits?{urlencode(params)}" url = f"{TMDB_API_BASE}/{kind}/{tmdb_id}/credits?{urlencode(params)}"
if callable(log): status, payload, body_text = _tmdb_get_json(url=url, timeout=timeout, log=log, log_responses=log_responses)
log(f"TMDB GET {url}")
try:
response = requests.get(url, timeout=timeout)
except Exception as exc: # pragma: no cover
if callable(log):
log(f"TMDB ERROR /{kind}/{{id}}/credits request_failed error={exc!r}")
return []
status = getattr(response, "status_code", None)
if callable(log): if callable(log):
log(f"TMDB RESPONSE /{kind}/{{id}}/credits status={status}") log(f"TMDB RESPONSE /{kind}/{{id}}/credits status={status}")
if status != 200: if log_responses and payload is None and body_text:
log(f"TMDB RESPONSE_BODY /{kind}/{{id}}/credits body={body_text[:2000]}")
if status != 200 or not isinstance(payload, dict):
return [] return []
try:
payload = response.json() or {}
except Exception:
return []
if callable(log) and log_responses:
try:
dumped = json.dumps(payload, ensure_ascii=False)
except Exception:
dumped = str(payload)
log(f"TMDB RESPONSE_BODY /{kind}/{{id}}/credits body={dumped[:2000]}")
cast_payload = payload.get("cast") or [] cast_payload = payload.get("cast") or []
if callable(log): if callable(log):
log(f"TMDB CREDITS /{kind}/{{id}}/credits cast={len(cast_payload)}") log(f"TMDB CREDITS /{kind}/{{id}}/credits cast={len(cast_payload)}")
with_images: List[TmdbCastMember] = [] return _parse_cast_payload(cast_payload)
without_images: List[TmdbCastMember] = []
for entry in cast_payload:
name = (entry.get("name") or "").strip()
role = (entry.get("character") or "").strip()
thumb = _image_url(entry.get("profile_path") or "", size="w185")
if not name:
continue
member = TmdbCastMember(name=name, role=role, thumb=thumb)
if thumb:
with_images.append(member)
else:
without_images.append(member)
# Viele Kodi-Skins zeigen bei fehlendem Thumbnail Platzhalter-Köpfe.
# Bevorzugt daher Cast-Einträge mit Bild; nur wenn gar keine Bilder existieren,
# geben wir Namen ohne Bild zurück.
if with_images:
return with_images[:30]
return without_images[:30]
def _parse_cast_payload(cast_payload: object) -> List[TmdbCastMember]: def _parse_cast_payload(cast_payload: object) -> List[TmdbCastMember]:
@@ -141,8 +106,8 @@ def _parse_cast_payload(cast_payload: object) -> List[TmdbCastMember]:
else: else:
without_images.append(member) without_images.append(member)
if with_images: if with_images:
return with_images[:30] return with_images[:MAX_CAST_MEMBERS]
return without_images[:30] return without_images[:MAX_CAST_MEMBERS]
def _tmdb_get_json( def _tmdb_get_json(
@@ -163,23 +128,29 @@ def _tmdb_get_json(
if callable(log): if callable(log):
log(f"TMDB GET {url}") log(f"TMDB GET {url}")
sess = session or _get_tmdb_session() or requests.Session() sess = session or _get_tmdb_session() or requests.Session()
response = None
try: try:
response = sess.get(url, timeout=timeout) response = sess.get(url, timeout=timeout)
status = getattr(response, "status_code", None)
payload: object | None = None
body_text = ""
try:
payload = response.json()
except Exception:
try:
body_text = (response.text or "").strip()
except Exception:
body_text = ""
except Exception as exc: # pragma: no cover except Exception as exc: # pragma: no cover
if callable(log): if callable(log):
log(f"TMDB ERROR request_failed url={url} error={exc!r}") log(f"TMDB ERROR request_failed url={url} error={exc!r}")
return None, None, "" return None, None, ""
finally:
status = getattr(response, "status_code", None) if response is not None:
payload: object | None = None try:
body_text = "" response.close()
try: except Exception:
payload = response.json() pass
except Exception:
try:
body_text = (response.text or "").strip()
except Exception:
body_text = ""
if callable(log): if callable(log):
log(f"TMDB RESPONSE status={status} url={url}") log(f"TMDB RESPONSE status={status} url={url}")
@@ -214,49 +185,17 @@ def fetch_tv_episode_credits(
return [] return []
params = {"api_key": api_key, "language": (language or "de-DE").strip()} params = {"api_key": api_key, "language": (language or "de-DE").strip()}
url = f"{TMDB_API_BASE}/tv/{tmdb_id}/season/{season_number}/episode/{episode_number}/credits?{urlencode(params)}" url = f"{TMDB_API_BASE}/tv/{tmdb_id}/season/{season_number}/episode/{episode_number}/credits?{urlencode(params)}"
if callable(log): status, payload, body_text = _tmdb_get_json(url=url, timeout=timeout, log=log, log_responses=log_responses)
log(f"TMDB GET {url}")
try:
response = requests.get(url, timeout=timeout)
except Exception as exc: # pragma: no cover
if callable(log):
log(f"TMDB ERROR /tv/{{id}}/season/{{n}}/episode/{{e}}/credits request_failed error={exc!r}")
return []
status = getattr(response, "status_code", None)
if callable(log): if callable(log):
log(f"TMDB RESPONSE /tv/{{id}}/season/{{n}}/episode/{{e}}/credits status={status}") log(f"TMDB RESPONSE /tv/{{id}}/season/{{n}}/episode/{{e}}/credits status={status}")
if status != 200: if log_responses and payload is None and body_text:
log(f"TMDB RESPONSE_BODY /tv/{{id}}/season/{{n}}/episode/{{e}}/credits body={body_text[:2000]}")
if status != 200 or not isinstance(payload, dict):
return [] return []
try:
payload = response.json() or {}
except Exception:
return []
if callable(log) and log_responses:
try:
dumped = json.dumps(payload, ensure_ascii=False)
except Exception:
dumped = str(payload)
log(f"TMDB RESPONSE_BODY /tv/{{id}}/season/{{n}}/episode/{{e}}/credits body={dumped[:2000]}")
cast_payload = payload.get("cast") or [] cast_payload = payload.get("cast") or []
if callable(log): if callable(log):
log(f"TMDB CREDITS /tv/{{id}}/season/{{n}}/episode/{{e}}/credits cast={len(cast_payload)}") log(f"TMDB CREDITS /tv/{{id}}/season/{{n}}/episode/{{e}}/credits cast={len(cast_payload)}")
with_images: List[TmdbCastMember] = [] return _parse_cast_payload(cast_payload)
without_images: List[TmdbCastMember] = []
for entry in cast_payload:
name = (entry.get("name") or "").strip()
role = (entry.get("character") or "").strip()
thumb = _image_url(entry.get("profile_path") or "", size="w185")
if not name:
continue
member = TmdbCastMember(name=name, role=role, thumb=thumb)
if thumb:
with_images.append(member)
else:
without_images.append(member)
if with_images:
return with_images[:30]
return without_images[:30]
def lookup_tv_show( def lookup_tv_show(
@@ -546,27 +485,13 @@ def lookup_tv_season_summary(
params = {"api_key": api_key, "language": (language or "de-DE").strip()} params = {"api_key": api_key, "language": (language or "de-DE").strip()}
url = f"{TMDB_API_BASE}/tv/{tmdb_id}/season/{season_number}?{urlencode(params)}" url = f"{TMDB_API_BASE}/tv/{tmdb_id}/season/{season_number}?{urlencode(params)}"
if callable(log): status, payload, body_text = _tmdb_get_json(url=url, timeout=timeout, log=log, log_responses=log_responses)
log(f"TMDB GET {url}")
try:
response = requests.get(url, timeout=timeout)
except Exception:
return None
status = getattr(response, "status_code", None)
if callable(log): if callable(log):
log(f"TMDB RESPONSE /tv/{{id}}/season/{{n}} status={status}") log(f"TMDB RESPONSE /tv/{{id}}/season/{{n}} status={status}")
if status != 200: if log_responses and payload is None and body_text:
log(f"TMDB RESPONSE_BODY /tv/{{id}}/season/{{n}} body={body_text[:2000]}")
if status != 200 or not isinstance(payload, dict):
return None return None
try:
payload = response.json() or {}
except Exception:
return None
if callable(log) and log_responses:
try:
dumped = json.dumps(payload, ensure_ascii=False)
except Exception:
dumped = str(payload)
log(f"TMDB RESPONSE_BODY /tv/{{id}}/season/{{n}} body={dumped[:2000]}")
plot = (payload.get("overview") or "").strip() plot = (payload.get("overview") or "").strip()
poster_path = (payload.get("poster_path") or "").strip() poster_path = (payload.get("poster_path") or "").strip()
@@ -594,27 +519,9 @@ def lookup_tv_season(
return None return None
params = {"api_key": api_key, "language": (language or "de-DE").strip()} params = {"api_key": api_key, "language": (language or "de-DE").strip()}
url = f"{TMDB_API_BASE}/tv/{tmdb_id}/season/{season_number}?{urlencode(params)}" url = f"{TMDB_API_BASE}/tv/{tmdb_id}/season/{season_number}?{urlencode(params)}"
if callable(log): status, payload, body_text = _tmdb_get_json(url=url, timeout=timeout, log=log, log_responses=log_responses)
log(f"TMDB GET {url}") episodes = (payload or {}).get("episodes") if isinstance(payload, dict) else []
try: episodes = episodes or []
response = requests.get(url, timeout=timeout)
except Exception as exc: # pragma: no cover
if callable(log):
log(f"TMDB ERROR /tv/{{id}}/season/{{n}} request_failed error={exc!r}")
return None
status = getattr(response, "status_code", None)
payload = None
body_text = ""
try:
payload = response.json() or {}
except Exception:
try:
body_text = (response.text or "").strip()
except Exception:
body_text = ""
episodes = (payload or {}).get("episodes") or []
if callable(log): if callable(log):
log(f"TMDB RESPONSE /tv/{{id}}/season/{{n}} status={status} episodes={len(episodes)}") log(f"TMDB RESPONSE /tv/{{id}}/season/{{n}} status={status} episodes={len(episodes)}")
if log_responses: if log_responses:

View File

@@ -1,55 +1,49 @@
# ViewIT Hauptlogik (`addon/default.py`) # ViewIT Hauptlogik (`addon/default.py`)
Dieses Dokument beschreibt den Einstiegspunkt des Addons und die zentrale Steuerlogik. Diese Datei ist der Router des Addons.
Sie verbindet Kodi UI, Plugin Calls und Playback.
## Aufgabe der Datei ## Kernaufgabe
`addon/default.py` ist der Router des Addons. Er: - Plugins laden
- lädt die Plugin-Module dynamisch, - Menues bauen
- stellt die Kodi-Navigation bereit, - Aktionen auf Plugin Methoden mappen
- übersetzt UI-Aktionen in Plugin-Aufrufe, - Playback starten
- startet die Wiedergabe und verwaltet Playstate/Resume. - Playstate speichern
## Ablauf (high level) ## Ablauf
1. **Plugin-Discovery**: Lädt alle `addon/plugins/*.py` (ohne `_`-Prefix). Bevorzugt `Plugin = <Klasse>`, sonst werden `BasisPlugin`-Subklassen deterministisch instanziiert. 1. Plugin Discovery fuer `addon/plugins/*.py` ohne `_` Prefix.
2. **Navigation**: Baut Kodi-Listen (Serien/Staffeln/Episoden) auf Basis der Plugin-Antworten. 2. Navigation fuer Titel, Staffeln und Episoden.
3. **Playback**: Holt Stream-Links aus dem Plugin und startet die Wiedergabe. 3. Playback: Link holen, optional aufloesen, abspielen.
4. **Playstate**: Speichert Resume-Daten lokal (`playstate.json`) und setzt `playcount`/Resume-Infos. 4. Playstate: watched und resume in `playstate.json` schreiben.
## Routing & Aktionen ## Routing
Die Datei arbeitet mit URL-Parametern (Kodi-Plugin-Standard). Typische Aktionen: Der Router liest Query Parameter aus `sys.argv[2]`.
- `search` → Suche über ein Plugin Typische Aktionen:
- `seasons` → Staffeln für einen Titel - `search`
- `episodes` → Episoden für eine Staffel - `seasons`
- `play` → StreamLink auflösen und abspielen - `episodes`
- `play_episode`
- `play_movie`
- `play_episode_url`
Die genaue Aktion wird aus den Query-Parametern gelesen und an das entsprechende Plugin delegiert. ## Playstate
- Speicherort: Addon Profilordner, Datei `playstate.json`
- Key: Plugin + Titel + Staffel + Episode
- Werte: watched, playcount, resume_position, resume_total
## Playstate (Resume/Watched) ## Wichtige Helper
- **Speicherort**: `playstate.json` im Addon-Profilordner. - Plugin Loader und Discovery
- **Key**: Kombination aus Plugin-Name, Titel, Staffel, Episode. - UI Builder fuer ListItems
- **Verwendung**: - Playstate Load/Save/Merge
- `playcount` wird gesetzt, wenn „gesehen“ markiert ist. - TMDB Merge mit Source Fallback
- `resume_position`/`resume_total` werden gesetzt, wenn vorhanden.
## Wichtige Hilfsfunktionen ## Fehlerverhalten
- **Plugin-Loader**: findet & instanziiert Plugins. - Importfehler pro Plugin werden isoliert behandelt.
- **UI-Helper**: setzt Content-Type, baut Verzeichnisseinträge. - Fehler in einem Plugin sollen das Addon nicht stoppen.
- **Playstate-Helper**: `_load_playstate`, `_save_playstate`, `_apply_playstate_to_info`. - User bekommt kurze Fehlermeldungen in Kodi.
- **Metadata-Merge**: Plugin-Metadaten können TMDB übersteuern, TMDB dient als Fallback.
## Fehlerbehandlung ## Erweiterung
- Plugin-Importfehler werden isoliert behandelt, damit das Addon nicht komplett ausfällt. Fuer neue Aktion im Router:
- Netzwerk-Fehler werden in Plugins abgefangen, `default.py` sollte nur saubere Fehlermeldungen weitergeben. 1. Action im `run()` Handler registrieren.
2. ListItem mit passenden Parametern bauen.
## Debugging 3. Zielmethode im Plugin bereitstellen.
- Globale Debug-Settings werden über `addon/resources/settings.xml` gesteuert.
- Plugins loggen URLs/HTML optional (siehe jeweilige PluginDoku).
## Änderungen & Erweiterungen
Für neue Aktionen:
1. Neue Aktion im Router registrieren.
2. UIEinträge passend anlegen.
3. Entsprechende PluginMethode definieren oder erweitern.
## Hinweis zur Erstellung
Teile dieser Dokumentation wurden KI-gestützt erstellt und bei Bedarf manuell angepasst.

View File

@@ -1,118 +1,85 @@
# ViewIT Entwicklerdoku Plugins (`addon/plugins/*_plugin.py`) # ViewIT Plugin Entwicklung (`addon/plugins/*_plugin.py`)
Diese Doku beschreibt, wie Plugins im ViewITAddon aufgebaut sind und wie neue ProviderIntegrationen entwickelt werden. Diese Datei zeigt, wie Plugins im Projekt aufgebaut sind und wie sie mit dem Router zusammenarbeiten.
## Grundlagen ## Grundlagen
- Jedes Plugin ist eine einzelne Datei unter `addon/plugins/`. - Ein Plugin ist eine Python Datei in `addon/plugins/`.
- Dateinamen **ohne** `_`-Prefix werden automatisch geladen. - Dateien mit `_` Prefix werden nicht geladen.
- Jede Datei enthält eine Klasse, die von `BasisPlugin` erbt. - Plugin Klasse erbt von `BasisPlugin`.
- Optional: `Plugin = <Klasse>` als expliziter Einstiegspunkt (bevorzugt vom Loader). - Optional: `Plugin = <Klasse>` als klarer Einstiegspunkt.
## PflichtMethoden (BasisPlugin) ## Pflichtmethoden
Jedes Plugin muss diese Methoden implementieren: Jedes Plugin implementiert:
- `async search_titles(query: str) -> list[str]` - `async search_titles(query: str) -> list[str]`
- `seasons_for(title: str) -> list[str]` - `seasons_for(title: str) -> list[str]`
- `episodes_for(title: str, season: str) -> list[str]` - `episodes_for(title: str, season: str) -> list[str]`
## Vertrag Plugin ↔ Hauptlogik (`default.py`) ## Wichtige optionale Methoden
Die Hauptlogik ruft Plugin-Methoden auf und verarbeitet ausschließlich deren Rückgaben. - `stream_link_for(...)`
- `resolve_stream_link(...)`
- `metadata_for(...)`
- `available_hosters_for(...)`
- `series_url_for_title(...)`
- `remember_series_url(...)`
- `episode_url_for(...)`
- `available_hosters_for_url(...)`
- `stream_link_for_url(...)`
Wesentliche Rückgaben an die Hauptlogik: ## Film Provider Standard
- `search_titles(...)` → Liste von Titel-Strings für die Trefferliste Wenn keine echten Staffeln existieren:
- `seasons_for(...)` → Liste von Staffel-Labels - `seasons_for(title)` gibt `['Film']`
- `episodes_for(...)` → Liste von Episoden-Labels - `episodes_for(title, 'Film')` gibt `['Stream']`
- `stream_link_for(...)` → Hoster-/Player-Link (nicht zwingend finale Media-URL)
- `resolve_stream_link(...)` → finale/spielbare URL nach Redirect/Resolver
- `metadata_for(...)` → Info-Labels/Art (Plot/Poster) aus der Quelle
- Optional `available_hosters_for(...)` → auswählbare Hoster-Namen im Dialog
- Optional `series_url_for_title(...)` → stabile Detail-URL pro Titel für Folgeaufrufe
- Optional `remember_series_url(...)` → Übernahme einer bereits bekannten Detail-URL
Standard für Film-Provider (ohne echte Staffeln): ## Capabilities
- `seasons_for(title)` gibt `["Film"]` zurück Ein Plugin kann Features melden ueber `capabilities()`.
- `episodes_for(title, "Film")` gibt `["Stream"]` zurück Bekannte Werte:
- `popular_series`
- `genres`
- `latest_episodes`
- `new_titles`
- `alpha`
- `series_catalog`
## Optionale Features (Capabilities) ## Suche
Über `capabilities()` kann das Plugin zusätzliche Funktionen anbieten: Aktuelle Regeln fuer Suchtreffer:
- `popular_series``popular_series()` - Match auf Titel
- `genres``genres()` + `titles_for_genre(genre)` - Wortbasiert
- `latest_episodes``latest_episodes(page=1)` - Keine Teilwort Treffer im selben Wort
- `new_titles``new_titles_page(page=1)` - Beschreibungen nicht fuer Match nutzen
- `alpha``alpha_index()` + `titles_for_alpha_page(letter, page)`
- `series_catalog``series_catalog_page(page=1)`
## Empfohlene Struktur ## Settings
- Konstanten für URLs/Endpoints (BASE_URL, Pfade, Templates) Pro Plugin meist `*_base_url`.
- `requests` + `bs4` optional (fehlt beides, Plugin sollte sauber deaktivieren) Beispiele:
- Helper-Funktionen für Parsing und Normalisierung Beispiele:
- Caches für Such-, Staffel- und Episoden-Daten - `serienstream_base_url`
- `einschalten_base_url`
- `topstream_base_url`
- `filmpalast_base_url`
- `doku_streams_base_url`
## Suche (aktuelle Policy) ## Playback Flow
- **Nur Titel-Matches** 1. Episode oder Film auswaehlen.
- **Wortbasierter Match** nach Normalisierung (Lowercase + NichtAlnum → Leerzeichen) 2. Optional Hosterliste anzeigen.
- Keine Teilwort-Treffer innerhalb eines Wortes (Beispiel: `hund` matcht nicht `thunder`) 3. `stream_link_for` oder `stream_link_for_url` aufrufen.
- Keine Beschreibung/Plot/Meta für Matches 4. `resolve_stream_link` aufrufen.
5. Finale URL an Kodi geben.
## Namensgebung ## Logging
- Plugin-Klassenname: `XxxPlugin` Nutze Helper aus `addon/plugin_helpers.py`:
- Anzeigename (Property `name`): **mit Großbuchstaben beginnen** (z.B. `Serienstream`, `Einschalten`)
## Settings pro Plugin
Standard: `*_base_url` (Domain / BASE_URL)
- Beispiele:
- `serienstream_base_url`
- `aniworld_base_url`
- `einschalten_base_url`
- `topstream_base_url`
- `filmpalast_base_url`
- `doku_streams_base_url`
## Playback
- `stream_link_for(...)` implementieren (liefert bevorzugten Hoster-Link).
- `available_hosters_for(...)` bereitstellen, wenn die Seite mehrere Hoster anbietet.
- `resolve_stream_link(...)` nach einheitlichem Flow umsetzen:
1. Redirects auflösen (falls vorhanden)
2. ResolveURL (`resolveurl_backend.resolve`) versuchen
3. Bei Fehlschlag auf den besten verfügbaren Link zurückfallen
- Optional `set_preferred_hosters(...)` unterstützen, damit die Hoster-Auswahl aus der Hauptlogik direkt greift.
## StandardFlow (empfohlen)
1. **Suche**: nur Titel liefern und Titel→Detail-URL mappen.
2. **Navigation**: `series_url_for_title`/`remember_series_url` unterstützen, damit URLs zwischen Aufrufen stabil bleiben.
3. **Auswahl Hoster**: Hoster-Namen aus der Detailseite extrahieren und anbieten.
4. **Playback**: Hoster-Link liefern, danach konsistent über `resolve_stream_link` finalisieren.
5. **Metadaten**: `metadata_for` nutzen, Plot/Poster aus der Quelle zurückgeben.
6. **Fallbacks**: bei Layout-Unterschieden defensiv parsen und Logging aktivierbar halten.
## Debugging
Global gesteuert über Settings:
- `debug_log_urls`
- `debug_dump_html`
- `debug_show_url_info`
Plugins sollten die Helper aus `addon/plugin_helpers.py` nutzen:
- `log_url(...)` - `log_url(...)`
- `dump_response_html(...)` - `dump_response_html(...)`
- `notify_url(...)` - `notify_url(...)`
## Template ## Build und Checks
`addon/plugins/_template_plugin.py` dient als Startpunkt für neue Provider. - ZIP: `./scripts/build_kodi_zip.sh`
- Addon Ordner: `./scripts/build_install_addon.sh`
- Manifest: `python3 scripts/generate_plugin_manifest.py`
- Snapshot Checks: `python3 qa/run_plugin_snapshots.py`
## Build & Test ## Kurze Checkliste
- ZIP bauen: `./scripts/build_kodi_zip.sh` - `name` gesetzt und korrekt
- Addon-Ordner: `./scripts/build_install_addon.sh` - `*_base_url` in Settings vorhanden
- Plugin-Manifest aktualisieren: `python3 scripts/generate_plugin_manifest.py` - Suche liefert nur passende Titel
- Live-Snapshot-Checks: `python3 qa/run_plugin_snapshots.py` (aktualisieren mit `--update`) - Playback Methoden vorhanden
- Fehler und Timeouts behandelt
## BeispielCheckliste - Cache nur da, wo er Zeit spart
- [ ] `name` korrekt gesetzt
- [ ] `*_base_url` in Settings vorhanden
- [ ] Suche matcht nur Titel und wortbasiert
- [ ] `stream_link_for` + `resolve_stream_link` folgen dem Standard-Flow
- [ ] Optional: `available_hosters_for` + `set_preferred_hosters` vorhanden
- [ ] Optional: `series_url_for_title` + `remember_series_url` vorhanden
- [ ] Fehlerbehandlung und Timeouts vorhanden
- [ ] Optional: Caches für Performance
## Hinweis zur Erstellung
Teile dieser Dokumentation wurden KI-gestützt erstellt und bei Bedarf manuell angepasst.

View File

@@ -1,115 +1,71 @@
## ViewIt Plugin-System # ViewIT Plugin System
Dieses Dokument beschreibt, wie das Plugin-System von **ViewIt** funktioniert und wie die Community neue Integrationen hinzufügen kann. Dieses Dokument beschreibt Laden, Vertrag und Betrieb der Plugins.
### Überblick ## Ueberblick
Der Router laedt Provider Integrationen aus `addon/plugins/*.py`.
Aktive Plugins werden instanziiert und im UI genutzt.
ViewIt lädt Provider-Integrationen dynamisch aus `addon/plugins/*.py`. Jede Datei enthält eine Klasse, die von `BasisPlugin` erbt. Beim Start werden alle Plugins instanziiert und nur aktiv genutzt, wenn sie verfügbar sind. Relevante Dateien:
- `addon/default.py`
- `addon/plugin_interface.py`
- `docs/DEFAULT_ROUTER.md`
- `docs/PLUGIN_DEVELOPMENT.md`
Weitere Details: ## Aktuelle Plugins
- `docs/DEFAULT_ROUTER.md` (Hauptlogik in `addon/default.py`) - `serienstream_plugin.py`
- `docs/PLUGIN_DEVELOPMENT.md` (Entwicklerdoku für Plugins) - `topstreamfilm_plugin.py`
- `docs/PLUGIN_MANIFEST.json` (zentraler Überblick über Plugins, Versionen, Capabilities) - `einschalten_plugin.py`
- `aniworld_plugin.py`
- `filmpalast_plugin.py`
- `dokustreams_plugin.py`
- `_template_plugin.py` (Vorlage)
### Aktuelle Plugins ## Discovery Ablauf
In `addon/default.py`:
1. Finde `*.py` in `addon/plugins/`
2. Ueberspringe Dateien mit `_` Prefix
3. Importiere Modul
4. Nutze `Plugin = <Klasse>`, falls vorhanden
5. Sonst instanziiere `BasisPlugin` Subklassen deterministisch
6. Ueberspringe Plugins mit `is_available = False`
- `serienstream_plugin.py` Serienstream (s.to) ## Basis Interface
- `topstreamfilm_plugin.py` Topstreamfilm `BasisPlugin` definiert den Kern:
- `einschalten_plugin.py` Einschalten - `search_titles`
- `aniworld_plugin.py` Aniworld - `seasons_for`
- `filmpalast_plugin.py` Filmpalast - `episodes_for`
- `dokustreams_plugin.py` Doku-Streams
- `_template_plugin.py` Vorlage für neue Plugins
### Plugin-Discovery (Ladeprozess) Weitere Methoden sind optional und werden nur genutzt, wenn vorhanden.
Der Loader in `addon/default.py`: ## Capabilities
Plugins koennen Features aktiv melden.
Typische Werte:
- `popular_series`
- `genres`
- `latest_episodes`
- `new_titles`
- `alpha`
- `series_catalog`
1. Sucht alle `*.py` in `addon/plugins/` Das UI zeigt nur Menues fuer aktiv gemeldete Features.
2. Überspringt Dateien, die mit `_` beginnen
3. Lädt Module dynamisch
4. Nutzt `Plugin = <Klasse>` als bevorzugten Einstiegspunkt (falls vorhanden)
5. Fallback: instanziiert Klassen, die von `BasisPlugin` erben (deterministisch sortiert) 5. Sonst instanziiere `BasisPlugin` Subklassen deterministisch
6. Ignoriert Plugins mit `is_available = False`
Damit bleiben fehlerhafte Plugins isoliert und blockieren nicht das gesamte Add-on. ## Metadaten Quelle
`prefer_source_metadata = True` bedeutet:
- Quelle zuerst
- TMDB nur Fallback
### Plugin-Manifest (Audit & Repro) ## Stabilitaet
`docs/PLUGIN_MANIFEST.json` listet alle Plugins mit Version, Capabilities und Basis-Settings. - Keine Netz Calls im Import Block.
Erzeugung: `python3 scripts/generate_plugin_manifest.py` - Fehler im Plugin muessen lokal behandelt werden.
- Ein defektes Plugin darf andere Plugins nicht blockieren.
### BasisPlugin verpflichtende Methoden ## Build
Kodi ZIP bauen:
Definiert in `addon/plugin_interface.py`: ```bash
- `async search_titles(query: str) -> list[str]`
- `seasons_for(title: str) -> list[str]`
- `episodes_for(title: str, season: str) -> list[str]`
- optional `metadata_for(title: str) -> (info_labels, art, cast)`
### Optionale Features (Capabilities)
Plugins können zusätzliche Features anbieten:
- `capabilities() -> set[str]`
- `popular_series`: liefert beliebte Serien
- `genres`: Genre-Liste verfügbar
- `latest_episodes`: neue Episoden verfügbar
- `new_titles`: neue Titel verfügbar
- `alpha`: A-Z Index verfügbar
- `series_catalog`: Serienkatalog verfügbar
- `popular_series() -> list[str]`
- `genres() -> list[str]`
- `titles_for_genre(genre: str) -> list[str]`
- `latest_episodes(page: int = 1) -> list[LatestEpisode]` (wenn angeboten)
- `new_titles_page(page: int = 1) -> list[str]` (wenn angeboten)
- `alpha_index() -> list[str]` (wenn angeboten)
- `series_catalog_page(page: int = 1) -> list[str]` (wenn angeboten)
Metadaten:
- `prefer_source_metadata = True` bedeutet: Plugin-Metadaten gehen vor TMDB, TMDB dient nur als Fallback.
ViewIt zeigt im UI nur die Features an, die ein Plugin tatsächlich liefert.
### Plugin-Struktur (empfohlen)
Eine Integration sollte typischerweise bieten:
- Konstante `BASE_URL`
- `search_titles()` mit Provider-Suche
- `seasons_for()` und `episodes_for()` mit HTML-Parsing
- `stream_link_for()` optional für direkte Playback-Links
- `metadata_for()` optional für Plot/Poster aus der Quelle
- Optional: `available_hosters_for()` oder Provider-spezifische Helfer
Als Startpunkt dient `addon/plugins/_template_plugin.py`.
### Community-Erweiterungen (Workflow)
1. Fork/Branch erstellen
2. Neue Datei unter `addon/plugins/` hinzufügen (z.B. `meinprovider_plugin.py`)
3. Klasse erstellen, die `BasisPlugin` implementiert
4. In Kodi testen (ZIP bauen, installieren)
5. PR öffnen
### Qualitätsrichtlinien
- Keine Netzwerkzugriffe im Import-Top-Level
- Netzwerkzugriffe nur in Methoden (z.B. `search_titles`)
- Fehler sauber abfangen und verständliche Fehlermeldungen liefern
- Kein globaler Zustand, der über Instanzen hinweg überrascht
- Provider-spezifische Parser in Helper-Funktionen kapseln
- Reproduzierbare Reihenfolge: `Plugin`-Alias nutzen oder Klassenname eindeutig halten
### Debugging & Logs
Hilfreiche Logs werden nach `userdata/addon_data/plugin.video.viewit/logs/` geschrieben.
Provider sollten URL-Logging optional halten (Settings).
### ZIP-Build
```
./scripts/build_kodi_zip.sh ./scripts/build_kodi_zip.sh
``` ```
Das ZIP liegt anschließend unter `dist/plugin.video.viewit-<version>.zip`. Ergebnis:
`dist/plugin.video.viewit-<version>.zip`

View File

@@ -21,8 +21,20 @@ fi
mkdir -p "${REPO_DIR}" mkdir -p "${REPO_DIR}"
read -r ADDON_ID ADDON_VERSION < <(python3 - "${PLUGIN_ADDON_XML}" <<'PY'
import sys
import xml.etree.ElementTree as ET
root = ET.parse(sys.argv[1]).getroot()
print(root.attrib.get("id", "plugin.video.viewit"), root.attrib.get("version", "0.0.0"))
PY
)
PLUGIN_ZIP="$("${ROOT_DIR}/scripts/build_kodi_zip.sh")" PLUGIN_ZIP="$("${ROOT_DIR}/scripts/build_kodi_zip.sh")"
cp -f "${PLUGIN_ZIP}" "${REPO_DIR}/" PLUGIN_ZIP_NAME="$(basename "${PLUGIN_ZIP}")"
PLUGIN_ADDON_DIR_IN_REPO="${REPO_DIR}/${ADDON_ID}"
mkdir -p "${PLUGIN_ADDON_DIR_IN_REPO}"
cp -f "${PLUGIN_ZIP}" "${PLUGIN_ADDON_DIR_IN_REPO}/${PLUGIN_ZIP_NAME}"
read -r REPO_ADDON_ID REPO_ADDON_VERSION < <(python3 - "${REPO_ADDON_XML}" <<'PY' read -r REPO_ADDON_ID REPO_ADDON_VERSION < <(python3 - "${REPO_ADDON_XML}" <<'PY'
import sys import sys
@@ -74,6 +86,9 @@ REPO_ZIP_NAME="${REPO_ADDON_ID}-${REPO_ADDON_VERSION}.zip"
REPO_ZIP_PATH="${REPO_DIR}/${REPO_ZIP_NAME}" REPO_ZIP_PATH="${REPO_DIR}/${REPO_ZIP_NAME}"
rm -f "${REPO_ZIP_PATH}" rm -f "${REPO_ZIP_PATH}"
python3 "${ROOT_DIR}/scripts/zip_deterministic.py" "${REPO_ZIP_PATH}" "${TMP_REPO_ADDON_DIR}" >/dev/null python3 "${ROOT_DIR}/scripts/zip_deterministic.py" "${REPO_ZIP_PATH}" "${TMP_REPO_ADDON_DIR}" >/dev/null
REPO_ADDON_DIR_IN_REPO="${REPO_DIR}/${REPO_ADDON_ID}"
mkdir -p "${REPO_ADDON_DIR_IN_REPO}"
cp -f "${REPO_ZIP_PATH}" "${REPO_ADDON_DIR_IN_REPO}/${REPO_ZIP_NAME}"
python3 - "${PLUGIN_ADDON_XML}" "${TMP_REPO_ADDON_DIR}/addon.xml" "${REPO_DIR}/addons.xml" <<'PY' python3 - "${PLUGIN_ADDON_XML}" "${TMP_REPO_ADDON_DIR}/addon.xml" "${REPO_DIR}/addons.xml" <<'PY'
import sys import sys
@@ -107,4 +122,5 @@ echo "Repo built:"
echo " ${REPO_DIR}/addons.xml" echo " ${REPO_DIR}/addons.xml"
echo " ${REPO_DIR}/addons.xml.md5" echo " ${REPO_DIR}/addons.xml.md5"
echo " ${REPO_ZIP_PATH}" echo " ${REPO_ZIP_PATH}"
echo " ${REPO_DIR}/$(basename "${PLUGIN_ZIP}")" echo " ${PLUGIN_ADDON_DIR_IN_REPO}/${PLUGIN_ZIP_NAME}"
echo " ${REPO_ADDON_DIR_IN_REPO}/${REPO_ZIP_NAME}"