782 lines
27 KiB
Python
782 lines
27 KiB
Python
"""Moflix-Stream Plugin für ViewIT.
|
||
|
||
Nutzt die JSON-REST-API von moflix-stream.xyz.
|
||
Kein HTML-Parsing nötig – alle Daten kommen als JSON.
|
||
"""
|
||
|
||
from __future__ import annotations
|
||
|
||
import re
|
||
from typing import TYPE_CHECKING, Any, Callable, List, Optional
|
||
from urllib.parse import quote, quote_plus, urlparse
|
||
|
||
try: # pragma: no cover - optional dependency
|
||
import requests
|
||
except ImportError as exc: # pragma: no cover
|
||
requests = None
|
||
REQUESTS_AVAILABLE = False
|
||
REQUESTS_IMPORT_ERROR = exc
|
||
else:
|
||
REQUESTS_AVAILABLE = True
|
||
REQUESTS_IMPORT_ERROR = None
|
||
|
||
from plugin_interface import BasisPlugin
|
||
|
||
if TYPE_CHECKING: # pragma: no cover
|
||
from requests import Session as RequestsSession
|
||
else: # pragma: no cover
|
||
RequestsSession = Any
|
||
|
||
ProgressCallback = Optional[Callable[[str, Optional[int]], Any]]
|
||
|
||
# ---------------------------------------------------------------------------
# Constants
# ---------------------------------------------------------------------------

ADDON_ID = "plugin.video.viewit"
BASE_URL = "https://moflix-stream.xyz"
# Timeout in seconds applied to every HTTP request made by this plugin
DEFAULT_TIMEOUT = 20

# Browser-like default headers for all JSON API requests
HEADERS = {
    "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
    "Accept": "application/json, text/plain, */*",
    "Accept-Language": "de-DE,de;q=0.9,en;q=0.8",
    "Connection": "keep-alive",
    "Referer": BASE_URL + "/",
}

# Separate header definition for VidHide requests (moflix-stream.click).
# A separate browser UA prevents UA-based blocking by VidHide.
_VIDHIDE_HEADERS = {
    "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
    "Accept-Language": "de-DE,de;q=0.9,en;q=0.8",
    "Connection": "keep-alive",
    "Referer": BASE_URL + "/",
    "Sec-Fetch-Dest": "document",
    "Sec-Fetch-Mode": "navigate",
    "Sec-Fetch-Site": "cross-site",
}

# Hoster domains that in practice return 403 or lack ResolveURL support
_VIDEO_SKIP_DOMAINS: frozenset[str] = frozenset({
    "gupload.xyz",
    "veev.to",
})

# Hoster domains directly resolvable through their own API (preferred)
_VIDEO_PREFER_DOMAINS: frozenset[str] = frozenset({
    "vidara.to",
})

# API endpoint templates (filled via str.format)
_URL_SEARCH = BASE_URL + "/api/v1/search/{q1}?query={q2}&limit=8"
_URL_CHANNEL = BASE_URL + "/api/v1/channel/{slug}?channelType=channel&restriction=&paginate=simple"
_URL_TITLE = (
    BASE_URL + "/api/v1/titles/{id}"
    "?load=images,genres,productionCountries,keywords,videos,primaryVideo,seasons,compactCredits"
)
_URL_EPISODES = BASE_URL + "/api/v1/titles/{id}/seasons/{s}/episodes?perPage=100&query=&page=1"
_URL_EPISODE = (
    BASE_URL + "/api/v1/titles/{id}/seasons/{s}/episodes/{e}"
    "?load=videos,compactCredits,primaryVideo"
)

# Genre slugs (hardcoded, as there is no genre API)
GENRE_SLUGS: dict[str, str] = {
    "Action": "action",
    "Animation": "animation",
    "Dokumentation": "dokumentation",
    "Drama": "drama",
    "Familie": "top-kids-liste",
    "Fantasy": "fantasy",
    "Horror": "horror",
    "Komödie": "comedy",
    "Krimi": "crime",
    "Liebesfilm": "romance",
    "Science-Fiction": "science-fiction",
    "Thriller": "thriller",
}

# Collections (slugs taken from the official xStream plugin)
COLLECTION_SLUGS: dict[str, str] = {
    "American Pie Complete Collection": "the-american-pie-collection",
    "Bud Spencer & Terence Hill": "bud-spencer-terence-hill-collection",
    "DC Superhelden Collection": "the-dc-universum-collection",
    "Mission: Impossible Collection": "the-mission-impossible-collection",
    "Fast & Furious Collection": "fast-furious-movie-collection",
    "Halloween Collection": "halloween-movie-collection",
    "Herr der Ringe Collection": "der-herr-der-ringe-collection",
    "James Bond Collection": "the-james-bond-collection",
    "Jason Bourne Collection": "the-jason-bourne-collection",
    "Jurassic Park Collection": "the-jurassic-park-collection",
    "Kinder & Familienfilme": "top-kids-liste",
    "Marvel Cinematic Universe": "the-marvel-cinematic-universe-collection",
    "Olsenbande Collection": "the-olsenbande-collection",
    "Planet der Affen Collection": "the-planet-der-affen-collection",
    "Rocky Collection": "rocky-the-knockout-collection",
    "Star Trek Kinofilm Collection": "the-star-trek-movies-collection",
    "Star Wars Collection": "the-star-wars-collection",
    "Stirb Langsam Collection": "stirb-langsam-collection",
    "X-Men Collection": "x-men-collection",
}
|
||
|
||
|
||
# ---------------------------------------------------------------------------
|
||
# Hilfsfunktionen (Modul-Ebene)
|
||
# ---------------------------------------------------------------------------
|
||
|
||
def _extract_first_number(label: str) -> int | None:
|
||
"""Extrahiert erste Ganzzahl aus einem Label. 'Staffel 2' → 2."""
|
||
m = re.search(r"\d+", label or "")
|
||
return int(m.group()) if m else None
|
||
|
||
|
||
def _normalize_video_name(name: str, src: str) -> str:
|
||
"""Normalisiert den Hoster-Namen eines Video-Objekts.
|
||
|
||
'Mirror-HDCloud' → Domain aus src; 'VidCloud-720' → 'VidCloud'
|
||
"""
|
||
name = (name or "").strip()
|
||
if name.lower().startswith("mirror"):
|
||
parsed = urlparse(src or "")
|
||
host = parsed.netloc or ""
|
||
return host.split(".")[0].capitalize() if host else name
|
||
return name.split("-")[0].strip() or name
|
||
|
||
|
||
def _safe_str(value: object) -> str:
|
||
"""Konvertiert einen Wert sicher zu String, None → ''."""
|
||
if value is None:
|
||
return ""
|
||
return str(value).strip()
|
||
|
||
|
||
def _unpack_packer(packed_js: str) -> str:
|
||
"""Entpackt Dean Edwards p.a.c.k.e.r. JavaScript.
|
||
|
||
Format:
|
||
eval(function(p,a,c,k,e,d){...}('code',base,count,'k1|k2|...'.split('|'),0,0))
|
||
|
||
Findet die gepackte Zeichenkette, die Basis und den Schlüssel-String,
|
||
konvertiert jeden Token (base-N → Index) und ersetzt ihn durch das
|
||
jeweilige Schlüsselwort.
|
||
"""
|
||
m = re.search(
|
||
r"'((?:[^'\\]|\\.){20,})'\s*,\s*(\d+)\s*,\s*\d+\s*,\s*"
|
||
r"'((?:[^'\\]|\\.)*)'\s*\.split\s*\(\s*'\|'\s*\)",
|
||
packed_js,
|
||
)
|
||
if not m:
|
||
return packed_js
|
||
|
||
packed = m.group(1).replace("\\'", "'").replace("\\\\", "\\")
|
||
base = int(m.group(2))
|
||
keys = m.group(3).split("|")
|
||
|
||
_digits = "0123456789abcdefghijklmnopqrstuvwxyz"
|
||
|
||
def _unbase(s: str) -> int:
|
||
result = 0
|
||
for ch in s:
|
||
if ch not in _digits:
|
||
raise ValueError(f"Not a base-{base} digit: {ch!r}")
|
||
result = result * base + _digits.index(ch)
|
||
return result
|
||
|
||
def _replace(m2: re.Match) -> str: # type: ignore[type-arg]
|
||
token = m2.group(0)
|
||
try:
|
||
idx = _unbase(token)
|
||
replacement = keys[idx] if idx < len(keys) else ""
|
||
return replacement if replacement else token
|
||
except (ValueError, IndexError):
|
||
return token
|
||
|
||
return re.sub(r"\b\w+\b", _replace, packed)
|
||
|
||
|
||
# ---------------------------------------------------------------------------
|
||
# Plugin-Klasse
|
||
# ---------------------------------------------------------------------------
|
||
|
||
class MoflixPlugin(BasisPlugin):
    """Moflix-Stream integration for ViewIT.

    Uses the official JSON REST API -- no HTML scraping.
    """

    name = "Moflix"

    def __init__(self) -> None:
        # title (str) -> full API URL /api/v1/titles/{id}
        self._title_to_url: dict[str, str] = {}
        # title -> (plot, poster_url, fanart_url)
        self._title_meta: dict[str, tuple[str, str, str]] = {}
        # title -> True for a series, False for a movie
        self._is_series: dict[str, bool] = {}
        # (title, season_nr) -> Moflix API id (changes per season!)
        self._season_api_ids: dict[tuple[str, int], str] = {}
        # (title, season_nr) -> list of episode labels
        self._episode_labels: dict[tuple[str, int], list[str]] = {}
        # preferred hosters for the hoster selection dialog
        self._preferred_hosters: list[str] = []
|
||
|
||
# ------------------------------------------------------------------
|
||
# Verfügbarkeit
|
||
# ------------------------------------------------------------------
|
||
|
||
    @property
    def is_available(self) -> bool:
        """Whether the plugin can be used (the optional `requests` import succeeded)."""
        return REQUESTS_AVAILABLE
|
||
|
||
@property
|
||
def unavailable_reason(self) -> str:
|
||
if REQUESTS_AVAILABLE:
|
||
return ""
|
||
return f"requests nicht verfügbar: {REQUESTS_IMPORT_ERROR}"
|
||
|
||
# ------------------------------------------------------------------
|
||
# HTTP
|
||
# ------------------------------------------------------------------
|
||
|
||
    def _get_session(self) -> RequestsSession:
        """Return the shared pooled requests session registered under 'moflix'."""
        from http_session_pool import get_requests_session
        return get_requests_session("moflix", headers=HEADERS)
|
||
|
||
def _get_json(self, url: str, headers: dict | None = None) -> dict | list | None:
|
||
"""GET-Request, gibt geparste JSON-Antwort zurück oder None bei Fehler."""
|
||
session = self._get_session()
|
||
response = None
|
||
try:
|
||
response = session.get(url, headers=headers or HEADERS, timeout=DEFAULT_TIMEOUT)
|
||
response.raise_for_status()
|
||
return response.json()
|
||
except Exception:
|
||
return None
|
||
finally:
|
||
if response is not None:
|
||
try:
|
||
response.close()
|
||
except Exception:
|
||
pass
|
||
|
||
def _get_html(
|
||
self,
|
||
url: str,
|
||
headers: dict | None = None,
|
||
fresh_session: bool = False,
|
||
) -> str | None:
|
||
"""GET-Request, gibt den Response-Text (HTML) zurück oder None bei Fehler.
|
||
|
||
fresh_session=True: eigene requests.Session (keine gecachten Cookies/State).
|
||
"""
|
||
response = None
|
||
try:
|
||
if fresh_session:
|
||
import requests as _req
|
||
session = _req.Session()
|
||
else:
|
||
session = self._get_session()
|
||
|
||
req_headers = headers or {
|
||
**HEADERS,
|
||
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
|
||
}
|
||
response = session.get(url, headers=req_headers, timeout=DEFAULT_TIMEOUT)
|
||
response.raise_for_status()
|
||
return response.text
|
||
except Exception:
|
||
return None
|
||
finally:
|
||
if response is not None:
|
||
try:
|
||
response.close()
|
||
except Exception:
|
||
pass
|
||
|
||
# ------------------------------------------------------------------
|
||
# Interne Hilfsmethoden
|
||
# ------------------------------------------------------------------
|
||
|
||
def _cache_channel_entry(self, entry: dict) -> str:
|
||
"""Cached einen Kanal/Sucheintrag und gibt den Titel zurück (oder '' zum Überspringen)."""
|
||
title = _safe_str(entry.get("name"))
|
||
if not title:
|
||
return ""
|
||
api_id = _safe_str(entry.get("id"))
|
||
if not api_id:
|
||
return ""
|
||
|
||
self._title_to_url[title] = _URL_TITLE.format(id=api_id)
|
||
is_series = bool(entry.get("is_series", False))
|
||
self._is_series[title] = is_series
|
||
|
||
plot = _safe_str(entry.get("description"))
|
||
poster = _safe_str(entry.get("poster"))
|
||
fanart = _safe_str(entry.get("backdrop"))
|
||
self._title_meta[title] = (plot, poster, fanart)
|
||
return title
|
||
|
||
def _titles_from_channel(self, slug: str, page: int = 1) -> list[str]:
|
||
"""Lädt Titel eines Moflix-Channels (Kategorie/Genre/Collection)."""
|
||
url = _URL_CHANNEL.format(slug=slug)
|
||
if page > 1:
|
||
url = f"{url}&page={page}"
|
||
data = self._get_json(url)
|
||
if not isinstance(data, dict):
|
||
return []
|
||
entries = []
|
||
try:
|
||
entries = data["channel"]["content"]["data"]
|
||
except (KeyError, TypeError):
|
||
return []
|
||
titles: list[str] = []
|
||
for entry in (entries or []):
|
||
if not isinstance(entry, dict):
|
||
continue
|
||
t = self._cache_channel_entry(entry)
|
||
if t:
|
||
titles.append(t)
|
||
return titles
|
||
|
||
def _ensure_title_url(self, title: str) -> str:
|
||
"""Gibt die gecachte API-URL für einen Titel zurück, oder ''."""
|
||
return self._title_to_url.get(title, "")
|
||
|
||
def _resolve_title(self, title: str) -> None:
|
||
"""Cache-Miss-Fallback: Titel per Such-API nachschlagen und cachen.
|
||
|
||
Wird aufgerufen wenn der In-Memory-Cache leer ist (z.B. nach einem
|
||
neuen Kodi-Addon-Aufruf, der eine frische Plugin-Instanz erzeugt).
|
||
"""
|
||
q1 = quote(title)
|
||
q2 = quote_plus(title)
|
||
url = _URL_SEARCH.format(q1=q1, q2=q2)
|
||
data = self._get_json(url)
|
||
if not isinstance(data, dict):
|
||
return
|
||
for entry in (data.get("results") or []):
|
||
if not isinstance(entry, dict):
|
||
continue
|
||
if _safe_str(entry.get("name")) == title:
|
||
self._cache_channel_entry(entry)
|
||
return
|
||
|
||
# ------------------------------------------------------------------
|
||
# Pflicht-Methoden
|
||
# ------------------------------------------------------------------
|
||
|
||
async def search_titles(
|
||
self,
|
||
query: str,
|
||
progress_callback: ProgressCallback = None,
|
||
) -> List[str]:
|
||
query = (query or "").strip()
|
||
if not query or not REQUESTS_AVAILABLE:
|
||
return []
|
||
|
||
q1 = quote(query)
|
||
q2 = quote_plus(query)
|
||
url = _URL_SEARCH.format(q1=q1, q2=q2)
|
||
data = self._get_json(url)
|
||
if not isinstance(data, dict):
|
||
return []
|
||
|
||
results = data.get("results") or []
|
||
titles: list[str] = []
|
||
for entry in results:
|
||
if not isinstance(entry, dict):
|
||
continue
|
||
# Personen überspringen
|
||
if "person" in _safe_str(entry.get("model_type")):
|
||
continue
|
||
t = self._cache_channel_entry(entry)
|
||
if t:
|
||
titles.append(t)
|
||
return titles
|
||
|
||
def seasons_for(self, title: str) -> List[str]:
|
||
title = (title or "").strip()
|
||
if not title:
|
||
return []
|
||
|
||
# Film: direkt zum Stream
|
||
if self._is_series.get(title) is False:
|
||
return ["Film"]
|
||
|
||
url = self._ensure_title_url(title)
|
||
if not url:
|
||
self._resolve_title(title)
|
||
url = self._ensure_title_url(title)
|
||
if not url:
|
||
return []
|
||
|
||
data = self._get_json(url)
|
||
if not isinstance(data, dict):
|
||
return []
|
||
|
||
seasons_raw = []
|
||
try:
|
||
seasons_raw = data["seasons"]["data"]
|
||
except (KeyError, TypeError):
|
||
pass
|
||
|
||
if not seasons_raw:
|
||
# Kein Staffel-Daten → Film-Fallback
|
||
return ["Film"]
|
||
|
||
# Nach Staffelnummer sortieren
|
||
seasons_raw = sorted(seasons_raw, key=lambda s: int(s.get("number", 0) or 0))
|
||
|
||
labels: list[str] = []
|
||
for season in seasons_raw:
|
||
if not isinstance(season, dict):
|
||
continue
|
||
nr = season.get("number")
|
||
api_id = _safe_str(season.get("title_id"))
|
||
if nr is None or not api_id:
|
||
continue
|
||
try:
|
||
season_nr = int(nr)
|
||
except (ValueError, TypeError):
|
||
continue
|
||
self._season_api_ids[(title, season_nr)] = api_id
|
||
labels.append(f"Staffel {season_nr}")
|
||
|
||
return labels
|
||
|
||
    def episodes_for(self, title: str, season: str) -> List[str]:
        """Return episode labels ('Episode N – Name') for a season of *title*."""
        title = (title or "").strip()
        season = (season or "").strip()
        if not title or not season:
            return []

        # Movie: the "episode" is the title itself
        if season == "Film":
            return [title]

        season_nr = _extract_first_number(season)
        if season_nr is None:
            return []

        # Cache hit
        cached = self._episode_labels.get((title, season_nr))
        if cached is not None:
            return cached

        api_id = self._season_api_ids.get((title, season_nr), "")
        if not api_id:
            # Reload the seasons if they are not cached yet
            self.seasons_for(title)
            api_id = self._season_api_ids.get((title, season_nr), "")
            if not api_id:
                return []

        url = _URL_EPISODES.format(id=api_id, s=season_nr)
        data = self._get_json(url)
        if not isinstance(data, dict):
            return []

        episodes_raw = []
        try:
            episodes_raw = data["pagination"]["data"]
        except (KeyError, TypeError):
            pass

        labels: list[str] = []
        for ep in (episodes_raw or []):
            if not isinstance(ep, dict):
                continue
            # Skip episodes that have no video attached
            if ep.get("primary_video") is None:
                continue
            ep_nr_raw = ep.get("episode_number")
            ep_name = _safe_str(ep.get("name"))
            try:
                ep_nr = int(ep_nr_raw or 0)
            except (ValueError, TypeError):
                continue
            if ep_nr <= 0:
                continue

            label = f"Episode {ep_nr}"
            if ep_name:
                label = f"{label} – {ep_name}"

            labels.append(label)

        self._episode_labels[(title, season_nr)] = labels
        return labels
|
||
|
||
# ------------------------------------------------------------------
|
||
# Stream
|
||
# ------------------------------------------------------------------
|
||
|
||
    def _videos_for(self, title: str, season: str, episode: str) -> list[dict]:
        """Return the raw videos[] list for a title/season/episode."""
        title = (title or "").strip()
        season = (season or "").strip()

        if season == "Film":
            url = self._ensure_title_url(title)
            if not url:
                self._resolve_title(title)
                url = self._ensure_title_url(title)
                if not url:
                    return []
            data = self._get_json(url)
            if not isinstance(data, dict):
                return []
            return (data.get("title") or {}).get("videos") or []

        season_nr = _extract_first_number(season)
        episode_nr = _extract_first_number(episode)
        if season_nr is None or episode_nr is None:
            return []

        api_id = self._season_api_ids.get((title, season_nr), "")
        if not api_id:
            # Season-id cache empty -> populate it first
            self.seasons_for(title)
            api_id = self._season_api_ids.get((title, season_nr), "")
            if not api_id:
                return []

        url = _URL_EPISODE.format(id=api_id, s=season_nr, e=episode_nr)
        data = self._get_json(url)
        if not isinstance(data, dict):
            return []
        return (data.get("episode") or {}).get("videos") or []
|
||
|
||
def _hosters_from_videos(self, videos: list) -> dict[str, str]:
|
||
"""Konvertiert videos[] zu {Hoster-Name → src-URL}, mit Skip/Prefer-Logik."""
|
||
hosters: dict[str, str] = {}
|
||
seen: set[str] = set()
|
||
for v in videos:
|
||
if not isinstance(v, dict):
|
||
continue
|
||
src = _safe_str(v.get("src"))
|
||
if not src or "youtube" in src.lower():
|
||
continue
|
||
domain = urlparse(src).netloc.lstrip("www.")
|
||
if domain in _VIDEO_SKIP_DOMAINS:
|
||
continue
|
||
name = _normalize_video_name(_safe_str(v.get("name")), src)
|
||
if not name:
|
||
name = domain
|
||
base_name = name
|
||
i = 2
|
||
while name in seen:
|
||
name = f"{base_name} {i}"
|
||
i += 1
|
||
seen.add(name)
|
||
hosters[name] = src
|
||
return hosters
|
||
|
||
def available_hosters_for(self, title: str, season: str, episode: str) -> List[str]:
|
||
videos = self._videos_for(title, season, episode)
|
||
return list(self._hosters_from_videos(videos).keys())
|
||
|
||
def set_preferred_hosters(self, hosters: List[str]) -> None:
|
||
self._preferred_hosters = [h for h in hosters if h]
|
||
|
||
def stream_link_for(self, title: str, season: str, episode: str) -> Optional[str]:
|
||
videos = self._videos_for(title, season, episode)
|
||
if not videos:
|
||
return None
|
||
hosters = self._hosters_from_videos(videos)
|
||
if not hosters:
|
||
return None
|
||
# Bevorzugten Hoster nutzen falls gesetzt
|
||
for preferred in self._preferred_hosters:
|
||
key = preferred.casefold()
|
||
for name, url in hosters.items():
|
||
if key in name.casefold() or key in url.casefold():
|
||
return url
|
||
# Fallback: Prefer-Domains zuerst, dann Rest
|
||
for url in hosters.values():
|
||
domain = urlparse(url).netloc.lstrip("www.")
|
||
if domain in _VIDEO_PREFER_DOMAINS:
|
||
return url
|
||
return next(iter(hosters.values()))
|
||
|
||
def _resolve_vidara(self, filecode: str) -> Optional[str]:
|
||
"""Löst einen vidara.to-Filecode über die vidara-API auf → HLS-URL."""
|
||
api_url = f"https://vidara.to/api/stream?filecode={filecode}"
|
||
vidara_headers = {
|
||
**HEADERS,
|
||
"Referer": f"https://vidara.to/e/{filecode}",
|
||
"Origin": "https://vidara.to",
|
||
}
|
||
data = self._get_json(api_url, headers=vidara_headers)
|
||
if not isinstance(data, dict):
|
||
return None
|
||
return _safe_str(data.get("streaming_url")) or None
|
||
|
||
    def _resolve_vidhide(self, embed_url: str) -> Optional[str]:
        """Resolve a VidHide embed link (moflix-stream.click) -> HLS URL.

        Uses a fresh session with a real Chrome UA to bypass UA-based
        blocking. Unpacks the p.a.c.k.e.r. JS and extracts the HLS stream
        from links.hls4/hls3/hls2.
        """
        # Fresh session (NOT the cached "moflix" session) with VidHide headers
        html = self._get_html(embed_url, headers=_VIDHIDE_HEADERS, fresh_session=True)
        if not html or "eval(function(p,a,c,k,e" not in html:
            return None

        unpacked = _unpack_packer(html)

        # Priority: hls4 > hls3 > hls2
        for hls_key in ("hls4", "hls3", "hls2"):
            m = re.search(rf'"{hls_key}"\s*:\s*"(https://[^"]+)"', unpacked)
            if m:
                url = m.group(1)
                if url:
                    # Kodi needs Referer + UA appended as a header suffix so
                    # the CDN accepts the HLS URL
                    from urllib.parse import urlencode
                    headers = urlencode({
                        "Referer": embed_url,
                        "User-Agent": _VIDHIDE_HEADERS["User-Agent"],
                    })
                    return f"{url}|{headers}"
        return None
|
||
|
||
    def resolve_stream_link(self, link: str) -> Optional[str]:
        """Resolve a hoster embed *link* to a playable stream URL, or None."""
        link = (link or "").strip()
        if not link:
            return None

        # vidara.to: resolve directly via its own API
        vidara_m = re.search(r'vidara\.to/e/([A-Za-z0-9_-]+)', link)
        if vidara_m:
            resolved = self._resolve_vidara(vidara_m.group(1))
            if resolved:
                return resolved

        # VidHide (moflix-stream.click): try ResolveURL first (the FileLions
        # module uses Kodi's libcurl with a different TLS fingerprint), then
        # our own resolver
        if "moflix-stream.click" in link:
            try:
                from plugin_helpers import resolve_via_resolveurl
                resolved = resolve_via_resolveurl(link, fallback_to_link=False)
                if resolved:
                    return resolved
            except Exception:
                pass
            # Fallback: our own p.a.c.k.e.r. resolver
            resolved = self._resolve_vidhide(link)
            if resolved:
                return resolved
            return None

        # Fallback: ResolveURL (without link fallback -- better None than an
        # unresolvable link)
        try:
            from plugin_helpers import resolve_via_resolveurl
            return resolve_via_resolveurl(link, fallback_to_link=False)
        except Exception:
            return None
|
||
|
||
# ------------------------------------------------------------------
|
||
# Metadaten
|
||
# ------------------------------------------------------------------
|
||
|
||
    def metadata_for(
        self, title: str
    ) -> tuple[dict[str, str], dict[str, str], list[object] | None]:
        """Return (info, art, None) metadata for *title* (Kodi listitem style)."""
        title = (title or "").strip()
        if not title:
            return {}, {}, None

        info: dict[str, str] = {"title": title}
        art: dict[str, str] = {}

        # Cache hit
        cached = self._title_meta.get(title)
        if cached:
            plot, poster, fanart = cached
            if plot:
                info["plot"] = plot
            if poster:
                art["thumb"] = poster
                art["poster"] = poster
            if fanart:
                art["fanart"] = fanart
                art["landscape"] = fanart
            # Only short-circuit when the cache actually contained something
            if "plot" in info or art:
                return info, art, None

        # API fetch
        url = self._ensure_title_url(title)
        if not url:
            return info, art, None

        data = self._get_json(url)
        if not isinstance(data, dict):
            return info, art, None

        title_obj = data.get("title") or {}
        plot = _safe_str(title_obj.get("description"))
        poster = _safe_str(title_obj.get("poster"))
        fanart = _safe_str(title_obj.get("backdrop"))
        rating_raw = title_obj.get("rating")
        year_raw = _safe_str(title_obj.get("release_date"))

        if plot:
            info["plot"] = plot
        if rating_raw is not None:
            try:
                info["rating"] = str(float(rating_raw))
            except (ValueError, TypeError):
                pass
        if year_raw and len(year_raw) >= 4:
            # release_date starts with the four-digit year
            info["year"] = year_raw[:4]

        if poster:
            art["thumb"] = poster
            art["poster"] = poster
        if fanart:
            art["fanart"] = fanart
            art["landscape"] = fanart

        # Cache for subsequent calls
        self._title_meta[title] = (plot, poster, fanart)

        return info, art, None
|
||
|
||
# ------------------------------------------------------------------
|
||
# Browsing-Features
|
||
# ------------------------------------------------------------------
|
||
|
||
    def popular_series(self) -> List[str]:
        """Titles from the 'series' channel."""
        return self._titles_from_channel("series")
|
||
|
||
    def new_titles(self) -> List[str]:
        """Titles from the 'now-playing' channel (first page)."""
        return self._titles_from_channel("now-playing")
|
||
|
||
    def new_titles_page(self, page: int = 1) -> List[str]:
        """Titles from the 'now-playing' channel at the given *page*."""
        return self._titles_from_channel("now-playing", page=page)
|
||
|
||
    def genres(self) -> List[str]:
        """Alphabetically sorted names of the supported genres."""
        return sorted(GENRE_SLUGS.keys())
|
||
|
||
    def titles_for_genre(self, genre: str) -> List[str]:
        """First page of titles for *genre* (delegates to the paged variant)."""
        return self.titles_for_genre_page(genre, 1)
|
||
|
||
def titles_for_genre_page(self, genre: str, page: int = 1) -> List[str]:
|
||
slug = GENRE_SLUGS.get(genre, "")
|
||
if not slug:
|
||
return []
|
||
return self._titles_from_channel(slug, page=page)
|
||
|
||
    def collections(self) -> List[str]:
        """Alphabetically sorted names of the available collections."""
        return sorted(COLLECTION_SLUGS.keys())
|
||
|
||
def titles_for_collection(self, collection: str, page: int = 1) -> List[str]:
|
||
slug = COLLECTION_SLUGS.get(collection, "")
|
||
if not slug:
|
||
return []
|
||
return self._titles_from_channel(slug, page=page)
|
||
|
||
    def capabilities(self) -> set[str]:
        """Optional browsing features this plugin implements."""
        return {"popular_series", "new_titles", "collections", "genres"}
|