main: consolidate integrated changes after v0.1.54

2026-02-23 20:38:36 +01:00
parent 6ce1bf71c1
commit 22b9ae9c31
38 changed files with 3139 additions and 1306 deletions


@@ -13,7 +13,8 @@ import hashlib
 import json
 import re
 import time
-from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, TypeAlias
+from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple
+from urllib.parse import quote

 try:  # pragma: no cover - optional dependency
     import requests
@@ -43,8 +44,8 @@ if TYPE_CHECKING: # pragma: no cover
     from requests import Session as RequestsSession
     from bs4 import BeautifulSoup as BeautifulSoupT  # type: ignore[import-not-found]
 else:  # pragma: no cover
-    RequestsSession: TypeAlias = Any
-    BeautifulSoupT: TypeAlias = Any
+    RequestsSession = Any
+    BeautifulSoupT = Any

 SETTING_BASE_URL = "aniworld_base_url"
@@ -69,6 +70,16 @@ HEADERS = {
 SESSION_CACHE_TTL_SECONDS = 300
 SESSION_CACHE_PREFIX = "viewit.aniworld"
 SESSION_CACHE_MAX_TITLE_URLS = 800

+ProgressCallback = Optional[Callable[[str, Optional[int]], Any]]
+
+
+def _emit_progress(callback: ProgressCallback, message: str, percent: Optional[int] = None) -> None:
+    if not callable(callback):
+        return
+    try:
+        callback(str(message or ""), None if percent is None else int(percent))
+    except Exception:
+        return
+
 @dataclass
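
Note: `_emit_progress` deliberately swallows any exception the callback raises, so a faulty UI handler can never abort a fetch. A minimal sketch of wiring a callback through the new keyword argument; the `on_progress` name and the print sink are illustrative, not part of the commit:

```python
from typing import Optional

def on_progress(message: str, percent: Optional[int]) -> None:
    # Matches the (str, Optional[int]) shape that ProgressCallback defines.
    print(f"[{'--' if percent is None else percent:>2}%] {message}")

# assuming: from <plugin module> import search_animes
results = search_animes("one piece", progress_callback=on_progress)
```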
@@ -126,7 +137,7 @@ def _latest_episodes_url() -> str:
 def _search_url(query: str) -> str:
-    return f"{_get_base_url()}/search?q={query}"
+    return f"{_get_base_url()}/search?q={quote((query or '').strip())}"


 def _search_api_url() -> str:
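
Routing the query through `urllib.parse.quote` makes `_search_url` safe for queries containing spaces or reserved characters, which previously forced callers to pre-encode (see the `requests.utils.quote` call removed from `search_animes` further down). A quick check of the encoding:

```python
from urllib.parse import quote

assert quote("tokyo ghoul:re".strip()) == "tokyo%20ghoul%3Are"
# _search_url would then yield: <base>/search?q=tokyo%20ghoul%3Are
```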
@@ -289,37 +300,56 @@ def _get_soup(url: str, *, session: Optional[RequestsSession] = None) -> BeautifulSoupT:
     _ensure_requests()
     _log_visit(url)
     sess = session or get_requests_session("aniworld", headers=HEADERS)
+    response = None
     try:
         response = sess.get(url, headers=HEADERS, timeout=DEFAULT_TIMEOUT)
         response.raise_for_status()
     except Exception as exc:
         _log_error(f"GET {url} failed: {exc}")
         raise
-    if response.url and response.url != url:
-        _log_url(response.url, kind="REDIRECT")
-    _log_response_html(url, response.text)
-    if _looks_like_cloudflare_challenge(response.text):
-        raise RuntimeError("Cloudflare-Schutz erkannt. requests reicht ggf. nicht aus.")
-    return BeautifulSoup(response.text, "html.parser")
+    try:
+        final_url = (response.url or url) if response is not None else url
+        body = (response.text or "") if response is not None else ""
+        if final_url != url:
+            _log_url(final_url, kind="REDIRECT")
+        _log_response_html(url, body)
+        if _looks_like_cloudflare_challenge(body):
+            raise RuntimeError("Cloudflare-Schutz erkannt. requests reicht ggf. nicht aus.")
+        return BeautifulSoup(body, "html.parser")
+    finally:
+        if response is not None:
+            try:
+                response.close()
+            except Exception:
+                pass


 def _get_html_simple(url: str) -> str:
     _ensure_requests()
     _log_visit(url)
     sess = get_requests_session("aniworld", headers=HEADERS)
+    response = None
     try:
         response = sess.get(url, headers=HEADERS, timeout=DEFAULT_TIMEOUT)
         response.raise_for_status()
     except Exception as exc:
         _log_error(f"GET {url} failed: {exc}")
         raise
-    if response.url and response.url != url:
-        _log_url(response.url, kind="REDIRECT")
-    body = response.text
-    _log_response_html(url, body)
-    if _looks_like_cloudflare_challenge(body):
-        raise RuntimeError("Cloudflare-Schutz erkannt. requests reicht ggf. nicht aus.")
-    return body
+    try:
+        final_url = (response.url or url) if response is not None else url
+        body = (response.text or "") if response is not None else ""
+        if final_url != url:
+            _log_url(final_url, kind="REDIRECT")
+        _log_response_html(url, body)
+        if _looks_like_cloudflare_challenge(body):
+            raise RuntimeError("Cloudflare-Schutz erkannt. requests reicht ggf. nicht aus.")
+        return body
+    finally:
+        if response is not None:
+            try:
+                response.close()
+            except Exception:
+                pass


 def _get_soup_simple(url: str) -> BeautifulSoupT:
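
The new try/finally blocks guarantee the response is closed (returning its connection to the session pool) even when the Cloudflare check raises. The same shape can be expressed with `contextlib.closing`; this is an equivalent sketch for illustration, not the code the commit uses:

```python
from contextlib import closing

import requests

def fetch_body(session: requests.Session, url: str, timeout: float = 10.0) -> str:
    # closing() invokes response.close() on exit, mirroring the finally blocks above.
    with closing(session.get(url, timeout=timeout)) as response:
        response.raise_for_status()
        return response.text or ""
```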
@@ -351,17 +381,27 @@ def _post_json(url: str, *, payload: Dict[str, str], session: Optional[RequestsSession] = None)
     _ensure_requests()
     _log_visit(url)
     sess = session or get_requests_session("aniworld", headers=HEADERS)
-    response = sess.post(url, data=payload, headers=HEADERS, timeout=DEFAULT_TIMEOUT)
-    response.raise_for_status()
-    if response.url and response.url != url:
-        _log_url(response.url, kind="REDIRECT")
-    _log_response_html(url, response.text)
-    if _looks_like_cloudflare_challenge(response.text):
-        raise RuntimeError("Cloudflare-Schutz erkannt. requests reicht ggf. nicht aus.")
+    response = None
     try:
-        return response.json()
-    except Exception:
-        return None
+        response = sess.post(url, data=payload, headers=HEADERS, timeout=DEFAULT_TIMEOUT)
+        response.raise_for_status()
+        final_url = (response.url or url) if response is not None else url
+        body = (response.text or "") if response is not None else ""
+        if final_url != url:
+            _log_url(final_url, kind="REDIRECT")
+        _log_response_html(url, body)
+        if _looks_like_cloudflare_challenge(body):
+            raise RuntimeError("Cloudflare-Schutz erkannt. requests reicht ggf. nicht aus.")
+        try:
+            return response.json()
+        except Exception:
+            return None
+    finally:
+        if response is not None:
+            try:
+                response.close()
+            except Exception:
+                pass


 def _extract_canonical_url(soup: BeautifulSoupT, fallback: str) -> str:
@@ -555,10 +595,18 @@ def resolve_redirect(target_url: str) -> Optional[str]:
     _log_visit(normalized_url)
     session = get_requests_session("aniworld", headers=HEADERS)
     _get_soup(_get_base_url(), session=session)
-    response = session.get(normalized_url, headers=HEADERS, timeout=DEFAULT_TIMEOUT, allow_redirects=True)
-    if response.url:
-        _log_url(response.url, kind="RESOLVED")
-    return response.url if response.url else None
+    response = None
+    try:
+        response = session.get(normalized_url, headers=HEADERS, timeout=DEFAULT_TIMEOUT, allow_redirects=True)
+        if response.url:
+            _log_url(response.url, kind="RESOLVED")
+        return response.url if response.url else None
+    finally:
+        if response is not None:
+            try:
+                response.close()
+            except Exception:
+                pass


 def fetch_episode_hoster_names(episode_url: str) -> List[str]:
@@ -629,11 +677,12 @@ def fetch_episode_stream_link(
     return resolved


-def search_animes(query: str) -> List[SeriesResult]:
+def search_animes(query: str, *, progress_callback: ProgressCallback = None) -> List[SeriesResult]:
     _ensure_requests()
     query = (query or "").strip()
     if not query:
         return []
+    _emit_progress(progress_callback, "AniWorld API-Suche", 15)
     session = get_requests_session("aniworld", headers=HEADERS)
     try:
         session.get(_get_base_url(), headers=HEADERS, timeout=DEFAULT_TIMEOUT)
@@ -643,7 +692,9 @@ def search_animes(query: str) -> List[SeriesResult]:
     results: List[SeriesResult] = []
     seen: set[str] = set()
     if isinstance(data, list):
-        for entry in data:
+        for idx, entry in enumerate(data, start=1):
+            if idx == 1 or idx % 50 == 0:
+                _emit_progress(progress_callback, f"API auswerten {idx}/{len(data)}", 35)
             if not isinstance(entry, dict):
                 continue
             title = _strip_html((entry.get("title") or "").strip())
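
The `idx == 1 or idx % 50 == 0` guard keeps callback traffic bounded on large API result lists: the first entry and every fiftieth report progress, everything in between stays silent. A self-contained check of the firing pattern:

```python
# For 120 entries the guard fires exactly three times.
hits = [idx for idx in range(1, 121) if idx == 1 or idx % 50 == 0]
assert hits == [1, 50, 100]
```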
@@ -665,10 +716,16 @@ def search_animes(query: str) -> List[SeriesResult]:
             seen.add(key)
             description = (entry.get("description") or "").strip()
             results.append(SeriesResult(title=title, description=description, url=url))
+        _emit_progress(progress_callback, f"API-Treffer: {len(results)}", 85)
         return results

-    soup = _get_soup_simple(_search_url(requests.utils.quote(query)))
-    for anchor in soup.select("a[href^='/anime/stream/'][href]"):
+    _emit_progress(progress_callback, "HTML-Suche (Fallback)", 55)
+    soup = _get_soup_simple(_search_url(query))
+    anchors = soup.select("a[href^='/anime/stream/'][href]")
+    total_anchors = max(1, len(anchors))
+    for idx, anchor in enumerate(anchors, start=1):
+        if idx == 1 or idx % 100 == 0:
+            _emit_progress(progress_callback, f"HTML auswerten {idx}/{total_anchors}", 70)
         href = (anchor.get("href") or "").strip()
         if not href or "/staffel-" in href or "/episode-" in href:
             continue
@@ -686,6 +743,7 @@ def search_animes(query: str) -> List[SeriesResult]:
             continue
         seen.add(key)
         results.append(SeriesResult(title=title, description="", url=url))
+    _emit_progress(progress_callback, f"HTML-Treffer: {len(results)}", 85)
     return results
@@ -696,6 +754,7 @@ class AniworldPlugin(BasisPlugin):
     def __init__(self) -> None:
         self._anime_results: Dict[str, SeriesResult] = {}
         self._title_url_cache: Dict[str, str] = self._load_title_url_cache()
+        self._title_meta: Dict[str, tuple[str, str]] = {}
         self._genre_names_cache: Optional[List[str]] = None
         self._season_cache: Dict[str, List[SeasonInfo]] = {}
         self._season_links_cache: Dict[str, List[SeasonInfo]] = {}
@@ -760,8 +819,135 @@
             changed = True
         if changed and persist:
             self._save_title_url_cache()
+        if description:
+            old_plot, old_poster = self._title_meta.get(title, ("", ""))
+            self._title_meta[title] = (description.strip() or old_plot, old_poster)
         return changed

+    def _store_title_meta(self, title: str, *, plot: str = "", poster: str = "") -> None:
+        title = (title or "").strip()
+        if not title:
+            return
+        old_plot, old_poster = self._title_meta.get(title, ("", ""))
+        merged_plot = (plot or old_plot or "").strip()
+        merged_poster = (poster or old_poster or "").strip()
+        self._title_meta[title] = (merged_plot, merged_poster)
+
+    @staticmethod
+    def _is_series_image_url(url: str) -> bool:
+        value = (url or "").strip().casefold()
+        if not value:
+            return False
+        blocked = (
+            "/public/img/facebook",
+            "/public/img/logo",
+            "aniworld-logo",
+            "favicon",
+            "/public/img/german.svg",
+            "/public/img/japanese-",
+        )
+        return not any(marker in value for marker in blocked)
+
+    @staticmethod
+    def _extract_style_url(style_value: str) -> str:
+        style_value = (style_value or "").strip()
+        if not style_value:
+            return ""
+        match = re.search(r"url\((['\"]?)(.*?)\1\)", style_value, flags=re.IGNORECASE)
+        if not match:
+            return ""
+        return (match.group(2) or "").strip()
+
+    def _extract_series_metadata(self, soup: BeautifulSoupT) -> tuple[str, str, str]:
+        if not soup:
+            return "", "", ""
+        plot = ""
+        poster = ""
+        fanart = ""
+        root = soup.select_one("#series") or soup
+        description_node = root.select_one("p.seri_des")
+        if description_node is not None:
+            full_text = (description_node.get("data-full-description") or "").strip()
+            short_text = (description_node.get_text(" ", strip=True) or "").strip()
+            plot = full_text or short_text
+        if not plot:
+            for selector in ("meta[property='og:description']", "meta[name='description']"):
+                node = soup.select_one(selector)
+                if node is None:
+                    continue
+                content = (node.get("content") or "").strip()
+                if content:
+                    plot = content
+                    break
+        if not plot:
+            for selector in (".series-description", ".seri_des", ".description", "article p"):
+                node = soup.select_one(selector)
+                if node is None:
+                    continue
+                text = (node.get_text(" ", strip=True) or "").strip()
+                if text:
+                    plot = text
+                    break
+        cover = root.select_one("div.seriesCoverBox img[itemprop='image'], div.seriesCoverBox img")
+        if cover is not None:
+            for attr in ("data-src", "src"):
+                value = (cover.get(attr) or "").strip()
+                if value:
+                    candidate = _absolute_url(value)
+                    if self._is_series_image_url(candidate):
+                        poster = candidate
+                        break
+        if not poster:
+            for selector in ("meta[property='og:image']", "meta[name='twitter:image']"):
+                node = soup.select_one(selector)
+                if node is None:
+                    continue
+                content = (node.get("content") or "").strip()
+                if content:
+                    candidate = _absolute_url(content)
+                    if self._is_series_image_url(candidate):
+                        poster = candidate
+                        break
+        if not poster:
+            for selector in ("img.seriesCoverBox", ".seriesCoverBox img"):
+                image = soup.select_one(selector)
+                if image is None:
+                    continue
+                value = (image.get("data-src") or image.get("src") or "").strip()
+                if value:
+                    candidate = _absolute_url(value)
+                    if self._is_series_image_url(candidate):
+                        poster = candidate
+                        break
+        backdrop_node = root.select_one("section.title .backdrop, .SeriesSection .backdrop, .backdrop")
+        if backdrop_node is not None:
+            raw_style = (backdrop_node.get("style") or "").strip()
+            style_url = self._extract_style_url(raw_style)
+            if style_url:
+                candidate = _absolute_url(style_url)
+                if self._is_series_image_url(candidate):
+                    fanart = candidate
+        if not fanart:
+            for selector in ("meta[property='og:image']",):
+                node = soup.select_one(selector)
+                if node is None:
+                    continue
+                content = (node.get("content") or "").strip()
+                if content:
+                    candidate = _absolute_url(content)
+                    if self._is_series_image_url(candidate):
+                        fanart = candidate
+                        break
+        return plot, poster, fanart
+
     @staticmethod
     def _season_links_cache_name(series_url: str) -> str:
         digest = hashlib.sha1((series_url or "").encode("utf-8")).hexdigest()[:20]
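
`_extract_style_url` relies on the backreference `\1` to accept all three CSS quoting styles of `url(...)`. A standalone check of that regex (the sample style strings are made up):

```python
import re

STYLE_URL = re.compile(r"url\((['\"]?)(.*?)\1\)", flags=re.IGNORECASE)

for style in (
    "background-image: url(/img/cover.jpg)",
    "background-image: url('/img/cover.jpg')",
    'background-image: URL("/img/cover.jpg")',
):
    match = STYLE_URL.search(style)
    assert match is not None and match.group(2) == "/img/cover.jpg"
```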
@@ -893,6 +1079,43 @@
         return None

+    def metadata_for(self, title: str) -> tuple[dict[str, str], dict[str, str], list[object] | None]:
+        title = (title or "").strip()
+        if not title:
+            return {}, {}, None
+        info: dict[str, str] = {"title": title}
+        art: dict[str, str] = {}
+        cached_plot, cached_poster = self._title_meta.get(title, ("", ""))
+        if cached_plot:
+            info["plot"] = cached_plot
+        if cached_poster:
+            art = {"thumb": cached_poster, "poster": cached_poster}
+        if "plot" in info and art:
+            return info, art, None
+        series = self._find_series_by_title(title)
+        if series is None or not series.url:
+            return info, art, None
+        if series.description and "plot" not in info:
+            info["plot"] = series.description
+        try:
+            soup = _get_soup(series.url, session=get_requests_session("aniworld", headers=HEADERS))
+            plot, poster, fanart = self._extract_series_metadata(soup)
+        except Exception:
+            plot, poster, fanart = "", "", ""
+        if plot:
+            info["plot"] = plot
+        if poster:
+            art = {"thumb": poster, "poster": poster}
+        if fanart:
+            art["fanart"] = fanart
+            art["landscape"] = fanart
+        self._store_title_meta(title, plot=info.get("plot", ""), poster=poster)
+        return info, art, None
+
     def _ensure_popular(self) -> List[SeriesResult]:
         if self._popular_cache is not None:
             return list(self._popular_cache)
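
`metadata_for` returns an info mapping, an art mapping (`thumb`/`poster`, plus `fanart`/`landscape` when a backdrop is found), and a third slot this implementation always leaves as `None`; successful lookups are memoized in `_title_meta` so repeated calls skip the network. An illustrative call (the title string is an arbitrary example):

```python
# assuming: from <plugin module> import AniworldPlugin
plugin = AniworldPlugin()
info, art, extra = plugin.metadata_for("Frieren")
# info  -> {"title": "Frieren", "plot": "..."} when a description was found
# art   -> {"thumb": ..., "poster": ..., "fanart": ..., "landscape": ...}
# extra -> always None here
```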
@@ -1151,7 +1374,7 @@
             return self._episode_label_cache.get(cache_key, {}).get(episode_label)
         return None

-    async def search_titles(self, query: str) -> List[str]:
+    async def search_titles(self, query: str, progress_callback: ProgressCallback = None) -> List[str]:
         query = (query or "").strip()
         if not query:
             self._anime_results.clear()
@@ -1163,7 +1386,8 @@
         if not self._requests_available:
             raise RuntimeError("AniworldPlugin kann ohne requests/bs4 nicht suchen.")
         try:
-            results = search_animes(query)
+            _emit_progress(progress_callback, "AniWorld Suche startet", 10)
+            results = search_animes(query, progress_callback=progress_callback)
         except Exception as exc:  # pragma: no cover
             self._anime_results.clear()
             self._season_cache.clear()
@@ -1178,6 +1402,7 @@
         self._season_cache.clear()
         self._season_links_cache.clear()
         self._episode_label_cache.clear()
+        _emit_progress(progress_callback, f"Treffer aufbereitet: {len(results)}", 95)
         return [result.title for result in results]

     def _ensure_seasons(self, title: str) -> List[SeasonInfo]:
@@ -1213,6 +1438,18 @@
             _log_url(link, kind="FOUND")
         return link

+    def episode_url_for(self, title: str, season: str, episode: str) -> str:
+        cache_key = (title, season)
+        cached = self._episode_label_cache.get(cache_key)
+        if cached:
+            info = cached.get(episode)
+            if info and info.url:
+                return info.url
+        episode_info = self._lookup_episode(title, season, episode)
+        if episode_info and episode_info.url:
+            return episode_info.url
+        return ""
+
     def available_hosters_for(self, title: str, season: str, episode: str) -> List[str]:
         if not self._requests_available:
             raise RuntimeError("AniworldPlugin kann ohne requests/bs4 keine Hoster laden.")