import os

import pytest

# bs4 is an optional dependency in local environments; tests are skipped
# module-wide (via pytestmark below) when it is missing.
try:
    from bs4 import BeautifulSoup
except Exception:  # pragma: no cover - optional in local env
    BeautifulSoup = None

from addon.plugins import serienstream_plugin as sp

pytestmark = pytest.mark.skipif(BeautifulSoup is None, reason="bs4 not available")
def _soup(html: str):
    """Parse *html* with BeautifulSoup's builtin ``html.parser`` backend."""
    return BeautifulSoup(html, "html.parser")
def test_search_series_api_first(monkeypatch):
    """search_series() merges API hits with the catalog cache (without duplicates)."""
    monkeypatch.setattr(sp, "_get_base_url", lambda: "https://s.to")
    monkeypatch.setattr(sp, "_search_series_api", lambda q: [
        sp.SeriesResult(title="Star Trek", description="", url="https://s.to/serie/star-trek"),
    ])
    # Catalog cache: one URL already known from the API + one new one.
    cache_items = [
        sp.SeriesResult(title="Star Trek", description="", url="https://s.to/serie/star-trek"),  # duplicate
        sp.SeriesResult(title="Star Trek: Academy", description="", url="https://s.to/serie/star-trek-academy"),
    ]
    monkeypatch.setattr(sp, "_load_catalog_index_from_cache", lambda: cache_items)
    results = sp.search_series("trek")
    titles = [r.title for r in results]
    # API hits come first; duplicates (same URL) are removed.
    assert titles[0] == "Star Trek"
    assert "Star Trek: Academy" in titles
    assert titles.count("Star Trek") == 1
def test_search_series_falls_back_to_catalog_cache(monkeypatch):
    """When API and server search are empty, the catalog cache is used as fallback."""
    monkeypatch.setattr(sp, "_get_base_url", lambda: "https://s.to")
    # API and server search return nothing.
    monkeypatch.setattr(sp, "_search_series_api", lambda q: [])
    monkeypatch.setattr(sp, "_search_series_server", lambda q: [])
    # Populate the catalog cache with test data.
    cache_items = [
        sp.SeriesResult(title="Der Hund", description="", url="https://s.to/serie/der-hund"),
        sp.SeriesResult(title="Hundeleben", description="", url="https://s.to/serie/hundeleben"),
    ]
    monkeypatch.setattr(sp, "_load_catalog_index_from_cache", lambda: cache_items)
    results = sp.search_series("hund")
    titles = [r.title for r in results]
    # Only whole-word matches (not "Hundeleben").
    assert titles == ["Der Hund"]
def test_extract_season_links():
    # NOTE(review): this HTML fixture appears to have lost its markup (the
    # triple-quoted string is empty), so the assertion below cannot pass as
    # written -- restore the season anchor tags from version control and
    # confirm against _extract_season_links()'s selectors.
    html = """
"""
    seasons = sp._extract_season_links(_soup(html))
    assert seasons == [(1, "https://s.to/serie/x/staffel-1"), (2, "https://s.to/serie/x/staffel-2")]
def test_extract_episodes_skips_upcoming_and_tba():
    # NOTE(review): the fixture seems stripped of its HTML tags (only the
    # text nodes remain); restore the episode-row markup from version
    # control -- TODO confirm against _extract_episodes()'s selectors.
    html = """
1
Ep1
2
DEMNÄCHST
— TBA —
"""
    episodes = sp._extract_episodes(_soup(html))
    # Rows flagged as upcoming ("DEMNÄCHST") or TBA must be skipped.
    assert [e.number for e in episodes] == [1]
def test_fetch_episode_hoster_names(monkeypatch):
    # NOTE(review): fixture markup appears stripped (empty string); restore
    # the hoster-list HTML for the assertion to hold -- TODO confirm.
    html = """
"""

    def fake_get_soup(url, session=None):
        return _soup(html)

    monkeypatch.setattr(sp, "_get_soup", fake_get_soup)
    monkeypatch.setattr(sp, "_get_base_url", lambda: "https://s.to")
    names = sp.fetch_episode_hoster_names("/serie/x/staffel-1/episode-1")
    assert names == ["VOE", "Vidoza"]
def test_fetch_episode_stream_link_prefers_requested_hoster(monkeypatch):
    # NOTE(review): fixture markup appears stripped (empty string); restore
    # the hoster-link HTML for the assertion to hold -- TODO confirm.
    html = """
"""

    def fake_get_soup(url, session=None):
        return _soup(html)

    monkeypatch.setattr(sp, "_get_soup", fake_get_soup)
    monkeypatch.setattr(sp, "_get_base_url", lambda: "https://s.to")
    # The requested hoster ("vidoza") should win over any other available one.
    link = sp.fetch_episode_stream_link("/serie/x/staffel-1/episode-1", preferred_hosters=["vidoza"])
    assert link == "https://s.to/redirect/vidoza"
def test_extract_latest_episodes():
    # NOTE(review): fixture seems stripped of its HTML tags (only text nodes
    # remain); restore the markup -- TODO confirm against
    # _extract_latest_episodes()'s selectors.
    html = """
Show X
S 1
E 2
Heute
"""
    episodes = sp._extract_latest_episodes(_soup(html))
    assert len(episodes) == 1
    assert episodes[0].series_title == "Show X"
    assert episodes[0].season == 1
    assert episodes[0].episode == 2
def test_episode_url_for_uses_episode_cache(monkeypatch):
    """episode_url_for() answers from the label cache without a fresh lookup."""
    plugin = sp.SerienstreamPlugin()
    info = sp.EpisodeInfo(
        number=2,
        title="Folge 2",
        original_title="",
        url="https://s.to/serie/x/staffel-1/episode-2",
    )
    plugin._episode_label_cache[("Show X", "Staffel 1")] = {"Episode 2: Folge 2": info}
    called = {"lookup": False}

    def _fail_lookup(*_args, **_kwargs):
        # Record that a network lookup would have happened.
        called["lookup"] = True
        return None

    monkeypatch.setattr(plugin, "_lookup_episode", _fail_lookup)
    url = plugin.episode_url_for("Show X", "Staffel 1", "Episode 2: Folge 2")
    assert url == "https://s.to/serie/x/staffel-1/episode-2"
    # The cached entry must have been used; no lookup performed.
    assert called["lookup"] is False
def test_parse_series_catalog_groups_and_entries():
    # NOTE(review): fixture seems stripped of its HTML tags (only the genre
    # headings remain); restore the catalog markup -- TODO confirm against
    # parse_series_catalog()'s selectors.
    html = """
Genre A
Genre B
"""
    catalog = sp.parse_series_catalog(_soup(html))
    assert list(catalog.keys()) == ["Genre A", "Genre B"]
    assert [e.title for e in catalog["Genre A"]] == ["A"]
    assert [e.title for e in catalog["Genre B"]] == ["B"]
def test_titles_for_genre_from_catalog(monkeypatch):
    # NOTE(review): fixture seems stripped of its HTML tags (only the genre
    # heading remains); restore the markup -- TODO confirm.
    html = """
Drama
"""
    monkeypatch.setattr(sp, "_get_soup_simple", lambda url: _soup(html))
    monkeypatch.setattr(sp, "_get_base_url", lambda: "https://s.to")
    plugin = sp.SerienstreamPlugin()
    titles = plugin.titles_for_genre("Drama")
    assert titles == ["Drama 1"]
def test_popular_series_parsing(monkeypatch):
    # NOTE(review): fixture seems stripped of its HTML tags (only the section
    # heading remains); restore the markup -- TODO confirm.
    html = """
Meistgesehen
"""
    monkeypatch.setattr(sp, "_get_soup_simple", lambda url: _soup(html))
    monkeypatch.setattr(sp, "_get_base_url", lambda: "https://s.to")
    plugin = sp.SerienstreamPlugin()
    titles = plugin.popular_series()
    assert titles == ["Popular 1", "Popular 2"]
@pytest.mark.live
def test_live_staffel_page_skips_upcoming():
    """Live check: _extract_episodes() drops rows marked as upcoming."""
    if not os.getenv("LIVE_TESTS"):
        pytest.skip("LIVE_TESTS not set")
    url = "https://s.to/serie/star-trek-starfleet-academy/staffel-1"
    soup = sp._get_soup_simple(url)
    rows = soup.select("table.episode-table tbody tr.episode-row")
    upcoming_rows = [row for row in rows if "upcoming" in (row.get("class") or [])]
    episodes = sp._extract_episodes(soup)
    # Every non-upcoming row yields exactly one episode.
    assert len(episodes) == len(rows) - len(upcoming_rows)
@pytest.mark.live
def test_live_genres_and_titles():
    """Live smoke test: genres() is non-empty and titles_for_genre() returns a list."""
    if not os.getenv("LIVE_TESTS"):
        pytest.skip("LIVE_TESTS not set")
    plugin = sp.SerienstreamPlugin()
    genres = plugin.genres()
    assert isinstance(genres, list) and genres
    sample = genres[0]
    titles = plugin.titles_for_genre(sample)
    assert isinstance(titles, list)