Add Filmpalast A-Z browsing and document Gitea release upload
@@ -227,6 +227,8 @@ class FilmpalastPlugin(BasisPlugin):
         self._hoster_cache: Dict[str, Dict[str, str]] = {}
         self._genre_to_url: Dict[str, str] = {}
         self._genre_page_count_cache: Dict[str, int] = {}
+        self._alpha_to_url: Dict[str, str] = {}
+        self._alpha_page_count_cache: Dict[str, int] = {}
         self._requests_available = REQUESTS_AVAILABLE
         self._default_preferred_hosters: List[str] = list(DEFAULT_PREFERRED_HOSTERS)
         self._preferred_hosters: List[str] = list(self._default_preferred_hosters)
@@ -495,7 +497,79 @@ class FilmpalastPlugin(BasisPlugin):
         return max_page
 
     def capabilities(self) -> set[str]:
-        return {"genres"}
+        return {"genres", "alpha"}
+
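A hedged usage sketch: assuming the host application probes capabilities() to decide which browse modes to expose (the caller below is hypothetical, not part of this commit), the new "alpha" flag would be consumed roughly like this:

    plugin = FilmpalastPlugin()  # hypothetical construction; real init args not shown in this diff
    if "alpha" in plugin.capabilities():
        # offer A-Z browsing in addition to genre browsing
        letters = plugin.alpha_index()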
def _parse_alpha_links(self, soup: BeautifulSoupT) -> Dict[str, str]:
|
||||
alpha: Dict[str, str] = {}
|
||||
if not soup:
|
||||
return alpha
|
||||
for anchor in soup.select("section#movietitle a[href], #movietitle a[href], aside #movietitle a[href]"):
|
||||
name = (anchor.get_text(" ", strip=True) or "").strip()
|
||||
href = (anchor.get("href") or "").strip()
|
||||
if not name or not href:
|
||||
continue
|
||||
if "/search/alpha/" not in href:
|
||||
continue
|
||||
if name in alpha:
|
||||
continue
|
||||
alpha[name] = _absolute_url(href)
|
||||
return alpha
|
||||
|
||||
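For illustration, a standalone sketch of what _parse_alpha_links extracts, run against invented sample markup; _absolute_url is omitted here, so raw hrefs are shown instead of absolute URLs:

    from bs4 import BeautifulSoup

    html = """
    <section id="movietitle">
      <a href="/search/alpha/A">A</a>
      <a href="/search/alpha/B">B</a>
      <a href="/kontakt">Kontakt</a>
    </section>
    """
    soup = BeautifulSoup(html, "html.parser")
    alpha = {}
    for anchor in soup.select("section#movietitle a[href]"):
        name = (anchor.get_text(" ", strip=True) or "").strip()
        href = (anchor.get("href") or "").strip()
        # keep only non-empty, A-Z index links, first occurrence wins
        if name and href and "/search/alpha/" in href and name not in alpha:
            alpha[name] = href
    print(alpha)  # {'A': '/search/alpha/A', 'B': '/search/alpha/B'}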
+    def alpha_index(self) -> List[str]:
+        if not self._requests_available:
+            return []
+        if self._alpha_to_url:
+            return list(self._alpha_to_url.keys())
+        try:
+            soup = _get_soup(_absolute_url("/"), session=get_requests_session("filmpalast", headers=HEADERS))
+        except Exception:
+            return []
+        parsed = self._parse_alpha_links(soup)
+        if parsed:
+            self._alpha_to_url = dict(parsed)
+        return list(self._alpha_to_url.keys())
+
+    def alpha_page_count(self, letter: str) -> int:
+        letter = (letter or "").strip()
+        if not letter:
+            return 1
+        if letter in self._alpha_page_count_cache:
+            return max(1, int(self._alpha_page_count_cache.get(letter, 1)))
+        if not self._alpha_to_url:
+            self.alpha_index()
+        base_url = self._alpha_to_url.get(letter, "")
+        if not base_url:
+            return 1
+        try:
+            soup = _get_soup(base_url, session=get_requests_session("filmpalast", headers=HEADERS))
+        except Exception:
+            return 1
+        pages = self._extract_last_page(soup)
+        self._alpha_page_count_cache[letter] = max(1, pages)
+        return self._alpha_page_count_cache[letter]
+
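_extract_last_page is defined elsewhere in the plugin and its exact logic is not visible in this diff. As a rough idea of the kind of logic typically involved, one common approach scans pagination links for the highest /page/N number; this sketch is an assumption, not the plugin's actual code:

    import re

    def extract_last_page(soup) -> int:
        # assumes pagination anchors like <a href=".../page/12">12</a>
        last = 1
        for anchor in soup.select("a[href]"):
            match = re.search(r"/page/(\d+)", anchor.get("href") or "")
            if match:
                last = max(last, int(match.group(1)))
        return last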
+    def titles_for_alpha_page(self, letter: str, page: int) -> List[str]:
+        letter = (letter or "").strip()
+        if not letter or not self._requests_available:
+            return []
+        if not self._alpha_to_url:
+            self.alpha_index()
+        base_url = self._alpha_to_url.get(letter, "")
+        if not base_url:
+            return []
+        page = max(1, int(page or 1))
+        url = base_url if page == 1 else urljoin(base_url.rstrip("/") + "/", f"page/{page}")
+        try:
+            soup = _get_soup(url, session=get_requests_session("filmpalast", headers=HEADERS))
+        except Exception:
+            return []
+        hits = self._parse_listing_hits(soup)
+        return self._apply_hits_to_title_index(hits)
+
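The page-URL construction deserves a close look: urljoin needs the base to end in a slash, otherwise it would replace the last path segment instead of appending to it, which is what the rstrip("/") + "/" guard ensures. A quick check with a placeholder domain (the real letter URLs come from alpha_index):

    from urllib.parse import urljoin

    base_url = "https://filmpalast.example/search/alpha/B"  # placeholder domain for illustration
    for page in (1, 3):
        url = base_url if page == 1 else urljoin(base_url.rstrip("/") + "/", f"page/{page}")
        print(url)
    # https://filmpalast.example/search/alpha/B
    # https://filmpalast.example/search/alpha/B/page/3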
+    def titles_for_alpha(self, letter: str) -> List[str]:
+        titles = self.titles_for_alpha_page(letter, 1)
+        titles.sort(key=lambda value: value.casefold())
+        return titles
 
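Taken together, the new methods form a small browse API: alpha_index discovers the letters, alpha_page_count caches the page total per letter, titles_for_alpha_page fetches one listing page, and titles_for_alpha is a sorted page-1 convenience. A hypothetical walk over the full index might look like this (constructor arguments and error handling elided):

    plugin = FilmpalastPlugin()  # hypothetical construction
    for letter in plugin.alpha_index():  # letter names as scraped from the site
        for page in range(1, plugin.alpha_page_count(letter) + 1):  # count is cached per letter
            for title in plugin.titles_for_alpha_page(letter, page):
                print(letter, page, title)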
     def genres(self) -> List[str]:
         if not self._requests_available: