Compare commits

...

7 Commits

10 changed files with 822 additions and 285 deletions

View File

@@ -29,6 +29,20 @@ Es durchsucht Provider und startet Streams.
- Plugins: `addon/plugins/*_plugin.py` - Plugins: `addon/plugins/*_plugin.py`
- Settings: `addon/resources/settings.xml` - Settings: `addon/resources/settings.xml`
## TMDB API Key einrichten
- TMDB Account anlegen und API Key (v3) erstellen: `https://www.themoviedb.org/settings/api`
- In Kodi das ViewIT Addon oeffnen: `Einstellungen -> TMDB`
- `TMDB aktivieren` einschalten
- `TMDB API Key` eintragen
- Optional `TMDB Sprache` setzen (z. B. `de-DE`)
- Optional die Anzeige-Optionen aktivieren/deaktivieren:
- `TMDB Beschreibung anzeigen`
- `TMDB Poster und Vorschaubild anzeigen`
- `TMDB Fanart/Backdrop anzeigen`
- `TMDB Bewertung anzeigen`
- `TMDB Stimmen anzeigen`
- `TMDB Besetzung anzeigen`
## Tests ## Tests
- Dev Pakete installieren: `./.venv/bin/pip install -r requirements-dev.txt` - Dev Pakete installieren: `./.venv/bin/pip install -r requirements-dev.txt`
- Tests starten: `./.venv/bin/pytest` - Tests starten: `./.venv/bin/pytest`

View File

@@ -1,5 +1,5 @@
<?xml version='1.0' encoding='utf-8'?> <?xml version='1.0' encoding='utf-8'?>
<addon id="plugin.video.viewit" name="ViewIt" version="0.1.57" provider-name="ViewIt"> <addon id="plugin.video.viewit" name="ViewIt" version="0.1.58" provider-name="ViewIt">
<requires> <requires>
<import addon="xbmc.python" version="3.0.0" /> <import addon="xbmc.python" version="3.0.0" />
<import addon="script.module.requests" /> <import addon="script.module.requests" />
@@ -18,4 +18,4 @@
<license>GPL-3.0-or-later</license> <license>GPL-3.0-or-later</license>
<platform>all</platform> <platform>all</platform>
</extension> </extension>
</addon> </addon>

File diff suppressed because it is too large Load Diff

View File

@@ -15,7 +15,9 @@ from __future__ import annotations
from datetime import datetime from datetime import datetime
import hashlib import hashlib
import os import os
import re
from typing import Optional from typing import Optional
from urllib.parse import parse_qsl, urlencode
try: # pragma: no cover - Kodi runtime try: # pragma: no cover - Kodi runtime
import xbmcaddon # type: ignore[import-not-found] import xbmcaddon # type: ignore[import-not-found]
@@ -237,3 +239,40 @@ def dump_response_html(
max_files = get_setting_int(addon_id, max_files_setting_id, default=200) max_files = get_setting_int(addon_id, max_files_setting_id, default=200)
_prune_dump_files(log_dir, prefix=filename_prefix, max_files=max_files) _prune_dump_files(log_dir, prefix=filename_prefix, max_files=max_files)
_append_text_file(path, content) _append_text_file(path, content)
def normalize_resolved_stream_url(final_url: str, *, source_url: str = "") -> str:
    """Normalize hoster-specific headers in a resolved stream link.

    ``final_url`` may carry a Kodi header suffix (``url|Key=Value&...``).
    Only known problem cases are adjusted; anything else is returned
    unchanged. An empty/whitespace-only input yields ``""``.
    """
    stripped = (final_url or "").strip()
    if not stripped:
        return ""
    return _normalize_supervideo_serversicuro(stripped, source_url=source_url)
def _normalize_supervideo_serversicuro(final_url: str, *, source_url: str = "") -> str:
if "serversicuro.cc/hls/" not in final_url.casefold() or "|" not in final_url:
return final_url
source = (source_url or "").strip()
code_match = re.search(
r"supervideo\.(?:tv|cc)/(?:e/)?([a-z0-9]+)(?:\\.html)?",
source,
flags=re.IGNORECASE,
)
if not code_match:
return final_url
code = (code_match.group(1) or "").strip()
if not code:
return final_url
media_url, header_suffix = final_url.split("|", 1)
headers = dict(parse_qsl(header_suffix, keep_blank_values=True))
headers["Referer"] = f"https://supervideo.cc/e/{code}"
return f"{media_url}|{urlencode(headers)}"

View File

@@ -754,6 +754,7 @@ class AniworldPlugin(BasisPlugin):
def __init__(self) -> None: def __init__(self) -> None:
self._anime_results: Dict[str, SeriesResult] = {} self._anime_results: Dict[str, SeriesResult] = {}
self._title_url_cache: Dict[str, str] = self._load_title_url_cache() self._title_url_cache: Dict[str, str] = self._load_title_url_cache()
self._title_meta: Dict[str, tuple[str, str]] = {}
self._genre_names_cache: Optional[List[str]] = None self._genre_names_cache: Optional[List[str]] = None
self._season_cache: Dict[str, List[SeasonInfo]] = {} self._season_cache: Dict[str, List[SeasonInfo]] = {}
self._season_links_cache: Dict[str, List[SeasonInfo]] = {} self._season_links_cache: Dict[str, List[SeasonInfo]] = {}
@@ -818,8 +819,135 @@ class AniworldPlugin(BasisPlugin):
changed = True changed = True
if changed and persist: if changed and persist:
self._save_title_url_cache() self._save_title_url_cache()
if description:
old_plot, old_poster = self._title_meta.get(title, ("", ""))
self._title_meta[title] = (description.strip() or old_plot, old_poster)
return changed return changed
def _store_title_meta(self, title: str, *, plot: str = "", poster: str = "") -> None:
title = (title or "").strip()
if not title:
return
old_plot, old_poster = self._title_meta.get(title, ("", ""))
merged_plot = (plot or old_plot or "").strip()
merged_poster = (poster or old_poster or "").strip()
self._title_meta[title] = (merged_plot, merged_poster)
@staticmethod
def _is_series_image_url(url: str) -> bool:
value = (url or "").strip().casefold()
if not value:
return False
blocked = (
"/public/img/facebook",
"/public/img/logo",
"aniworld-logo",
"favicon",
"/public/img/german.svg",
"/public/img/japanese-",
)
return not any(marker in value for marker in blocked)
@staticmethod
def _extract_style_url(style_value: str) -> str:
style_value = (style_value or "").strip()
if not style_value:
return ""
match = re.search(r"url\((['\"]?)(.*?)\1\)", style_value, flags=re.IGNORECASE)
if not match:
return ""
return (match.group(2) or "").strip()
def _extract_series_metadata(self, soup: BeautifulSoupT) -> tuple[str, str, str]:
    """Scrape ``(plot, poster, fanart)`` from a parsed Aniworld series page.

    Each field is tried against a preferred selector first and then falls
    back to page-level meta tags / generic selectors. Image candidates are
    filtered through ``_is_series_image_url`` to skip site assets. Fields
    that cannot be found come back as empty strings.
    """
    if not soup:
        return "", "", ""
    plot = ""
    poster = ""
    fanart = ""
    # Series content lives under "#series"; fall back to the whole document.
    root = soup.select_one("#series") or soup
    # Plot: prefer the dedicated description paragraph; its
    # "data-full-description" attribute carries the untruncated text.
    description_node = root.select_one("p.seri_des")
    if description_node is not None:
        full_text = (description_node.get("data-full-description") or "").strip()
        short_text = (description_node.get_text(" ", strip=True) or "").strip()
        plot = full_text or short_text
    if not plot:
        # Fallback 1: page-level meta descriptions.
        for selector in ("meta[property='og:description']", "meta[name='description']"):
            node = soup.select_one(selector)
            if node is None:
                continue
            content = (node.get("content") or "").strip()
            if content:
                plot = content
                break
    if not plot:
        # Fallback 2: generic description containers.
        for selector in (".series-description", ".seri_des", ".description", "article p"):
            node = soup.select_one(selector)
            if node is None:
                continue
            text = (node.get_text(" ", strip=True) or "").strip()
            if text:
                plot = text
                break
    # Poster: cover image in the series cover box ("data-src" wins over "src").
    cover = root.select_one("div.seriesCoverBox img[itemprop='image'], div.seriesCoverBox img")
    if cover is not None:
        for attr in ("data-src", "src"):
            value = (cover.get(attr) or "").strip()
            if value:
                candidate = _absolute_url(value)
                if self._is_series_image_url(candidate):
                    poster = candidate
                # NOTE(review): loop stops at the first non-empty attribute
                # even when the candidate was rejected — confirm intended.
                break
    if not poster:
        # Fallback: og:/twitter: meta images.
        for selector in ("meta[property='og:image']", "meta[name='twitter:image']"):
            node = soup.select_one(selector)
            if node is None:
                continue
            content = (node.get("content") or "").strip()
            if content:
                candidate = _absolute_url(content)
                if self._is_series_image_url(candidate):
                    poster = candidate
                break
    if not poster:
        # Last resort: any image inside the cover box.
        for selector in ("img.seriesCoverBox", ".seriesCoverBox img"):
            image = soup.select_one(selector)
            if image is None:
                continue
            value = (image.get("data-src") or image.get("src") or "").strip()
            if value:
                candidate = _absolute_url(value)
                if self._is_series_image_url(candidate):
                    poster = candidate
                break
    # Fanart: backdrop element's inline CSS url(...), else og:image.
    backdrop_node = root.select_one("section.title .backdrop, .SeriesSection .backdrop, .backdrop")
    if backdrop_node is not None:
        raw_style = (backdrop_node.get("style") or "").strip()
        style_url = self._extract_style_url(raw_style)
        if style_url:
            candidate = _absolute_url(style_url)
            if self._is_series_image_url(candidate):
                fanart = candidate
    if not fanart:
        for selector in ("meta[property='og:image']",):
            node = soup.select_one(selector)
            if node is None:
                continue
            content = (node.get("content") or "").strip()
            if content:
                candidate = _absolute_url(content)
                if self._is_series_image_url(candidate):
                    fanart = candidate
                break
    return plot, poster, fanart
@staticmethod @staticmethod
def _season_links_cache_name(series_url: str) -> str: def _season_links_cache_name(series_url: str) -> str:
digest = hashlib.sha1((series_url or "").encode("utf-8")).hexdigest()[:20] digest = hashlib.sha1((series_url or "").encode("utf-8")).hexdigest()[:20]
@@ -951,6 +1079,43 @@ class AniworldPlugin(BasisPlugin):
return None return None
def metadata_for(self, title: str) -> tuple[dict[str, str], dict[str, str], list[object] | None]:
    """Return Kodi listing metadata ``(info, art, cast)`` for *title*.

    Serves plot/poster from the in-memory ``_title_meta`` cache when both
    are present; otherwise resolves the series and scrapes its detail page
    for the missing pieces. Network/parse failures degrade gracefully to
    whatever is already known. Cast (third element) is always ``None`` here.
    """
    title = (title or "").strip()
    if not title:
        return {}, {}, None
    info: dict[str, str] = {"title": title}
    art: dict[str, str] = {}
    # Cache first: avoid a network round-trip when everything is known.
    cached_plot, cached_poster = self._title_meta.get(title, ("", ""))
    if cached_plot:
        info["plot"] = cached_plot
    if cached_poster:
        art = {"thumb": cached_poster, "poster": cached_poster}
    if "plot" in info and art:
        return info, art, None
    series = self._find_series_by_title(title)
    if series is None or not series.url:
        return info, art, None
    # Use the search-result description as an interim plot.
    if series.description and "plot" not in info:
        info["plot"] = series.description
    try:
        soup = _get_soup(series.url, session=get_requests_session("aniworld", headers=HEADERS))
        plot, poster, fanart = self._extract_series_metadata(soup)
    except Exception:
        # Best effort: keep whatever metadata we already collected.
        plot, poster, fanart = "", "", ""
    if plot:
        info["plot"] = plot
    if poster:
        art = {"thumb": poster, "poster": poster}
    if fanart:
        art["fanart"] = fanart
        art["landscape"] = fanart
    # Persist the merged result for subsequent lookups.
    self._store_title_meta(title, plot=info.get("plot", ""), poster=poster)
    return info, art, None
def _ensure_popular(self) -> List[SeriesResult]: def _ensure_popular(self) -> List[SeriesResult]:
if self._popular_cache is not None: if self._popular_cache is not None:
return list(self._popular_cache) return list(self._popular_cache)

View File

@@ -244,6 +244,7 @@ class FilmpalastPlugin(BasisPlugin):
def __init__(self) -> None: def __init__(self) -> None:
self._title_to_url: Dict[str, str] = {} self._title_to_url: Dict[str, str] = {}
self._title_meta: Dict[str, tuple[str, str]] = {}
self._series_entries: Dict[str, Dict[int, Dict[int, EpisodeEntry]]] = {} self._series_entries: Dict[str, Dict[int, Dict[int, EpisodeEntry]]] = {}
self._hoster_cache: Dict[str, Dict[str, str]] = {} self._hoster_cache: Dict[str, Dict[str, str]] = {}
self._genre_to_url: Dict[str, str] = {} self._genre_to_url: Dict[str, str] = {}
@@ -722,6 +723,64 @@ class FilmpalastPlugin(BasisPlugin):
return hit.url return hit.url
return "" return ""
def _store_title_meta(self, title: str, *, plot: str = "", poster: str = "") -> None:
title = (title or "").strip()
if not title:
return
old_plot, old_poster = self._title_meta.get(title, ("", ""))
merged_plot = (plot or old_plot or "").strip()
merged_poster = (poster or old_poster or "").strip()
self._title_meta[title] = (merged_plot, merged_poster)
def _extract_detail_metadata(self, soup: BeautifulSoupT) -> tuple[str, str]:
    """Scrape ``(plot, poster)`` from a Filmpalast detail page.

    Plot is taken from the schema.org Movie description, then the hidden
    full-text block, then page-level meta descriptions. The poster is read
    from ``img.cover2`` with a schema.org thumbnail fallback; both image
    paths share the same validity filter. Missing fields come back as "".
    """
    if not soup:
        return "", ""
    root = soup.select_one("div#content[role='main']") or soup
    detail = root.select_one("article.detail") or root

    def _poster_ok(candidate: str) -> bool:
        # Accept only real movie artwork: skip theme assets and spacer gifs.
        # (Previously duplicated inline at both poster extraction sites.)
        lower = candidate.casefold()
        return "/themes/" not in lower and "spacer.gif" not in lower and "/files/movies/" in lower

    plot = ""
    poster = ""
    # Filmpalast detail page: prefer the dedicated plot block.
    plot_node = detail.select_one(
        "li[itemtype='http://schema.org/Movie'] span[itemprop='description']"
    )
    if plot_node is not None:
        plot = (plot_node.get_text(" ", strip=True) or "").strip()
    if not plot:
        # Fallback: hidden full-description block inside <cite>.
        hidden_plot = detail.select_one("cite span.hidden")
        if hidden_plot is not None:
            plot = (hidden_plot.get_text(" ", strip=True) or "").strip()
    if not plot:
        # Last resort: page-level meta descriptions.
        for selector in ("meta[property='og:description']", "meta[name='description']"):
            node = root.select_one(selector)
            if node is None:
                continue
            content = (node.get("content") or "").strip()
            if content:
                plot = content
                break
    # Filmpalast detail page: the cover is stable under `img.cover2`.
    cover = detail.select_one("img.cover2")
    if cover is not None:
        value = (cover.get("data-src") or cover.get("src") or "").strip()
        if value:
            candidate = _absolute_url(value)
            if _poster_ok(candidate):
                poster = candidate
    if not poster:
        # Fallback: schema.org Movie thumbnail.
        thumb_node = detail.select_one("li[itemtype='http://schema.org/Movie'] img[itemprop='image']")
        if thumb_node is not None:
            value = (thumb_node.get("data-src") or thumb_node.get("src") or "").strip()
            if value:
                candidate = _absolute_url(value)
                if _poster_ok(candidate):
                    poster = candidate
    return plot, poster
def remember_series_url(self, title: str, series_url: str) -> None: def remember_series_url(self, title: str, series_url: str) -> None:
title = (title or "").strip() title = (title or "").strip()
series_url = (series_url or "").strip() series_url = (series_url or "").strip()
@@ -742,6 +801,52 @@ class FilmpalastPlugin(BasisPlugin):
return _series_hint_value(series_key) return _series_hint_value(series_key)
return "" return ""
def metadata_for(self, title: str) -> tuple[dict[str, str], dict[str, str], list[object] | None]:
    """Return Kodi listing metadata ``(info, art, cast)`` for *title*.

    Serves plot/poster from the ``_title_meta`` cache when both are present.
    Otherwise resolves a detail URL — directly for movies, or via the first
    known episode of the series — and scrapes the page for missing fields.
    Failures degrade gracefully; cast (third element) is always ``None``.
    """
    title = (title or "").strip()
    if not title:
        return {}, {}, None
    info: dict[str, str] = {"title": title}
    art: dict[str, str] = {}
    # Cache first: avoid a network round-trip when everything is known.
    cached_plot, cached_poster = self._title_meta.get(title, ("", ""))
    if cached_plot:
        info["plot"] = cached_plot
    if cached_poster:
        art = {"thumb": cached_poster, "poster": cached_poster}
    if "plot" in info and art:
        return info, art, None
    detail_url = self._ensure_title_url(title)
    if not detail_url:
        # Series titles have no direct URL; fall back to the URL of the
        # first known episode (lowest season, lowest episode number).
        series_key = self._series_key_for_title(title) or self._ensure_series_entries_for_title(title)
        if series_key:
            seasons = self._series_entries.get(series_key, {})
            first_entry: Optional[EpisodeEntry] = None
            for season_number in sorted(seasons.keys()):
                episodes = seasons.get(season_number, {})
                for episode_number in sorted(episodes.keys()):
                    first_entry = episodes.get(episode_number)
                    if first_entry is not None:
                        break
                if first_entry is not None:
                    break
            detail_url = first_entry.url if first_entry is not None else ""
    if not detail_url:
        return info, art, None
    try:
        soup = _get_soup(detail_url, session=get_requests_session("filmpalast", headers=HEADERS))
        plot, poster = self._extract_detail_metadata(soup)
    except Exception:
        # Best effort: keep whatever metadata we already collected.
        plot, poster = "", ""
    if plot:
        info["plot"] = plot
    if poster:
        art = {"thumb": poster, "poster": poster}
    # Persist the merged result for subsequent lookups.
    self._store_title_meta(title, plot=info.get("plot", ""), poster=poster)
    return info, art, None
def is_movie(self, title: str) -> bool: def is_movie(self, title: str) -> bool:
title = (title or "").strip() title = (title or "").strip()
if not title: if not title:

View File

@@ -1096,7 +1096,7 @@ class SerienstreamPlugin(BasisPlugin):
name = "Serienstream" name = "Serienstream"
version = "1.0.0" version = "1.0.0"
POPULAR_GENRE_LABEL = "⭐ Beliebte Serien" POPULAR_GENRE_LABEL = "Haeufig gesehen"
def __init__(self) -> None: def __init__(self) -> None:
self._series_results: Dict[str, SeriesResult] = {} self._series_results: Dict[str, SeriesResult] = {}

View File

@@ -20,7 +20,7 @@ import os
import re import re
import json import json
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional
from urllib.parse import urlencode, urljoin from urllib.parse import urljoin
try: # pragma: no cover - optional dependency try: # pragma: no cover - optional dependency
import requests import requests
@@ -97,6 +97,7 @@ class SearchHit:
title: str title: str
url: str url: str
description: str = "" description: str = ""
poster: str = ""
def _normalize_search_text(value: str) -> str: def _normalize_search_text(value: str) -> str:
@@ -149,6 +150,7 @@ class TopstreamfilmPlugin(BasisPlugin):
self._season_to_episode_numbers: Dict[tuple[str, str], List[int]] = {} self._season_to_episode_numbers: Dict[tuple[str, str], List[int]] = {}
self._episode_title_by_number: Dict[tuple[str, int, int], str] = {} self._episode_title_by_number: Dict[tuple[str, int, int], str] = {}
self._detail_html_cache: Dict[str, str] = {} self._detail_html_cache: Dict[str, str] = {}
self._title_meta: Dict[str, tuple[str, str]] = {}
self._popular_cache: List[str] | None = None self._popular_cache: List[str] | None = None
self._default_preferred_hosters: List[str] = list(DEFAULT_PREFERRED_HOSTERS) self._default_preferred_hosters: List[str] = list(DEFAULT_PREFERRED_HOSTERS)
self._preferred_hosters: List[str] = list(self._default_preferred_hosters) self._preferred_hosters: List[str] = list(self._default_preferred_hosters)
@@ -429,6 +431,7 @@ class TopstreamfilmPlugin(BasisPlugin):
continue continue
seen.add(hit.title) seen.add(hit.title)
self._title_to_url[hit.title] = hit.url self._title_to_url[hit.title] = hit.url
self._store_title_meta(hit.title, plot=hit.description, poster=hit.poster)
titles.append(hit.title) titles.append(hit.title)
if titles: if titles:
self._save_title_url_cache() self._save_title_url_cache()
@@ -487,6 +490,69 @@ class TopstreamfilmPlugin(BasisPlugin):
except Exception: except Exception:
return "" return ""
def _pick_image_from_node(self, node: Any) -> str:
    """Return an absolute image URL for the first usable ``<img>`` below *node*.

    Tries ``data-src``/``src`` first (skipping lazy-load placeholders), then
    falls back to the first entry of ``data-srcset``/``srcset``. Returns
    ``""`` when nothing usable is found.
    """
    image = node.select_one("img") if node is not None else None
    if image is None:
        return ""
    for attribute in ("data-src", "src"):
        raw = (image.get(attribute) or "").strip()
        if raw and "lazy_placeholder" not in raw.casefold():
            return self._absolute_external_url(raw, base=self._get_base_url())
    srcset = (image.get("data-srcset") or image.get("srcset") or "").strip()
    if not srcset:
        return ""
    first_entry = srcset.split(",")[0].strip().split(" ", 1)[0].strip()
    if first_entry:
        return self._absolute_external_url(first_entry, base=self._get_base_url())
    return ""
def _store_title_meta(self, title: str, *, plot: str = "", poster: str = "") -> None:
title = (title or "").strip()
if not title:
return
old_plot, old_poster = self._title_meta.get(title, ("", ""))
merged_plot = (plot or old_plot or "").strip()
merged_poster = (poster or old_poster or "").strip()
self._title_meta[title] = (merged_plot, merged_poster)
def _extract_detail_metadata(self, soup: BeautifulSoupT) -> tuple[str, str]:
    """Scrape ``(plot, poster)`` from a Topstreamfilm detail page.

    Plot comes from page-level meta descriptions, falling back to the
    longest sufficiently long paragraph in the content area. Poster comes
    from og:/twitter: meta images, falling back to the first usable image
    in the content containers. Missing fields come back as "".
    """
    if not soup:
        return "", ""
    plot = ""
    poster = ""
    # Plot: prefer page-level meta descriptions.
    for selector in ("meta[property='og:description']", "meta[name='description']"):
        node = soup.select_one(selector)
        if node is None:
            continue
        content = (node.get("content") or "").strip()
        if content:
            plot = content
            break
    if not plot:
        # Fallback: longest paragraph (>= 60 chars) from the content area —
        # short paragraphs are usually navigation or hoster labels.
        candidates: list[str] = []
        for paragraph in soup.select("article p, .TPost p, .Description p, .entry-content p"):
            text = (paragraph.get_text(" ", strip=True) or "").strip()
            if len(text) >= 60:
                candidates.append(text)
        if candidates:
            plot = max(candidates, key=len)
    # Poster: prefer og:/twitter: meta images.
    for selector in ("meta[property='og:image']", "meta[name='twitter:image']"):
        node = soup.select_one(selector)
        if node is None:
            continue
        content = (node.get("content") or "").strip()
        if content:
            poster = self._absolute_external_url(content, base=self._get_base_url())
            break
    if not poster:
        # Fallback: first usable <img> inside the content containers.
        for selector in ("article", ".TPost", ".entry-content"):
            poster = self._pick_image_from_node(soup.select_one(selector))
            if poster:
                break
    return plot, poster
def _clear_stream_index_for_title(self, title: str) -> None: def _clear_stream_index_for_title(self, title: str) -> None:
for key in list(self._season_to_episode_numbers.keys()): for key in list(self._season_to_episode_numbers.keys()):
if key[0] == title: if key[0] == title:
@@ -721,7 +787,17 @@ class TopstreamfilmPlugin(BasisPlugin):
continue continue
if is_movie_hint: if is_movie_hint:
self._movie_title_hint.add(title) self._movie_title_hint.add(title)
hits.append(SearchHit(title=title, url=self._absolute_url(href), description="")) description_tag = item.select_one(".TPMvCn .Description, .Description, .entry-summary")
description = (description_tag.get_text(" ", strip=True) or "").strip() if description_tag else ""
poster = self._pick_image_from_node(item)
hits.append(
SearchHit(
title=title,
url=self._absolute_url(href),
description=description,
poster=poster,
)
)
return hits return hits
def is_movie(self, title: str) -> bool: def is_movie(self, title: str) -> bool:
@@ -794,6 +870,7 @@ class TopstreamfilmPlugin(BasisPlugin):
continue continue
seen.add(hit.title) seen.add(hit.title)
self._title_to_url[hit.title] = hit.url self._title_to_url[hit.title] = hit.url
self._store_title_meta(hit.title, plot=hit.description, poster=hit.poster)
titles.append(hit.title) titles.append(hit.title)
if titles: if titles:
self._save_title_url_cache() self._save_title_url_cache()
@@ -905,7 +982,8 @@ class TopstreamfilmPlugin(BasisPlugin):
self._movie_title_hint.add(title) self._movie_title_hint.add(title)
description_tag = item.select_one(".TPMvCn .Description") description_tag = item.select_one(".TPMvCn .Description")
description = description_tag.get_text(" ", strip=True) if description_tag else "" description = description_tag.get_text(" ", strip=True) if description_tag else ""
hit = SearchHit(title=title, url=self._absolute_url(href), description=description) poster = self._pick_image_from_node(item)
hit = SearchHit(title=title, url=self._absolute_url(href), description=description, poster=poster)
if _matches_query(query, title=hit.title, description=hit.description): if _matches_query(query, title=hit.title, description=hit.description):
hits.append(hit) hits.append(hit)
@@ -918,11 +996,41 @@ class TopstreamfilmPlugin(BasisPlugin):
continue continue
seen.add(hit.title) seen.add(hit.title)
self._title_to_url[hit.title] = hit.url self._title_to_url[hit.title] = hit.url
self._store_title_meta(hit.title, plot=hit.description, poster=hit.poster)
titles.append(hit.title) titles.append(hit.title)
self._save_title_url_cache() self._save_title_url_cache()
_emit_progress(progress_callback, f"Fertig: {len(titles)} Treffer", 95) _emit_progress(progress_callback, f"Fertig: {len(titles)} Treffer", 95)
return titles return titles
def metadata_for(self, title: str) -> tuple[dict[str, str], dict[str, str], list[object] | None]:
    """Return Kodi listing metadata ``(info, art, cast)`` for *title*.

    Serves plot/poster from the ``_title_meta`` cache when both are present;
    otherwise scrapes the (cached) detail page for the missing pieces.
    Cast (third element) is always ``None`` for this provider.
    """
    title = (title or "").strip()
    if not title:
        return {}, {}, None
    info: dict[str, str] = {"title": title}
    art: dict[str, str] = {}
    # Cache first: avoid a detail-page fetch when everything is known.
    cached_plot, cached_poster = self._title_meta.get(title, ("", ""))
    if cached_plot:
        info["plot"] = cached_plot
    if cached_poster:
        art = {"thumb": cached_poster, "poster": cached_poster}
    if "plot" in info and art:
        return info, art, None
    soup = self._get_detail_soup(title)
    if soup is None:
        return info, art, None
    plot, poster = self._extract_detail_metadata(soup)
    if plot:
        info["plot"] = plot
    if poster:
        art = {"thumb": poster, "poster": poster}
    # Persist scraped values (merged with any cached ones) for next time.
    self._store_title_meta(title, plot=plot, poster=poster)
    return info, art, None
def genres(self) -> List[str]: def genres(self) -> List[str]:
if not REQUESTS_AVAILABLE or BeautifulSoup is None: if not REQUESTS_AVAILABLE or BeautifulSoup is None:
return [] return []

View File

@@ -71,7 +71,12 @@
<setting id="tmdb_log_responses" type="bool" label="TMDB API-Antworten loggen" default="false" /> <setting id="tmdb_log_responses" type="bool" label="TMDB API-Antworten loggen" default="false" />
</category> </category>
<category label="Update"> <category label="Update">
<setting id="update_repo_url" type="text" label="Update-URL (addons.xml)" default="http://127.0.0.1:8080/repo/addons.xml" /> <setting id="update_channel" type="enum" label="Update-Kanal" default="0" values="Main|Nightly|Custom" />
<setting id="auto_update_enabled" type="bool" label="Automatische Updates (beim Start pruefen)" default="false" />
<setting id="update_repo_url_main" type="text" label="Main URL (addons.xml)" default="https://gitea.it-drui.de/viewit/ViewIT-Kodi-Repo/raw/branch/main/addons.xml" />
<setting id="update_repo_url_nightly" type="text" label="Nightly URL (addons.xml)" default="https://gitea.it-drui.de/viewit/ViewIT-Kodi-Repo/raw/branch/nightly/addons.xml" />
<setting id="update_repo_url" type="text" label="Custom URL (addons.xml)" default="https://gitea.it-drui.de/viewit/ViewIT-Kodi-Repo/raw/branch/main/addons.xml" />
<setting id="auto_update_last_ts" type="text" label="Auto-Update letzte Pruefung (intern)" default="0" visible="false" />
<setting id="run_update_check" type="action" label="Jetzt nach Updates suchen" action="RunPlugin(plugin://plugin.video.viewit/?action=check_updates)" option="close" /> <setting id="run_update_check" type="action" label="Jetzt nach Updates suchen" action="RunPlugin(plugin://plugin.video.viewit/?action=check_updates)" option="close" />
<setting id="update_info" type="text" label="Updates laufen ueber den normalen Kodi-Update-Mechanismus." default="" enable="false" /> <setting id="update_info" type="text" label="Updates laufen ueber den normalen Kodi-Update-Mechanismus." default="" enable="false" />
<setting id="update_version_addon" type="text" label="ViewIT Version" default="-" enable="false" /> <setting id="update_version_addon" type="text" label="ViewIT Version" default="-" enable="false" />

44
docs/RELEASE.md Normal file
View File

@@ -0,0 +1,44 @@
# Release Flow (Main + Nightly)
This project uses two release channels:
- `nightly`: integration and test channel
- `main`: stable channel
## Rules
- Feature work goes to `nightly` only.
- Promote from `nightly` to `main` with `--squash` only.
- `main` version has no suffix (`0.1.60`).
- `nightly` version uses `-nightly` and is always at least one patch higher than `main` (`0.1.61-nightly`).
- Keep changelogs split:
- `CHANGELOG-NIGHTLY.md`
- `CHANGELOG.md`
## Nightly publish
1) Finish changes on `nightly`.
2) Bump addon version in `addon/addon.xml` to `X.Y.Z-nightly`.
3) Build and publish nightly repo artifacts.
4) Push `nightly`.
## Promote nightly to main
```bash
git checkout main
git pull origin main
git merge --squash nightly
git commit -m "release: X.Y.Z"
```
Then:
1) Set `addon/addon.xml` version to `X.Y.Z` (without `-nightly`).
2) Build and publish main repo artifacts.
3) Push `main`.
4) Optional tag: `vX.Y.Z`.
## Local ZIPs (separated)
- Main ZIP output: `dist/local_zips/main/`
- Nightly ZIP output: `dist/local_zips/nightly/`