commit ee275bee4716a9ddaa97144f76cae4ff39a96e7b Author: itdrui.de Date: Sun Feb 1 17:55:30 2026 +0100 Implement ViewIt Plugin System Documentation and Update Project Notes - Added comprehensive documentation for the ViewIt Plugin System, detailing the plugin loading process, required methods, optional features, and community extension workflow. - Updated project notes to reflect the current structure, build process, search logic, and known issues. - Introduced new build scripts for installing the add-on and creating ZIP packages. - Added test scripts for TMDB API integration, including argument parsing and logging functionality. - Enhanced existing plugins with improved search logic and error handling. diff --git a/addon/LICENSE.txt b/addon/LICENSE.txt new file mode 100644 index 0000000..678a7ba --- /dev/null +++ b/addon/LICENSE.txt @@ -0,0 +1,598 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. 
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. + States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for the +work, and the source code for shared libraries and dynamically linked +subprograms that the work is specifically designed to require, such as +by intimate data communication or control flow between those subprograms +and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified it, +and giving a relevant date. + + b) The work must carry prominent notices stating that it is released +under this License and any conditions added under section 7. This +requirement modifies the requirement in section 4 to "keep intact all +notices". + + c) You must license the entire work, as a whole, under this License +to anyone who comes into possession of a copy. This License will +therefore apply, along with any applicable section 7 additional terms, +to the whole of the work, and all its parts, regardless of how they are +packaged. This License gives no permission to license the work in any +other way, but it does not invalidate such permission if you have +separately received it. + + d) If the work has interactive user interfaces, each must display +Appropriate Legal Notices; however, if the Program has interactive +interfaces that do not display Appropriate Legal Notices, your work +need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product +(including a physical distribution medium), accompanied by the +Corresponding Source fixed on a durable physical medium customarily +used for software interchange. + + b) Convey the object code in, or embodied in, a physical product +(including a physical distribution medium), accompanied by a written +offer, valid for at least three years and valid for as long as you +offer spare parts or customer support for that product model, to give +anyone who possesses the object code either (1) a copy of the +Corresponding Source for all the software in the product that is +covered by this License, on a durable physical medium customarily used +for software interchange, for a price no more than your reasonable cost +of physically performing this conveying of source, or (2) access to +copy the Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the +written offer to provide the Corresponding Source. This alternative +is allowed only occasionally and noncommercially, and only if you +received the object code with such an offer, in accord with subsection +6b. + + d) Convey the object code by offering access from a designated place +(gratis or for a charge), and offer equivalent access to the +Corresponding Source in the same way through the same place at no +further charge. You need not require recipients to copy the +Corresponding Source along with the object code. 
If the place to copy +the object code is a network server, the Corresponding Source may be on +a different server (operated by you or a third party) that supports +equivalent copying facilities, provided you maintain clear directions +next to the object code saying where to find the Corresponding Source. +Regardless of what server hosts the Corresponding Source, you remain +obligated to ensure that it is available for as long as needed to +satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided +you inform other peers where the object code and Corresponding Source +of the work are being offered to the general public at no charge under +subsection 6d. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the +terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or +author attributions in that material or in the Appropriate Legal +Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or +requiring that modified versions of such material be marked in +reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or +authors of the material; or + + e) Declining to grant rights under trademark law for use of some +trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that +material by anyone who conveys the material (or modified versions of +it) with contractual assumptions of liability to the recipient, for +any liability that these contractual assumptions directly impose on +those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that transaction +who receives a copy of the work also receives whatever licenses to the +work the party's predecessor in interest had or could give under the +previous paragraph, plus a right to possession of the Corresponding +Source of the work from the predecessor in interest, if the +predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims owned +or controlled by the contributor, whether already acquired or hereafter +acquired, that would be infringed by some manner, permitted by this +License, of making, using, or selling its contributor version, but do +not include claims that would be infringed only as a consequence of +further modification of the contributor version. For purposes of this +definition, "control" includes the right to grant patent sublicenses in +a manner consistent with the requirements of this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is conditioned +on the non-exercise of one or more of the rights that are specifically +granted under this License. 
You may not convey a covered work if you +are a party to an arrangement with a third party that is in the business +of distributing software, under which you make payment to the third +party based on the extent of your activity of conveying the work, and +under which the third party grants, to any of the parties who would +receive the covered work from you, a discriminatory patent license (a) +in connection with copies of the covered work conveyed by you (or +copies made from those copies), or (b) primarily for and in connection +with specific products or compilations that contain the covered work, +unless you entered into that arrangement, or that patent license was +granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not convey it at all. For example, if you agree to terms that +obligate you to collect a royalty for further conveying from those to +whom you convey the Program, the only way you could satisfy both those +terms and this License would be to refrain entirely from conveying the +Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + diff --git a/addon/NOTICE.txt b/addon/NOTICE.txt new file mode 100644 index 0000000..b7d030d --- /dev/null +++ b/addon/NOTICE.txt @@ -0,0 +1,13 @@ +Copyright (C) 2026 ViewIt contributors + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +This Kodi addon depends on `script.module.resolveurl`. 
diff --git a/addon/__pycache__/default.cpython-312.pyc b/addon/__pycache__/default.cpython-312.pyc new file mode 100644 index 0000000..6026002 Binary files /dev/null and b/addon/__pycache__/default.cpython-312.pyc differ diff --git a/addon/__pycache__/http_session_pool.cpython-312.pyc b/addon/__pycache__/http_session_pool.cpython-312.pyc new file mode 100644 index 0000000..83e6a44 Binary files /dev/null and b/addon/__pycache__/http_session_pool.cpython-312.pyc differ diff --git a/addon/__pycache__/plugin_helpers.cpython-312.pyc b/addon/__pycache__/plugin_helpers.cpython-312.pyc new file mode 100644 index 0000000..5a651b3 Binary files /dev/null and b/addon/__pycache__/plugin_helpers.cpython-312.pyc differ diff --git a/addon/__pycache__/plugin_interface.cpython-312.pyc b/addon/__pycache__/plugin_interface.cpython-312.pyc new file mode 100644 index 0000000..89a7f67 Binary files /dev/null and b/addon/__pycache__/plugin_interface.cpython-312.pyc differ diff --git a/addon/__pycache__/regex_patterns.cpython-312.pyc b/addon/__pycache__/regex_patterns.cpython-312.pyc new file mode 100644 index 0000000..4d7fc68 Binary files /dev/null and b/addon/__pycache__/regex_patterns.cpython-312.pyc differ diff --git a/addon/__pycache__/resolveurl_backend.cpython-312.pyc b/addon/__pycache__/resolveurl_backend.cpython-312.pyc new file mode 100644 index 0000000..1f14f61 Binary files /dev/null and b/addon/__pycache__/resolveurl_backend.cpython-312.pyc differ diff --git a/addon/__pycache__/tmdb.cpython-312.pyc b/addon/__pycache__/tmdb.cpython-312.pyc new file mode 100644 index 0000000..6e208ab Binary files /dev/null and b/addon/__pycache__/tmdb.cpython-312.pyc differ diff --git a/addon/addon.xml b/addon/addon.xml new file mode 100644 index 0000000..92f80c7 --- /dev/null +++ b/addon/addon.xml @@ -0,0 +1,21 @@ + + + + + + + + + + video + + + ViewIt Kodi Plugin + Streaming-Addon für Streamingseiten: Suche, Staffeln/Episoden und Wiedergabe. 
+ + icon.png + + GPL-3.0-or-later + all + + diff --git a/addon/default.py b/addon/default.py new file mode 100644 index 0000000..191f345 --- /dev/null +++ b/addon/default.py @@ -0,0 +1,2417 @@ +#!/usr/bin/env python3 +"""ViewIt Kodi-Addon Einstiegspunkt. + +Dieses Modul ist der Router fuer die Kodi-Navigation: es rendert Menues, +ruft Plugin-Implementierungen auf und startet die Wiedergabe. +""" + +from __future__ import annotations + +import asyncio +from contextlib import contextmanager +from datetime import datetime +import importlib.util +import inspect +import os +import re +import sys +from pathlib import Path +from types import ModuleType +from urllib.parse import parse_qs, urlencode + +try: # pragma: no cover - Kodi runtime + import xbmc # type: ignore[import-not-found] + import xbmcaddon # type: ignore[import-not-found] + import xbmcgui # type: ignore[import-not-found] + import xbmcplugin # type: ignore[import-not-found] + import xbmcvfs # type: ignore[import-not-found] +except ImportError: # pragma: no cover - allow importing outside Kodi (e.g. 
linting) + xbmc = None + xbmcaddon = None + xbmcgui = None + xbmcplugin = None + xbmcvfs = None + + class _XbmcStub: + LOGDEBUG = 0 + LOGINFO = 1 + LOGWARNING = 2 + + @staticmethod + def log(message: str, level: int = 1) -> None: + print(f"[KodiStub:{level}] {message}") + + class Player: + def play(self, item: str, listitem: object | None = None) -> None: + print(f"[KodiStub] play: {item}") + + class _XbmcGuiStub: + INPUT_ALPHANUM = 0 + NOTIFICATION_INFO = 0 + + class Dialog: + def input(self, heading: str, type: int = 0) -> str: + raise RuntimeError("xbmcgui ist nicht verfuegbar (KodiStub).") + + def select(self, heading: str, options: list[str]) -> int: + raise RuntimeError("xbmcgui ist nicht verfuegbar (KodiStub).") + + def notification(self, heading: str, message: str, icon: int = 0, time: int = 0) -> None: + print(f"[KodiStub] notification: {heading}: {message}") + + class ListItem: + def __init__(self, label: str = "", path: str = "") -> None: + self._label = label + self._path = path + + def setInfo(self, type: str, infoLabels: dict[str, str]) -> None: + return + + class _XbmcPluginStub: + @staticmethod + def addDirectoryItem(*, handle: int, url: str, listitem: object, isFolder: bool) -> None: + print(f"[KodiStub] addDirectoryItem: {url}") + + @staticmethod + def endOfDirectory(handle: int) -> None: + print(f"[KodiStub] endOfDirectory: {handle}") + + @staticmethod + def setPluginCategory(handle: int, category: str) -> None: + print(f"[KodiStub] category: {category}") + + xbmc = _XbmcStub() + xbmcgui = _XbmcGuiStub() + xbmcplugin = _XbmcPluginStub() + +from plugin_interface import BasisPlugin +from tmdb import TmdbCastMember, fetch_tv_episode_credits, lookup_movie, lookup_tv_season, lookup_tv_season_summary, lookup_tv_show + +PLUGIN_DIR = Path(__file__).with_name("plugins") +_PLUGIN_CACHE: dict[str, BasisPlugin] | None = None +_TMDB_CACHE: dict[str, tuple[dict[str, str], dict[str, str]]] = {} +_TMDB_CAST_CACHE: dict[str, list[TmdbCastMember]] = {} 
+_TMDB_ID_CACHE: dict[str, int] = {} +_TMDB_SEASON_CACHE: dict[tuple[int, int, str, str], dict[int, tuple[dict[str, str], dict[str, str]]]] = {} +_TMDB_SEASON_SUMMARY_CACHE: dict[tuple[int, int, str, str], tuple[dict[str, str], dict[str, str]]] = {} +_TMDB_EPISODE_CAST_CACHE: dict[tuple[int, int, int, str], list[TmdbCastMember]] = {} +_TMDB_LOG_PATH: str | None = None +_GENRE_TITLES_CACHE: dict[tuple[str, str], list[str]] = {} +_ADDON_INSTANCE = None +_PLAYSTATE_CACHE: dict[str, dict[str, object]] | None = None +WATCHED_THRESHOLD = 0.9 + + +def _tmdb_prefetch_concurrency() -> int: + """Max number of concurrent TMDB lookups when prefetching metadata for lists.""" + try: + raw = _get_setting_string("tmdb_prefetch_concurrency").strip() + value = int(raw) if raw else 6 + except Exception: + value = 6 + return max(1, min(20, value)) + + +def _log(message: str, level: int = xbmc.LOGINFO) -> None: + xbmc.log(f"[ViewIt] {message}", level) + + +def _busy_open() -> None: + try: # pragma: no cover - Kodi runtime + if xbmc is not None and hasattr(xbmc, "executebuiltin"): + xbmc.executebuiltin("ActivateWindow(busydialognocancel)") + except Exception: + pass + + +def _busy_close() -> None: + try: # pragma: no cover - Kodi runtime + if xbmc is not None and hasattr(xbmc, "executebuiltin"): + xbmc.executebuiltin("Dialog.Close(busydialognocancel)") + xbmc.executebuiltin("Dialog.Close(busydialog)") + except Exception: + pass + + +@contextmanager +def _busy_dialog(): + _busy_open() + try: + yield + finally: + _busy_close() + + +def _get_handle() -> int: + return int(sys.argv[1]) if len(sys.argv) > 1 else -1 + + +def _set_content(handle: int, content: str) -> None: + """Hint Kodi about the content type so skins can show watched/resume overlays.""" + content = (content or "").strip() + if not content: + return + try: # pragma: no cover - Kodi runtime + setter = getattr(xbmcplugin, "setContent", None) + if callable(setter): + setter(handle, content) + except Exception: + pass + + +def 
_get_addon(): + global _ADDON_INSTANCE + if xbmcaddon is None: + return None + if _ADDON_INSTANCE is None: + _ADDON_INSTANCE = xbmcaddon.Addon() + return _ADDON_INSTANCE + + +def _playstate_key(*, plugin_name: str, title: str, season: str, episode: str) -> str: + plugin_name = (plugin_name or "").strip() + title = (title or "").strip() + season = (season or "").strip() + episode = (episode or "").strip() + return f"{plugin_name}\t{title}\t{season}\t{episode}" + + +def _playstate_path() -> str: + return _get_log_path("playstate.json") + + +def _load_playstate() -> dict[str, dict[str, object]]: + global _PLAYSTATE_CACHE + if _PLAYSTATE_CACHE is not None: + return _PLAYSTATE_CACHE + path = _playstate_path() + try: + if xbmcvfs and xbmcvfs.exists(path): + handle = xbmcvfs.File(path) + raw = handle.read() + handle.close() + else: + with open(path, "r", encoding="utf-8") as handle: + raw = handle.read() + data = json.loads(raw or "{}") + if isinstance(data, dict): + normalized: dict[str, dict[str, object]] = {} + for key, value in data.items(): + if isinstance(key, str) and isinstance(value, dict): + normalized[key] = dict(value) + _PLAYSTATE_CACHE = normalized + return normalized + except Exception: + pass + _PLAYSTATE_CACHE = {} + return {} + + +def _save_playstate(state: dict[str, dict[str, object]]) -> None: + global _PLAYSTATE_CACHE + _PLAYSTATE_CACHE = state + path = _playstate_path() + try: + payload = json.dumps(state, ensure_ascii=False, sort_keys=True) + except Exception: + return + try: + if xbmcvfs: + directory = os.path.dirname(path) + if directory and not xbmcvfs.exists(directory): + xbmcvfs.mkdirs(directory) + handle = xbmcvfs.File(path, "w") + handle.write(payload) + handle.close() + else: + with open(path, "w", encoding="utf-8") as handle: + handle.write(payload) + except Exception: + return + + +def _get_playstate(key: str) -> dict[str, object]: + return dict(_load_playstate().get(key, {}) or {}) + + +def _set_playstate(key: str, value: dict[str, 
object]) -> None: + state = _load_playstate() + if value: + state[key] = dict(value) + else: + state.pop(key, None) + _save_playstate(state) + + +def _apply_playstate_to_info(info_labels: dict[str, object], playstate: dict[str, object]) -> dict[str, object]: + info_labels = dict(info_labels or {}) + watched = bool(playstate.get("watched") or False) + resume_position = playstate.get("resume_position") + resume_total = playstate.get("resume_total") + if watched: + info_labels["playcount"] = 1 + info_labels.pop("resume_position", None) + info_labels.pop("resume_total", None) + else: + try: + pos = int(resume_position) if resume_position is not None else 0 + tot = int(resume_total) if resume_total is not None else 0 + except Exception: + pos, tot = 0, 0 + if pos > 0 and tot > 0: + info_labels["resume_position"] = pos + info_labels["resume_total"] = tot + return info_labels + + +def _time_label(seconds: int) -> str: + try: + seconds = int(seconds or 0) + except Exception: + seconds = 0 + if seconds <= 0: + return "" + hours = seconds // 3600 + minutes = (seconds % 3600) // 60 + secs = seconds % 60 + if hours > 0: + return f"{hours:02d}:{minutes:02d}:{secs:02d}" + return f"{minutes:02d}:{secs:02d}" + + +def _label_with_playstate(label: str, playstate: dict[str, object]) -> str: + watched = bool(playstate.get("watched") or False) + if watched: + return f"✓ {label}" + resume_pos = playstate.get("resume_position") + try: + pos = int(resume_pos) if resume_pos is not None else 0 + except Exception: + pos = 0 + if pos > 0: + return f"↩ {_time_label(pos)} {label}" + return label + + +def _title_playstate(plugin_name: str, title: str) -> dict[str, object]: + return _get_playstate(_playstate_key(plugin_name=plugin_name, title=title, season="", episode="")) + + +def _season_playstate(plugin_name: str, title: str, season: str) -> dict[str, object]: + return _get_playstate(_playstate_key(plugin_name=plugin_name, title=title, season=season, episode="")) + + +def 
_get_setting_string(setting_id: str) -> str: + if xbmcaddon is None: + return "" + addon = _get_addon() + if addon is None: + return "" + getter = getattr(addon, "getSettingString", None) + if callable(getter): + try: + return str(getter(setting_id) or "") + except TypeError: + return "" + getter = getattr(addon, "getSetting", None) + if callable(getter): + try: + return str(getter(setting_id) or "") + except TypeError: + return "" + return "" + + +def _get_setting_bool(setting_id: str, *, default: bool = False) -> bool: + if xbmcaddon is None: + return default + addon = _get_addon() + if addon is None: + return default + getter = getattr(addon, "getSettingBool", None) + if callable(getter): + # Kodi kann für unbekannte Settings stillschweigend `False` liefern. + # Damit neue Settings mit `default=True` korrekt funktionieren, prüfen wir auf leeren Raw-Value. + raw_getter = getattr(addon, "getSetting", None) + if callable(raw_getter): + try: + raw = str(raw_getter(setting_id) or "").strip() + except TypeError: + raw = "" + if raw == "": + return default + try: + return bool(getter(setting_id)) + except TypeError: + return default + getter = getattr(addon, "getSetting", None) + if callable(getter): + try: + raw = str(getter(setting_id) or "").strip().lower() + except TypeError: + return default + if raw in {"true", "1", "yes", "on"}: + return True + if raw in {"false", "0", "no", "off"}: + return False + return default + + +def _apply_video_info(item, info_labels: dict[str, object] | None, cast: list[TmdbCastMember] | None) -> None: + """Setzt Metadaten bevorzugt via InfoTagVideo (Kodi v20+), mit Fallback auf deprecated APIs.""" + + if not info_labels and not cast: + return + + info_labels = dict(info_labels or {}) + + get_tag = getattr(item, "getVideoInfoTag", None) + tag = None + if callable(get_tag): + try: + tag = get_tag() + except Exception: + tag = None + + if tag is not None: + try: + title = info_labels.get("title") or "" + plot = info_labels.get("plot") or 
"" + mediatype = info_labels.get("mediatype") or "" + tvshowtitle = info_labels.get("tvshowtitle") or "" + season = info_labels.get("season") + episode = info_labels.get("episode") + rating = info_labels.get("rating") + votes = info_labels.get("votes") + duration = info_labels.get("duration") + playcount = info_labels.get("playcount") + resume_position = info_labels.get("resume_position") + resume_total = info_labels.get("resume_total") + + setter = getattr(tag, "setTitle", None) + if callable(setter) and title: + setter(str(title)) + setter = getattr(tag, "setPlot", None) + if callable(setter) and plot: + setter(str(plot)) + setter = getattr(tag, "setMediaType", None) + if callable(setter) and mediatype: + setter(str(mediatype)) + setter = getattr(tag, "setTvShowTitle", None) + if callable(setter) and tvshowtitle: + setter(str(tvshowtitle)) + setter = getattr(tag, "setSeason", None) + if callable(setter) and season not in (None, "", 0, "0"): + setter(int(season)) # type: ignore[arg-type] + setter = getattr(tag, "setEpisode", None) + if callable(setter) and episode not in (None, "", 0, "0"): + setter(int(episode)) # type: ignore[arg-type] + + if rating not in (None, "", 0, "0"): + try: + rating_f = float(rating) # type: ignore[arg-type] + except Exception: + rating_f = 0.0 + if rating_f: + set_rating = getattr(tag, "setRating", None) + if callable(set_rating): + try: + if votes not in (None, "", 0, "0"): + set_rating(rating_f, int(votes), "tmdb") # type: ignore[misc] + else: + set_rating(rating_f) # type: ignore[misc] + except Exception: + try: + set_rating(rating_f, int(votes or 0), "tmdb", True) # type: ignore[misc] + except Exception: + pass + + if duration not in (None, "", 0, "0"): + try: + duration_i = int(duration) # type: ignore[arg-type] + except Exception: + duration_i = 0 + if duration_i: + set_duration = getattr(tag, "setDuration", None) + if callable(set_duration): + try: + set_duration(duration_i) + except Exception: + pass + + if playcount not in 
(None, "", 0, "0"): + try: + playcount_i = int(playcount) # type: ignore[arg-type] + except Exception: + playcount_i = 0 + if playcount_i: + set_playcount = getattr(tag, "setPlaycount", None) + if callable(set_playcount): + try: + set_playcount(playcount_i) + except Exception: + pass + + try: + pos = int(resume_position) if resume_position is not None else 0 + tot = int(resume_total) if resume_total is not None else 0 + except Exception: + pos, tot = 0, 0 + if pos > 0 and tot > 0: + set_resume = getattr(tag, "setResumePoint", None) + if callable(set_resume): + try: + set_resume(pos, tot) + except Exception: + try: + set_resume(pos) # type: ignore[misc] + except Exception: + pass + + if cast: + set_cast = getattr(tag, "setCast", None) + actor_cls = getattr(xbmc, "Actor", None) + if callable(set_cast) and actor_cls is not None: + actors = [] + for index, member in enumerate(cast[:30]): + try: + actors.append(actor_cls(member.name, member.role, index, member.thumb)) + except Exception: + try: + actors.append(actor_cls(member.name, member.role)) + except Exception: + continue + try: + set_cast(actors) + except Exception: + pass + elif callable(set_cast): + cast_dicts = [ + {"name": m.name, "role": m.role, "thumbnail": m.thumb} + for m in cast[:30] + if m.name + ] + try: + set_cast(cast_dicts) + except Exception: + pass + + return + except Exception: + # Fallback below + pass + + # Deprecated fallback for older Kodi. 
+ try: + item.setInfo("video", info_labels) # type: ignore[arg-type] + except Exception: + pass + if cast: + set_cast = getattr(item, "setCast", None) + if callable(set_cast): + try: + set_cast([m.name for m in cast[:30] if m.name]) + except Exception: + pass + + +def _get_log_path(filename: str) -> str: + if xbmcaddon and xbmcvfs: + addon = xbmcaddon.Addon() + profile = xbmcvfs.translatePath(addon.getAddonInfo("profile")) + log_dir = os.path.join(profile, "logs") + if not xbmcvfs.exists(log_dir): + xbmcvfs.mkdirs(log_dir) + return os.path.join(log_dir, filename) + return os.path.join(os.path.dirname(__file__), filename) + + +def _tmdb_file_log(message: str) -> None: + global _TMDB_LOG_PATH + if _TMDB_LOG_PATH is None: + _TMDB_LOG_PATH = _get_log_path("tmdb.log") + timestamp = datetime.utcnow().isoformat(timespec="seconds") + "Z" + line = f"{timestamp}\t{message}\n" + try: + with open(_TMDB_LOG_PATH, "a", encoding="utf-8") as handle: + handle.write(line) + except Exception: + if xbmcvfs is None: + return + try: + handle = xbmcvfs.File(_TMDB_LOG_PATH, "a") + handle.write(line) + handle.close() + except Exception: + return + + +def _tmdb_labels_and_art(title: str) -> tuple[dict[str, str], dict[str, str], list[TmdbCastMember]]: + title_key = (title or "").strip().casefold() + language = _get_setting_string("tmdb_language").strip() or "de-DE" + show_plot = _get_setting_bool("tmdb_show_plot", default=True) + show_art = _get_setting_bool("tmdb_show_art", default=True) + show_fanart = _get_setting_bool("tmdb_show_fanart", default=True) + show_rating = _get_setting_bool("tmdb_show_rating", default=True) + show_votes = _get_setting_bool("tmdb_show_votes", default=False) + show_cast = _get_setting_bool("tmdb_show_cast", default=False) + flags = f"p{int(show_plot)}a{int(show_art)}f{int(show_fanart)}r{int(show_rating)}v{int(show_votes)}c{int(show_cast)}" + cache_key = f"{language}|{flags}|{title_key}" + cached = _TMDB_CACHE.get(cache_key) + if cached is not None: + info, art = 
cached + # Cast wird nicht in _TMDB_CACHE gehalten (weil es ListItem.setCast betrifft), daher separat cachen: + cast_cached = _TMDB_CAST_CACHE.get(cache_key, []) + return info, art, list(cast_cached) + + info_labels: dict[str, str] = {"title": title} + art: dict[str, str] = {} + cast: list[TmdbCastMember] = [] + query = (title or "").strip() + api_key = _get_setting_string("tmdb_api_key").strip() + log_requests = _get_setting_bool("tmdb_log_requests", default=False) + log_responses = _get_setting_bool("tmdb_log_responses", default=False) + if api_key: + try: + log_fn = _tmdb_file_log if (log_requests or log_responses) else None + # Einige Plugins liefern Titel wie "… – Der Film". Für TMDB ist oft der Basistitel besser. + candidates: list[str] = [] + if query: + candidates.append(query) + simplified = re.sub(r"\s*[-–]\s*der\s+film\s*$", "", query, flags=re.IGNORECASE).strip() + if simplified and simplified not in candidates: + candidates.append(simplified) + + meta = None + is_tv = False + for candidate in candidates: + meta = lookup_tv_show( + title=candidate, + api_key=api_key, + language=language, + log=log_fn, + log_responses=log_responses, + include_cast=show_cast, + ) + if meta: + is_tv = True + break + if not meta: + for candidate in candidates: + movie = lookup_movie( + title=candidate, + api_key=api_key, + language=language, + log=log_fn, + log_responses=log_responses, + include_cast=show_cast, + ) + if movie: + meta = movie + break + except Exception as exc: + try: + _tmdb_file_log(f"TMDB ERROR lookup_failed title={title!r} error={exc!r}") + except Exception: + pass + _log(f"TMDB Meta fehlgeschlagen: {exc}", xbmc.LOGDEBUG) + meta = None + if meta: + # Nur TV-IDs cachen (für Staffel-/Episoden-Lookups); Movie-IDs würden dort fehlschlagen. 
+ if is_tv: + _TMDB_ID_CACHE[title_key] = int(getattr(meta, "tmdb_id", 0) or 0) + info_labels.setdefault("mediatype", "tvshow") + else: + info_labels.setdefault("mediatype", "movie") + if show_plot and getattr(meta, "plot", ""): + info_labels["plot"] = getattr(meta, "plot", "") + runtime_minutes = int(getattr(meta, "runtime_minutes", 0) or 0) + if runtime_minutes > 0 and not is_tv: + info_labels["duration"] = str(runtime_minutes * 60) + rating = getattr(meta, "rating", 0.0) or 0.0 + votes = getattr(meta, "votes", 0) or 0 + if show_rating and rating: + # Kodi akzeptiert je nach Version float oder string; wir bleiben bei strings wie im restlichen Code. + info_labels["rating"] = str(rating) + if show_votes and votes: + info_labels["votes"] = str(votes) + if show_art and getattr(meta, "poster", ""): + poster = getattr(meta, "poster", "") + art.update({"thumb": poster, "poster": poster, "icon": poster}) + if show_fanart and getattr(meta, "fanart", ""): + fanart = getattr(meta, "fanart", "") + if fanart: + art.update({"fanart": fanart, "landscape": fanart}) + if show_cast: + cast = list(getattr(meta, "cast", []) or []) + elif log_requests or log_responses: + _tmdb_file_log(f"TMDB MISS title={title!r}") + + _TMDB_CACHE[cache_key] = (info_labels, art) + _TMDB_CAST_CACHE[cache_key] = list(cast) + return info_labels, art, list(cast) + + +async def _tmdb_labels_and_art_bulk_async( + titles: list[str], +) -> dict[str, tuple[dict[str, str], dict[str, str], list[TmdbCastMember]]]: + titles = [str(t).strip() for t in (titles or []) if t and str(t).strip()] + if not titles: + return {} + + unique_titles: list[str] = list(dict.fromkeys(titles)) + limit = _tmdb_prefetch_concurrency() + semaphore = asyncio.Semaphore(limit) + + async def fetch_one(title: str): + async with semaphore: + return title, await asyncio.to_thread(_tmdb_labels_and_art, title) + + tasks = [fetch_one(title) for title in unique_titles] + results = await asyncio.gather(*tasks, return_exceptions=True) + mapped: 
dict[str, tuple[dict[str, str], dict[str, str], list[TmdbCastMember]]] = {} + for entry in results: + if isinstance(entry, Exception): + continue + try: + title, payload = entry + except Exception: + continue + if isinstance(title, str) and isinstance(payload, tuple) and len(payload) == 3: + mapped[title] = payload # type: ignore[assignment] + return mapped + + +def _tmdb_labels_and_art_bulk( + titles: list[str], +) -> dict[str, tuple[dict[str, str], dict[str, str], list[TmdbCastMember]]]: + return _run_async(_tmdb_labels_and_art_bulk_async(titles)) + + +def _tmdb_episode_labels_and_art(*, title: str, season_label: str, episode_label: str) -> tuple[dict[str, str], dict[str, str]]: + title_key = (title or "").strip().casefold() + tmdb_id = _TMDB_ID_CACHE.get(title_key) + if not tmdb_id: + _tmdb_labels_and_art(title) + tmdb_id = _TMDB_ID_CACHE.get(title_key) + if not tmdb_id: + return {"title": episode_label}, {} + + season_number = _extract_first_int(season_label) + episode_number = _extract_first_int(episode_label) + if season_number is None or episode_number is None: + return {"title": episode_label}, {} + + language = _get_setting_string("tmdb_language").strip() or "de-DE" + show_plot = _get_setting_bool("tmdb_show_plot", default=True) + show_art = _get_setting_bool("tmdb_show_art", default=True) + flags = f"p{int(show_plot)}a{int(show_art)}" + season_key = (tmdb_id, season_number, language, flags) + cached_season = _TMDB_SEASON_CACHE.get(season_key) + if cached_season is None: + api_key = _get_setting_string("tmdb_api_key").strip() + if not api_key: + return {"title": episode_label}, {} + log_requests = _get_setting_bool("tmdb_log_requests", default=False) + log_responses = _get_setting_bool("tmdb_log_responses", default=False) + log_fn = _tmdb_file_log if (log_requests or log_responses) else None + try: + season_meta = lookup_tv_season( + tmdb_id=tmdb_id, + season_number=season_number, + api_key=api_key, + language=language, + log=log_fn, + 
log_responses=log_responses, + ) + except Exception as exc: + if log_fn: + log_fn(f"TMDB ERROR season_lookup_failed tmdb_id={tmdb_id} season={season_number} error={exc!r}") + season_meta = None + mapped: dict[int, tuple[dict[str, str], dict[str, str]]] = {} + if season_meta: + for ep_no, ep in season_meta.items(): + info: dict[str, str] = {"title": f"Episode {ep_no}"} + if show_plot and ep.plot: + info["plot"] = ep.plot + if getattr(ep, "runtime_minutes", 0): + info["duration"] = str(int(getattr(ep, "runtime_minutes", 0)) * 60) + art: dict[str, str] = {} + if show_art and ep.thumb: + art = {"thumb": ep.thumb} + mapped[ep_no] = (info, art) + _TMDB_SEASON_CACHE[season_key] = mapped + cached_season = mapped + + return cached_season.get(episode_number, ({"title": episode_label}, {})) + + +def _tmdb_episode_cast(*, title: str, season_label: str, episode_label: str) -> list[TmdbCastMember]: + show_episode_cast = _get_setting_bool("tmdb_show_episode_cast", default=False) + if not show_episode_cast: + return [] + + title_key = (title or "").strip().casefold() + tmdb_id = _TMDB_ID_CACHE.get(title_key) + if not tmdb_id: + _tmdb_labels_and_art(title) + tmdb_id = _TMDB_ID_CACHE.get(title_key) + if not tmdb_id: + return [] + + season_number = _extract_first_int(season_label) + episode_number = _extract_first_int(episode_label) + if season_number is None or episode_number is None: + return [] + + language = _get_setting_string("tmdb_language").strip() or "de-DE" + cache_key = (tmdb_id, season_number, episode_number, language) + cached = _TMDB_EPISODE_CAST_CACHE.get(cache_key) + if cached is not None: + return list(cached) + + api_key = _get_setting_string("tmdb_api_key").strip() + if not api_key: + _TMDB_EPISODE_CAST_CACHE[cache_key] = [] + return [] + + log_requests = _get_setting_bool("tmdb_log_requests", default=False) + log_responses = _get_setting_bool("tmdb_log_responses", default=False) + log_fn = _tmdb_file_log if (log_requests or log_responses) else None + try: + cast = 
fetch_tv_episode_credits( + tmdb_id=tmdb_id, + season_number=season_number, + episode_number=episode_number, + api_key=api_key, + language=language, + log=log_fn, + log_responses=log_responses, + ) + except Exception as exc: + if log_fn: + log_fn( + f"TMDB ERROR episode_credits_failed tmdb_id={tmdb_id} season={season_number} episode={episode_number} error={exc!r}" + ) + cast = [] + _TMDB_EPISODE_CAST_CACHE[cache_key] = list(cast) + return list(cast) + + +def _add_directory_item( + handle: int, + label: str, + action: str, + params: dict[str, str] | None = None, + *, + is_folder: bool = True, + info_labels: dict[str, str] | None = None, + art: dict[str, str] | None = None, + cast: list[TmdbCastMember] | None = None, +) -> None: + """Fuegt einen Eintrag (Folder oder Playable) in die Kodi-Liste ein.""" + query: dict[str, str] = {"action": action} + if params: + query.update(params) + url = f"{sys.argv[0]}?{urlencode(query)}" + item = xbmcgui.ListItem(label=label) + if not is_folder: + try: + item.setProperty("IsPlayable", "true") + except Exception: + pass + _apply_video_info(item, info_labels, cast) + if art: + setter = getattr(item, "setArt", None) + if callable(setter): + try: + setter(art) + except Exception: + pass + xbmcplugin.addDirectoryItem(handle=handle, url=url, listitem=item, isFolder=is_folder) + + +def _show_root_menu() -> None: + handle = _get_handle() + _log("Root-Menue wird angezeigt.") + _add_directory_item(handle, "Globale Suche", "search") + + plugins = _discover_plugins() + for plugin_name in sorted(plugins.keys(), key=lambda value: value.casefold()): + display = f"{plugin_name}" + _add_directory_item(handle, display, "plugin_menu", {"plugin": plugin_name}, is_folder=True) + + _add_directory_item(handle, "Einstellungen", "settings") + xbmcplugin.endOfDirectory(handle) + + +def _show_plugin_menu(plugin_name: str) -> None: + handle = _get_handle() + plugin_name = (plugin_name or "").strip() + plugin = _discover_plugins().get(plugin_name) + if not 
plugin: + xbmcgui.Dialog().notification("Plugin", "Plugin nicht gefunden.", xbmcgui.NOTIFICATION_INFO, 3000) + xbmcplugin.endOfDirectory(handle) + return + + xbmcplugin.setPluginCategory(handle, plugin_name) + + _add_directory_item(handle, "Suche", "plugin_search", {"plugin": plugin_name}, is_folder=True) + + if _plugin_has_capability(plugin, "new_titles"): + _add_directory_item(handle, "Neue Titel", "new_titles", {"plugin": plugin_name, "page": "1"}, is_folder=True) + + if _plugin_has_capability(plugin, "latest_episodes"): + _add_directory_item(handle, "Neueste Folgen", "latest_episodes", {"plugin": plugin_name, "page": "1"}, is_folder=True) + + if _plugin_has_capability(plugin, "genres"): + _add_directory_item(handle, "Genres", "genres", {"plugin": plugin_name}, is_folder=True) + + if _plugin_has_capability(plugin, "popular_series"): + _add_directory_item(handle, "Meist gesehen", "popular", {"plugin": plugin_name, "page": "1"}, is_folder=True) + + xbmcplugin.endOfDirectory(handle) + + +def _show_plugin_search(plugin_name: str) -> None: + plugin_name = (plugin_name or "").strip() + plugin = _discover_plugins().get(plugin_name) + if not plugin: + xbmcgui.Dialog().notification("Suche", "Plugin nicht gefunden.", xbmcgui.NOTIFICATION_INFO, 3000) + _show_root_menu() + return + + _log(f"Plugin-Suche gestartet: {plugin_name}") + dialog = xbmcgui.Dialog() + query = dialog.input(f"{plugin_name}: Titel eingeben", type=xbmcgui.INPUT_ALPHANUM).strip() + if not query: + _log("Plugin-Suche abgebrochen (leere Eingabe).", xbmc.LOGDEBUG) + _show_plugin_menu(plugin_name) + return + _log(f"Plugin-Suchbegriff ({plugin_name}): {query}", xbmc.LOGDEBUG) + _show_plugin_search_results(plugin_name, query) + + +def _show_plugin_search_results(plugin_name: str, query: str) -> None: + handle = _get_handle() + plugin_name = (plugin_name or "").strip() + query = (query or "").strip() + plugin = _discover_plugins().get(plugin_name) + if not plugin: + xbmcgui.Dialog().notification("Suche", 
"Plugin nicht gefunden.", xbmcgui.NOTIFICATION_INFO, 3000) + xbmcplugin.endOfDirectory(handle) + return + + xbmcplugin.setPluginCategory(handle, f"{plugin_name}: {query}") + _set_content(handle, "movies" if plugin_name.casefold() == "einschalten" else "tvshows") + _log(f"Suche nach Titeln (Plugin={plugin_name}): {query}") + + try: + results = _run_async(plugin.search_titles(query)) + except Exception as exc: + _log(f"Suche fehlgeschlagen ({plugin_name}): {exc}", xbmc.LOGWARNING) + xbmcgui.Dialog().notification("Suche", "Suche fehlgeschlagen.", xbmcgui.NOTIFICATION_INFO, 3000) + xbmcplugin.endOfDirectory(handle) + return + + results = [str(t).strip() for t in (results or []) if t and str(t).strip()] + results.sort(key=lambda value: value.casefold()) + tmdb_prefetched: dict[str, tuple[dict[str, str], dict[str, str], list[TmdbCastMember]]] = {} + if results: + with _busy_dialog(): + tmdb_prefetched = _tmdb_labels_and_art_bulk(list(results)) + for title in results: + info_labels, art, cast = tmdb_prefetched.get(title, _tmdb_labels_and_art(title)) + info_labels = dict(info_labels or {}) + info_labels.setdefault("mediatype", "tvshow") + if (info_labels.get("mediatype") or "").strip().casefold() == "tvshow": + info_labels.setdefault("tvshowtitle", title) + playstate = _title_playstate(plugin_name, title) + merged_info = _apply_playstate_to_info(dict(info_labels), playstate) + display_label = _label_with_duration(title, info_labels) + display_label = _label_with_playstate(display_label, playstate) + direct_play = bool(plugin_name.casefold() == "einschalten" and _get_setting_bool("einschalten_enable_playback", default=False)) + _add_directory_item( + handle, + display_label, + "play_movie" if direct_play else "seasons", + {"plugin": plugin_name, "title": title}, + is_folder=not direct_play, + info_labels=merged_info, + art=art, + cast=cast, + ) + xbmcplugin.endOfDirectory(handle) + + +def _import_plugin_module(path: Path) -> ModuleType: + spec = 
importlib.util.spec_from_file_location(path.stem, path)
    if spec is None or spec.loader is None:
        raise ImportError(f"Modul-Spezifikation fuer {path.name} fehlt.")
    module = importlib.util.module_from_spec(spec)
    # Register before exec so the module can resolve itself during import;
    # roll the registration back if execution fails.
    sys.modules[spec.name] = module
    try:
        spec.loader.exec_module(module)
    except Exception:
        sys.modules.pop(spec.name, None)
        raise
    return module


def _discover_plugins() -> dict[str, BasisPlugin]:
    """Load all plugins from `plugins/*.py` and cache the instances in RAM."""
    global _PLUGIN_CACHE
    if _PLUGIN_CACHE is not None:
        return _PLUGIN_CACHE
    # Plugins are loaded dynamically from `plugins/*.py` so integrations can be
    # developed separately and disabled in isolation when they fail.
    plugins: dict[str, BasisPlugin] = {}
    if not PLUGIN_DIR.exists():
        _PLUGIN_CACHE = plugins
        return plugins
    for file_path in sorted(PLUGIN_DIR.glob("*.py")):
        # Underscore-prefixed files are treated as private helpers, not plugins.
        if file_path.name.startswith("_"):
            continue
        try:
            module = _import_plugin_module(file_path)
        except Exception as exc:
            xbmc.log(f"Plugin-Datei {file_path.name} konnte nicht geladen werden: {exc}", xbmc.LOGWARNING)
            continue
        plugin_classes = [
            obj
            for obj in module.__dict__.values()
            if inspect.isclass(obj) and issubclass(obj, BasisPlugin) and obj is not BasisPlugin
        ]
        for cls in plugin_classes:
            try:
                instance = cls()
            except Exception as exc:
                xbmc.log(f"Plugin {cls.__name__} konnte nicht geladen werden: {exc}", xbmc.LOGWARNING)
                continue
            # Plugins may opt out at runtime (e.g. missing config) via is_available.
            if getattr(instance, "is_available", True) is False:
                reason = getattr(instance, "unavailable_reason", "Nicht verfuegbar.")
                xbmc.log(f"Plugin {cls.__name__} deaktiviert: {reason}", xbmc.LOGWARNING)
                continue
            plugins[instance.name] = instance
    _PLUGIN_CACHE = plugins
    return plugins


def _run_async(coro):
    """Run a coroutine to completion, even if Kodi already has an event loop.

    NOTE(review): asyncio.get_event_loop() is deprecated for this use since
    Python 3.10 — presumably fine on Kodi's bundled interpreter; verify.
    """
    try:
        loop = asyncio.get_event_loop()
    except RuntimeError:
        loop = None
    if loop and loop.is_running():
        # A loop is already running in this thread: use a throwaway loop
        # instead, since run_until_complete() cannot be nested.
        temp_loop = asyncio.new_event_loop()
        try:
            return temp_loop.run_until_complete(coro)
        finally:
            temp_loop.close()
    return asyncio.run(coro)


def _show_search() -> None:
    """Prompt for a query and run the global (all-plugins) title search."""
    _log("Suche gestartet.")
    dialog = xbmcgui.Dialog()
    query = dialog.input("Serientitel eingeben", type=xbmcgui.INPUT_ALPHANUM).strip()
    if not query:
        # Empty input means the user cancelled the keyboard dialog.
        _log("Suche abgebrochen (leere Eingabe).", xbmc.LOGDEBUG)
        _show_root_menu()
        return
    _log(f"Suchbegriff: {query}", xbmc.LOGDEBUG)
    _show_search_results(query)


def _show_search_results(query: str) -> None:
    """Search every discovered plugin for `query` and list all hits together.

    A failing plugin is logged and skipped; its results are simply absent.
    Labels are suffixed with the plugin name to disambiguate duplicates.
    """
    handle = _get_handle()
    _log(f"Suche nach Titeln: {query}")
    _set_content(handle, "tvshows")
    plugins = _discover_plugins()
    if not plugins:
        xbmcgui.Dialog().notification("Suche", "Keine Plugins gefunden.", xbmcgui.NOTIFICATION_INFO, 3000)
        xbmcplugin.endOfDirectory(handle)
        return
    for plugin_name, plugin in plugins.items():
        try:
            results = _run_async(plugin.search_titles(query))
        except Exception as exc:
            _log(f"Suche fehlgeschlagen ({plugin_name}): {exc}", xbmc.LOGWARNING)
            continue
        _log(f"Treffer ({plugin_name}): {len(results)}", xbmc.LOGDEBUG)
        # Bulk-prefetch TMDB metadata per plugin; per-title lookup is the fallback.
        tmdb_prefetched: dict[str, tuple[dict[str, str], dict[str, str], list[TmdbCastMember]]] = {}
        if results:
            with _busy_dialog():
                tmdb_prefetched = _tmdb_labels_and_art_bulk(list(results))
        for title in results:
            info_labels, art, cast = tmdb_prefetched.get(title, _tmdb_labels_and_art(title))
            info_labels = dict(info_labels or {})
            info_labels.setdefault("mediatype", "tvshow")
            if (info_labels.get("mediatype") or "").strip().casefold() == "tvshow":
                info_labels.setdefault("tvshowtitle", title)
            playstate = _title_playstate(plugin_name, title)
            merged_info = _apply_playstate_to_info(dict(info_labels), playstate)
            label = _label_with_duration(title, info_labels)
            label = _label_with_playstate(label, playstate)
            label = f"{label} [{plugin_name}]"
            direct_play = bool(
                plugin_name.casefold() == "einschalten" and _get_setting_bool("einschalten_enable_playback",
default=False)
            )
            _add_directory_item(
                handle,
                label,
                "play_movie" if direct_play else "seasons",
                {"plugin": plugin_name, "title": title},
                is_folder=not direct_play,
                info_labels=merged_info,
                art=art,
                cast=cast,
            )
    xbmcplugin.endOfDirectory(handle)


def _show_seasons(plugin_name: str, title: str) -> None:
    """List the seasons of `title`, or a single playable item for movies.

    Movie shortcuts (no network round-trip) are taken when the plugin is
    "einschalten" with playback enabled, or when the plugin's optional
    `is_movie(title)` hook answers truthily. Season rows are optionally
    enriched with TMDB plot/poster via a module-level cache.
    """
    handle = _get_handle()
    _log(f"Staffeln laden: {plugin_name} / {title}")
    plugin = _discover_plugins().get(plugin_name)
    if plugin is None:
        xbmcgui.Dialog().notification("Staffeln", "Plugin nicht gefunden.", xbmcgui.NOTIFICATION_INFO, 3000)
        xbmcplugin.endOfDirectory(handle)
        return

    # "einschalten" delivers movies. With playback enabled, opening the title
    # should immediately show a single playable item. Important: no extra
    # network requests here (otherwise Kodi may hang in the busy spinner).
    if (plugin_name or "").casefold() == "einschalten" and _get_setting_bool("einschalten_enable_playback", default=False):
        xbmcplugin.setPluginCategory(handle, title)
        _set_content(handle, "movies")
        playstate = _title_playstate(plugin_name, title)
        info_labels: dict[str, object] = {"title": title, "mediatype": "movie"}
        info_labels = _apply_playstate_to_info(info_labels, playstate)
        display_label = _label_with_playstate(title, playstate)
        _add_directory_item(
            handle,
            display_label,
            "play_movie",
            {"plugin": plugin_name, "title": title},
            is_folder=False,
            info_labels=info_labels,
        )
        xbmcplugin.endOfDirectory(handle)
        return

    # Optional: plugins can answer quickly (without a detail request) whether a
    # title is a movie. If so, show a single playable item right away.
    is_movie = getattr(plugin, "is_movie", None)
    if callable(is_movie):
        try:
            if bool(is_movie(title)):
                xbmcplugin.setPluginCategory(handle, title)
                _set_content(handle, "movies")
                playstate = _title_playstate(plugin_name, title)
                info_labels: dict[str, object] = {"title": title, "mediatype": "movie"}
                info_labels = _apply_playstate_to_info(info_labels, playstate)
                display_label = _label_with_playstate(title, playstate)
                _add_directory_item(
                    handle,
                    display_label,
                    "play_movie",
                    {"plugin": plugin_name, "title": title},
                    is_folder=False,
                    info_labels=info_labels,
                )
                xbmcplugin.endOfDirectory(handle)
                return
        except Exception:
            # Best-effort hook: a failing is_movie() must not break browsing.
            pass

    # Optional plugin-supplied title metadata (labels/art/cast), best-effort.
    title_info_labels: dict[str, str] | None = None
    title_art: dict[str, str] | None = None
    title_cast: list[TmdbCastMember] | None = None
    meta_getter = getattr(plugin, "metadata_for", None)
    if callable(meta_getter):
        try:
            with _busy_dialog():
                meta_labels, meta_art, meta_cast = meta_getter(title)
            if isinstance(meta_labels, dict):
                title_info_labels = {str(k): str(v) for k, v in meta_labels.items() if v}
            if isinstance(meta_art, dict):
                title_art = {str(k): str(v) for k, v in meta_art.items() if v}
            if isinstance(meta_cast, list):
                # type: ignore[assignment] - plugins may return cast in their own shape; best-effort only
                title_cast = meta_cast  # noqa: PGH003
        except Exception:
            pass

    try:
        seasons = plugin.seasons_for(title)
    except Exception as exc:
        _log(f"Staffeln laden fehlgeschlagen ({plugin_name}): {exc}", xbmc.LOGWARNING)
        xbmcgui.Dialog().notification("Staffeln", "Konnte Staffeln nicht laden.", xbmcgui.NOTIFICATION_INFO, 3000)
        xbmcplugin.endOfDirectory(handle)
        return

    count = len(seasons)
    suffix = "Staffel" if count == 1 else "Staffeln"
    xbmcplugin.setPluginCategory(handle, f"{title} ({count} {suffix})")
    _set_content(handle, "seasons")
    # Season metadata (plot/poster) optionally via TMDB.
    # NOTE(review): the return value is discarded here — presumably called only
    # to warm _TMDB_ID_CACHE for the cache_key lookup below; confirm.
    _tmdb_labels_and_art(title)
    api_key = _get_setting_string("tmdb_api_key").strip()
    language = _get_setting_string("tmdb_language").strip() or "de-DE"
    show_plot = _get_setting_bool("tmdb_show_plot", default=True)
    show_art = _get_setting_bool("tmdb_show_art", default=True)
    # Settings flags are folded into the cache key so toggling them invalidates entries.
    flags = f"p{int(show_plot)}a{int(show_art)}"
    log_requests = _get_setting_bool("tmdb_log_requests", default=False)
    log_responses = _get_setting_bool("tmdb_log_responses", default=False)
    log_fn = _tmdb_file_log if (log_requests or log_responses) else None
    for season in seasons:
        info_labels: dict[str, str] | None = None
        art: dict[str, str] | None = None
        season_number = _extract_first_int(season)
        if api_key and season_number is not None:
            # cache_key[0] is the TMDB show id (0 when unknown -> skip lookup).
            cache_key = (_TMDB_ID_CACHE.get((title or "").strip().casefold(), 0), season_number, language, flags)
            cached = _TMDB_SEASON_SUMMARY_CACHE.get(cache_key)
            if cached is None and cache_key[0]:
                try:
                    meta = lookup_tv_season_summary(
                        tmdb_id=cache_key[0],
                        season_number=season_number,
                        api_key=api_key,
                        language=language,
                        log=log_fn,
                        log_responses=log_responses,
                    )
                except Exception as exc:
                    if log_fn:
                        log_fn(f"TMDB ERROR season_summary_failed tmdb_id={cache_key[0]} season={season_number} error={exc!r}")
                    meta = None
                labels = {"title": season}
                art_map: dict[str, str] = {}
                if meta:
                    if show_plot and meta.plot:
                        labels["plot"] = meta.plot
                    if show_art and meta.poster:
                        art_map = {"thumb": meta.poster, "poster": meta.poster}
                # Negative results are cached too, to avoid repeated lookups.
                cached = (labels, art_map)
                _TMDB_SEASON_SUMMARY_CACHE[cache_key] = cached
            if cached is not None:
                info_labels, art = cached
        # Title-level metadata is the base layer; season metadata wins on conflict.
        merged_labels = dict(info_labels or {})
        if title_info_labels:
            merged_labels = dict(title_info_labels)
            merged_labels.update(dict(info_labels or {}))
        season_state = _season_playstate(plugin_name, title, season)
        merged_labels = _apply_playstate_to_info(dict(merged_labels), season_state)
        merged_art: dict[str, str] | None = art
        if title_art:
            merged_art = dict(title_art)
            if isinstance(art, dict):
                merged_art.update({k: str(v) for k, v in art.items() if v})

        _add_directory_item(
            handle,
            _label_with_playstate(season, season_state),
            "episodes",
            {"plugin": plugin_name, "title": title, "season": season},
            is_folder=True,
            info_labels=merged_labels or None,
            art=merged_art,
            cast=title_cast,
        )
    xbmcplugin.endOfDirectory(handle)


def _show_episodes(plugin_name: str, title: str, season: str) -> None:
    """List the episodes of one season, enriched with TMDB show/episode metadata."""
    handle = _get_handle()
    _log(f"Episoden laden: {plugin_name} / {title} / {season}")
    plugin = _discover_plugins().get(plugin_name)
    if plugin is None:
        xbmcgui.Dialog().notification("Episoden", "Plugin nicht gefunden.", xbmcgui.NOTIFICATION_INFO, 3000)
        xbmcplugin.endOfDirectory(handle)
        return
    season_number = _extract_first_int(season)
    if season_number is not None:
        xbmcplugin.setPluginCategory(handle, f"{title} - Staffel {season_number}")
    else:
        xbmcplugin.setPluginCategory(handle, f"{title} - {season}")
    _set_content(handle, "episodes")

    episodes = list(plugin.episodes_for(title, season))
    if episodes:
        # Show-level metadata is fetched once and merged under each episode's own.
        show_info, show_art, show_cast = _tmdb_labels_and_art(title)
        show_fanart = (show_art or {}).get("fanart") if isinstance(show_art, dict) else ""
        show_poster = (show_art or {}).get("poster") if isinstance(show_art, dict) else ""
        with _busy_dialog():
            for episode in episodes:
                info_labels, art = _tmdb_episode_labels_and_art(title=title, season_label=season, episode_label=episode)
                episode_cast = _tmdb_episode_cast(title=title, season_label=season, episode_label=episode)
                merged_info = dict(show_info or {})
                merged_info.update(dict(info_labels or {}))
                merged_art: dict[str, str] = {}
                if isinstance(show_art, dict):
                    merged_art.update({k: str(v) for k, v in show_art.items() if v})
                if isinstance(art, dict):
                    merged_art.update({k: str(v) for k, v in art.items() if v})

                # Kodi's episode info dialog often depends on these fields.
                season_number = _extract_first_int(season) or 0
                episode_number = _extract_first_int(episode) or 0
                merged_info.setdefault("mediatype", "episode")
                merged_info.setdefault("tvshowtitle", title)
                if season_number:
                    merged_info.setdefault("season", str(season_number))
                if episode_number:
                    merged_info.setdefault("episode", str(episode_number))

                # Episode items without their own artwork: pass the show's
                # fanart/poster through so the list does not look empty.
                if show_fanart:
                    merged_art.setdefault("fanart", show_fanart)
                    merged_art.setdefault("landscape", show_fanart)
                if show_poster:
                    merged_art.setdefault("poster", show_poster)

                key = _playstate_key(plugin_name=plugin_name, title=title, season=season, episode=episode)
                merged_info = _apply_playstate_to_info(merged_info, _get_playstate(key))

                display_label = episode
                _add_directory_item(
                    handle,
                    display_label,
                    "play_episode",
                    {"plugin": plugin_name, "title": title, "season": season, "episode": episode},
                    is_folder=False,
                    info_labels=merged_info,
                    art=merged_art,
                    cast=episode_cast or show_cast,
                )
    xbmcplugin.endOfDirectory(handle)


def _show_genre_sources() -> None:
    """List every plugin that overrides both genre hooks of BasisPlugin.

    A plugin counts as a genre source only if it overrides BOTH
    `genres` and `titles_for_genre` (checked via class-attribute identity).
    """
    handle = _get_handle()
    _log("Genre-Quellen laden.")
    plugins = _discover_plugins()
    sources: list[tuple[str, BasisPlugin]] = []
    for plugin_name, plugin in plugins.items():
        # Identity check against the base-class attribute detects overrides.
        if plugin.__class__.genres is BasisPlugin.genres:
            continue
        if plugin.__class__.titles_for_genre is BasisPlugin.titles_for_genre:
            continue
        sources.append((plugin_name, plugin))

    if not sources:
        xbmcgui.Dialog().notification("Genres", "Keine Genre-Quellen gefunden.", xbmcgui.NOTIFICATION_INFO, 3000)
        xbmcplugin.endOfDirectory(handle)
        return

    for plugin_name, plugin in sources:
        _add_directory_item(
            handle,
            f"Genres [{plugin_name}]",
            "genres",
            {"plugin": plugin_name},
            is_folder=True,
        )
    xbmcplugin.endOfDirectory(handle)


def _show_genres(plugin_name: str) -> None:
    """List one plugin's genres; route to paged or grouped title browsing."""
    handle = _get_handle()
    _log(f"Genres laden: {plugin_name}")
    plugin = _discover_plugins().get(plugin_name)
    if plugin is None:
        xbmcgui.Dialog().notification("Genres", "Plugin nicht gefunden.", xbmcgui.NOTIFICATION_INFO, 3000)
        xbmcplugin.endOfDirectory(handle)
        return
    try:
        genres = plugin.genres()
    except Exception as exc:
        _log(f"Genres konnten nicht geladen werden ({plugin_name}): {exc}", xbmc.LOGWARNING)
        xbmcgui.Dialog().notification("Genres", "Genres konnten nicht geladen werden.", xbmcgui.NOTIFICATION_INFO, 3000)
        xbmcplugin.endOfDirectory(handle)
        return
    for genre in genres:
        # If the plugin supports paging, open the paginated title list directly.
        paging_getter = getattr(plugin, "titles_for_genre_page", None)
        if callable(paging_getter):
            _add_directory_item(
                handle,
                genre,
                "genre_titles_page",
                {"plugin": plugin_name, "genre": genre, "page": "1"},
                is_folder=True,
            )
            continue
        _add_directory_item(
            handle,
            genre,
            "genre_series",
            {"plugin": plugin_name, "genre": genre},
            is_folder=True,
        )
    xbmcplugin.endOfDirectory(handle)


def _show_genre_titles_page(plugin_name: str, genre: str, page: int = 1) -> None:
    """Render one page of a genre's titles via the plugin's paging hooks.

    Uses `titles_for_genre_page(genre, page)`; the optional hooks
    `genre_page_count(genre)` / `genre_has_more(genre, page)` drive the
    prev/next navigation items.
    """
    handle = _get_handle()
    plugin = _discover_plugins().get(plugin_name)
    if plugin is None:
        xbmcgui.Dialog().notification("Genres", "Plugin nicht gefunden.", xbmcgui.NOTIFICATION_INFO, 3000)
        xbmcplugin.endOfDirectory(handle)
        return

    page = max(1, int(page or 1))
    paging_getter = getattr(plugin, "titles_for_genre_page", None)
    if not callable(paging_getter):
        xbmcgui.Dialog().notification("Genres", "Paging nicht verfügbar.", xbmcgui.NOTIFICATION_INFO, 3000)
        xbmcplugin.endOfDirectory(handle)
        return

    # Total page count is optional; without it we fall back to has-more probing.
    total_pages = None
    count_getter = getattr(plugin, "genre_page_count", None)
    if callable(count_getter):
        try:
            total_pages = int(count_getter(genre) or 1)
        except Exception:
            total_pages = None
    if total_pages is not None:
        page = min(page, max(1, total_pages))
        xbmcplugin.setPluginCategory(handle, f"{genre} ({page}/{total_pages})")
    else:
        xbmcplugin.setPluginCategory(handle, f"{genre} ({page})")
    _set_content(handle, "movies" if (plugin_name or "").casefold() == "einschalten" else "tvshows")

    if page > 1:
        _add_directory_item(
            handle,
            "Vorherige Seite",
            "genre_titles_page",
            {"plugin": plugin_name, "genre": genre, "page": str(page - 1)},
            is_folder=True,
        )

    try:
        titles = list(paging_getter(genre, page) or [])
    except Exception as exc:
        _log(f"Genre-Seite konnte nicht geladen werden ({plugin_name}/{genre} p{page}): {exc}", xbmc.LOGWARNING)
        xbmcgui.Dialog().notification("Genres", "Seite konnte nicht geladen werden.", xbmcgui.NOTIFICATION_INFO, 3000)
        xbmcplugin.endOfDirectory(handle)
        return

    titles = [str(t).strip() for t in titles if t and str(t).strip()]
    titles.sort(key=lambda value: value.casefold())

    # TMDB enrichment for genre listings is opt-in (it costs extra requests).
    show_tmdb = _get_setting_bool("tmdb_genre_metadata", default=False)
    if titles:
        if show_tmdb:
            with _busy_dialog():
                tmdb_prefetched = _tmdb_labels_and_art_bulk(titles)
            for title in titles:
                info_labels, art, cast = tmdb_prefetched.get(title, _tmdb_labels_and_art(title))
                info_labels = dict(info_labels or {})
                info_labels.setdefault("mediatype", "tvshow")
                if (info_labels.get("mediatype") or "").strip().casefold() == "tvshow":
                    info_labels.setdefault("tvshowtitle", title)
                playstate = _title_playstate(plugin_name, title)
                info_labels = _apply_playstate_to_info(dict(info_labels), playstate)
                display_label = _label_with_duration(title, info_labels)
                display_label = _label_with_playstate(display_label, playstate)
                direct_play = bool(
                    plugin_name.casefold() == "einschalten"
                    and _get_setting_bool("einschalten_enable_playback", default=False)
                )
                _add_directory_item(
                    handle,
                    display_label,
                    "play_movie" if direct_play else "seasons",
                    {"plugin": plugin_name, "title": title},
                    is_folder=not direct_play,
                    info_labels=info_labels,
                    art=art,
                    cast=cast,
                )
        else:
            for title in titles:
                playstate = _title_playstate(plugin_name, title)
                direct_play = bool(
                    plugin_name.casefold() == "einschalten"
                    and _get_setting_bool("einschalten_enable_playback", default=False)
                )
                _add_directory_item(
                    handle,
                    _label_with_playstate(title, playstate),
                    "play_movie" if direct_play else "seasons",
                    {"plugin": plugin_name, "title": title},
                    is_folder=not direct_play,
                    info_labels=_apply_playstate_to_info({"title": title}, playstate),
                )

    # Next-page item: exact when total_pages is known, otherwise probe the
    # plugin's optional genre_has_more(genre, page) hook.
    show_next = False
    if total_pages is not None:
        show_next = page < total_pages
    else:
        has_more_getter = getattr(plugin, "genre_has_more", None)
        if callable(has_more_getter):
            try:
                show_next = bool(has_more_getter(genre, page))
            except Exception:
                show_next = False

    if show_next:
        _add_directory_item(
            handle,
            "Nächste Seite",
            "genre_titles_page",
            {"plugin": plugin_name, "genre": genre, "page": str(page + 1)},
            is_folder=True,
        )
    xbmcplugin.endOfDirectory(handle)


def _title_group_key(title: str) -> str:
    """Return the A-Z/0-9 grouping key for a title.

    The first alphanumeric character decides the group: digits map to "0-9",
    letters are upper-cased with German umlauts/ß folded to their base letter,
    and anything else (or an empty title) maps to "#".
    """
    raw = (title or "").strip()
    if not raw:
        return "#"
    for char in raw:
        if char.isdigit():
            return "0-9"
        if char.isalpha():
            normalized = char.casefold()
            # Fold German umlauts/ß so e.g. "Ärzte" sorts under "A".
            if normalized == "ä":
                normalized = "a"
            elif normalized == "ö":
                normalized = "o"
            elif normalized == "ü":
                normalized = "u"
            elif normalized == "ß":
                normalized = "s"
            return normalized.upper()
    return "#"


def _genre_title_groups() -> list[tuple[str, str]]:
    """Return the (label, group_code) pairs used to split genre listings."""
    return [
        ("A-E", "A-E"),
        ("F-J", "F-J"),
        ("K-O", "K-O"),
        ("P-T", "P-T"),
        ("U-Z", "U-Z"),
        ("0-9", "0-9"),
    ]


def _group_matches(group_code: str, title: str) -> bool:
    """Return True if `title`'s group key falls into the given group range.

    Titles with key "#" (no alphanumeric character) match no group and are
    therefore not reachable through the grouped browsing UI.
    """
    key = _title_group_key(title)
    if group_code == "0-9":
        return key == "0-9"
    if key == "0-9" or key == "#":
        return False
    if group_code == "A-E":
        return "A" <= key <= "E"
    if group_code == "F-J":
        return "F" <= key <= "J"
    if group_code == "K-O":
        return "K" <= key <= "O"
    if group_code == "P-T":
        return "P" <= key <= "T"
    if group_code == "U-Z":
        return "U" <= key <= "Z"
    return False


def _get_genre_titles(plugin_name: str, genre: str) -> list[str]:
    """Return the sorted titles for a genre, cached per (plugin, genre).

    Returns a fresh list copy each time so callers cannot mutate the cache.
    """
    cache_key = (plugin_name, genre)
    cached = _GENRE_TITLES_CACHE.get(cache_key)
    if cached is not None:
        return list(cached)
    plugin = _discover_plugins().get(plugin_name)
    if plugin is None:
        return []
    titles = plugin.titles_for_genre(genre)
    titles = [str(t).strip() for t in titles if t and str(t).strip()]
    titles.sort(key=lambda value: value.casefold())
    _GENRE_TITLES_CACHE[cache_key] = list(titles)
    return list(titles)


def _show_genre_series(plugin_name: str, genre: str) -> None:
    """Show the alphabetical group folders (A-E, ..., 0-9) for a genre."""
    handle = _get_handle()
    xbmcplugin.setPluginCategory(handle, genre)
    for label, group_code in _genre_title_groups():
        _add_directory_item(
            handle,
            label,
            "genre_series_group",
            {"plugin": plugin_name, "genre": genre, "group": group_code},
            is_folder=True,
        )
    xbmcplugin.endOfDirectory(handle)


def _parse_positive_int(value: str, *, default: int = 1) -> int:
    """Parse `value` as a positive int; fall back to `default` otherwise."""
    try:
        parsed = int(str(value or "").strip())
    except Exception:
        return default
    return parsed if parsed > 0 else default


def _popular_genre_label(plugin: BasisPlugin) -> str | None:
    """Return the plugin's POPULAR_GENRE_LABEL constant, if set and non-blank."""
    label = getattr(plugin, "POPULAR_GENRE_LABEL", None)
    if isinstance(label, str) and label.strip():
        return label.strip()
    return None


def _plugin_has_capability(plugin: BasisPlugin, capability: str) -> bool:
    """Return True if the plugin advertises `capability`.

    Plugins with a `capabilities()` hook are authoritative (errors count as
    "no capabilities"); otherwise only "popular_series" can be inferred from
    the legacy POPULAR_GENRE_LABEL constant.
    """
    getter = getattr(plugin, "capabilities", None)
    if callable(getter):
        try:
            capabilities = getter()
        except Exception:
            capabilities = set()
        try:
            return capability in set(capabilities or [])
        except Exception:
            return False
    # Backwards compatibility: Popular via POPULAR_GENRE_LABEL constant.
    if capability == "popular_series":
        return _popular_genre_label(plugin) is not None
    return False


def _plugins_with_popular() -> list[tuple[str, BasisPlugin, str]]:
    """Return (name, plugin, popular_label) for every popular-capable plugin."""
    results: list[tuple[str, BasisPlugin, str]] = []
    for plugin_name, plugin in _discover_plugins().items():
        if not _plugin_has_capability(plugin, "popular_series"):
            continue
        label = _popular_genre_label(plugin) or ""
        results.append((plugin_name, plugin, label))
    return results


def _show_popular(plugin_name: str | None = None, page: int = 1) -> None:
    """Show popular series: a paged list for one plugin, or the source menu.

    With `plugin_name` set, titles come from the plugin's `popular_series()`
    hook or, as a fallback, from `titles_for_genre(POPULAR_GENRE_LABEL)`;
    they are paged locally (10 per page). Without `plugin_name`, one folder
    per popular-capable plugin is listed.
    """
    handle = _get_handle()
    page_size = 10
    page = max(1, int(page or 1))

    if plugin_name:
        plugin = _discover_plugins().get(plugin_name)
        if plugin is None or not _plugin_has_capability(plugin, "popular_series"):
            xbmcgui.Dialog().notification("Beliebte Serien", "Plugin nicht gefunden.", xbmcgui.NOTIFICATION_INFO, 3000)
            xbmcplugin.endOfDirectory(handle)
            return
        try:
            popular_getter = getattr(plugin, "popular_series", None)
            if callable(popular_getter):
                titles = list(popular_getter() or [])
            else:
                # Legacy path: treat the popular label as a genre.
                label = _popular_genre_label(plugin)
                if not label:
                    titles = []
                else:
                    titles = list(plugin.titles_for_genre(label) or [])
        except Exception as exc:
            _log(f"Beliebte Serien konnten nicht geladen werden ({plugin_name}): {exc}", xbmc.LOGWARNING)
            xbmcgui.Dialog().notification("Beliebte Serien", "Serien konnten nicht geladen werden.", xbmcgui.NOTIFICATION_INFO, 3000)
            xbmcplugin.endOfDirectory(handle)
            return

        titles = [str(t).strip() for t in titles if t and str(t).strip()]
        titles.sort(key=lambda value: value.casefold())
        total = len(titles)
        total_pages = max(1, (total + page_size - 1) // page_size)
        page = min(page, total_pages)
        xbmcplugin.setPluginCategory(handle, f"Beliebte Serien [{plugin_name}] ({page}/{total_pages})")
        _set_content(handle, "tvshows")

        if total_pages > 1 and page > 1:
            _add_directory_item(
                handle,
                "Vorherige Seite",
                "popular",
                {"plugin": plugin_name, "page": str(page - 1)},
                is_folder=True,
            )

        start = (page - 1) * page_size
        end = start + page_size
        page_items = titles[start:end]

        # TMDB enrichment for this listing is opt-in (extra requests per page).
        show_tmdb = _get_setting_bool("tmdb_genre_metadata", default=False)
        if page_items:
            if show_tmdb:
                with _busy_dialog():
                    tmdb_prefetched = _tmdb_labels_and_art_bulk(page_items)
                for title in page_items:
                    info_labels, art, cast = tmdb_prefetched.get(title, _tmdb_labels_and_art(title))
                    info_labels = dict(info_labels or {})
                    info_labels.setdefault("mediatype", "tvshow")
                    if (info_labels.get("mediatype") or "").strip().casefold() == "tvshow":
                        info_labels.setdefault("tvshowtitle", title)
                    playstate = _title_playstate(plugin_name, title)
                    info_labels = _apply_playstate_to_info(dict(info_labels), playstate)
                    display_label = _label_with_duration(title, info_labels)
                    display_label = _label_with_playstate(display_label, playstate)
                    _add_directory_item(
                        handle,
                        display_label,
                        "seasons",
                        {"plugin": plugin_name, "title": title},
                        is_folder=True,
                        info_labels=info_labels,
                        art=art,
                        cast=cast,
                    )
            else:
                for title in page_items:
                    playstate = _title_playstate(plugin_name, title)
                    _add_directory_item(
                        handle,
                        _label_with_playstate(title, playstate),
                        "seasons",
                        {"plugin": plugin_name, "title": title},
                        is_folder=True,
                        info_labels=_apply_playstate_to_info({"title": title}, playstate),
                    )

        if total_pages > 1 and page < total_pages:
            _add_directory_item(
                handle,
                "Nächste Seite",
                "popular",
                {"plugin": plugin_name, "page": str(page + 1)},
                is_folder=True,
            )
        xbmcplugin.endOfDirectory(handle)
        return

    # No plugin selected: show one folder per popular-capable source.
    sources = _plugins_with_popular()
    if not sources:
        xbmcgui.Dialog().notification("Beliebte Serien", "Keine Quellen gefunden.", xbmcgui.NOTIFICATION_INFO, 3000)
        xbmcplugin.endOfDirectory(handle)
        return

    xbmcplugin.setPluginCategory(handle, "Beliebte Serien")
    for name, plugin, _label in sources:
        _add_directory_item(
            handle,
            f"Beliebte Serien [{plugin.name}]",
            "popular",
            {"plugin": name, "page": "1"},
            is_folder=True,
        )
    xbmcplugin.endOfDirectory(handle)


def _show_new_titles(plugin_name: str, page: int = 1) -> None:
    """Show a plugin's newest titles, paged remotely or locally.

    Plugins with a `new_titles_page(page)` hook page on the server side
    (`new_titles_has_more(page)` drives the next button); otherwise the full
    `new_titles()` list is fetched once and paged locally (10 per page).
    """
    handle = _get_handle()
    page_size = 10
    page = max(1, int(page or 1))

    plugin_name = (plugin_name or "").strip()
    plugin = _discover_plugins().get(plugin_name)
    if plugin is None or not _plugin_has_capability(plugin, "new_titles"):
        xbmcgui.Dialog().notification("Neue Titel", "Plugin nicht gefunden.", xbmcgui.NOTIFICATION_INFO, 3000)
        xbmcplugin.endOfDirectory(handle)
        return

    getter = getattr(plugin, "new_titles", None)
    if not callable(getter):
        xbmcgui.Dialog().notification("Neue Titel", "Nicht verfügbar.", xbmcgui.NOTIFICATION_INFO, 3000)
        xbmcplugin.endOfDirectory(handle)
        return

    paging_getter = getattr(plugin, "new_titles_page", None)
    has_more_getter = getattr(plugin, "new_titles_has_more", None)

    if callable(paging_getter):
        # Remote paging: total unknown, category shows the bare page number.
        xbmcplugin.setPluginCategory(handle, f"Neue Titel [{plugin_name}] ({page})")
        _set_content(handle, "movies" if plugin_name.casefold() == "einschalten" else "tvshows")
        if page > 1:
            _add_directory_item(
                handle,
                "Vorherige Seite",
                "new_titles",
                {"plugin": plugin_name, "page": str(page - 1)},
                is_folder=True,
            )
        try:
            page_items = list(paging_getter(page) or [])
        except Exception as exc:
            _log(f"Neue Titel konnten nicht geladen werden ({plugin_name} p{page}): {exc}", xbmc.LOGWARNING)
            xbmcgui.Dialog().notification("Neue Titel", "Titel konnten nicht geladen werden.", xbmcgui.NOTIFICATION_INFO, 3000)
            xbmcplugin.endOfDirectory(handle)
            return
        page_items = [str(t).strip() for t in page_items if t and str(t).strip()]
        page_items.sort(key=lambda value: value.casefold())
    else:
        # Local paging: fetch the full list, then slice below.
        try:
            titles = list(getter() or [])
        except Exception as exc:
            _log(f"Neue Titel konnten nicht geladen werden ({plugin_name}): {exc}", xbmc.LOGWARNING)
            xbmcgui.Dialog().notification("Neue Titel", "Titel konnten nicht geladen werden.", xbmcgui.NOTIFICATION_INFO, 3000)
            xbmcplugin.endOfDirectory(handle)
            return

        titles = [str(t).strip() for t in titles if t and str(t).strip()]
        titles.sort(key=lambda value: value.casefold())
        total = len(titles)
        if total == 0:
            # An empty list usually points at a misconfigured base URL/index.
            xbmcgui.Dialog().notification(
                "Neue Titel",
                "Keine Titel gefunden (Basis-URL/Index prüfen).",
                xbmcgui.NOTIFICATION_INFO,
                4000,
            )
        total_pages = max(1, (total + page_size - 1) // page_size)
        page = min(page, total_pages)
        xbmcplugin.setPluginCategory(handle, f"Neue Titel [{plugin_name}] ({page}/{total_pages})")
        _set_content(handle, "movies" if plugin_name.casefold() == "einschalten" else "tvshows")

        if total_pages > 1 and page > 1:
            _add_directory_item(
                handle,
                "Vorherige Seite",
                "new_titles",
                {"plugin": plugin_name, "page": str(page - 1)},
                is_folder=True,
            )

        start = (page - 1) * page_size
        end = start + page_size
        page_items = titles[start:end]
    # TMDB enrichment for this listing is opt-in (extra requests per page).
    show_tmdb = _get_setting_bool("tmdb_genre_metadata", default=False)
    if page_items:
        if show_tmdb:
            with _busy_dialog():
                tmdb_prefetched = _tmdb_labels_and_art_bulk(page_items)
            for title in page_items:
                info_labels, art, cast = tmdb_prefetched.get(title, _tmdb_labels_and_art(title))
                info_labels = dict(info_labels or {})
                info_labels.setdefault("mediatype", "movie")
                playstate = _title_playstate(plugin_name, title)
                info_labels = _apply_playstate_to_info(dict(info_labels), playstate)
                display_label = _label_with_duration(title, info_labels)
                display_label = _label_with_playstate(display_label, playstate)
                direct_play = bool(
                    plugin_name.casefold() == "einschalten"
                    and _get_setting_bool("einschalten_enable_playback", default=False)
                )
                _add_directory_item(
                    handle,
                    display_label,
                    "play_movie" if direct_play else "seasons",
                    {"plugin": plugin_name, "title": title},
                    is_folder=not direct_play,
                    info_labels=info_labels,
                    art=art,
                    cast=cast,
                )
        else:
            for title in page_items:
                playstate = _title_playstate(plugin_name, title)
                direct_play = bool(
                    plugin_name.casefold() == "einschalten"
                    and _get_setting_bool("einschalten_enable_playback", default=False)
                )
                _add_directory_item(
                    handle,
                    _label_with_playstate(title, playstate),
                    "play_movie" if direct_play else "seasons",
                    {"plugin": plugin_name, "title": title},
                    is_folder=not direct_play,
                    info_labels=_apply_playstate_to_info({"title": title}, playstate),
                )

    show_next = False
    if callable(paging_getter) and callable(has_more_getter):
        try:
            show_next = bool(has_more_getter(page))
        except Exception:
            show_next = False
    # NOTE(review): `"total_pages" in locals()` distinguishes the local-paging
    # branch; an explicit flag/None sentinel would be clearer — consider refactor.
    elif "total_pages" in locals():
        show_next = bool(total_pages > 1 and page < total_pages)  # type: ignore[name-defined]

    if show_next:
        _add_directory_item(
            handle,
            "Nächste Seite",
            "new_titles",
            {"plugin": plugin_name, "page": str(page + 1)},
            is_folder=True,
        )
    xbmcplugin.endOfDirectory(handle)


def _show_latest_episodes(plugin_name: str, page: int = 1) -> None:
    """List a plugin's most recent episodes as directly playable items.

    Entries come from the plugin's `latest_episodes(page)` hook and are
    expected to carry series_title/season/episode/url (airdate optional);
    malformed entries are skipped silently.
    """
    handle = _get_handle()
    plugin_name = (plugin_name or "").strip()
    plugin = _discover_plugins().get(plugin_name)
    if not plugin:
        xbmcgui.Dialog().notification("Neueste Folgen", "Plugin nicht gefunden.", xbmcgui.NOTIFICATION_INFO, 3000)
        xbmcplugin.endOfDirectory(handle)
        return

    getter = getattr(plugin, "latest_episodes", None)
    if not callable(getter):
        xbmcgui.Dialog().notification("Neueste Folgen", "Nicht unterstützt.", xbmcgui.NOTIFICATION_INFO, 3000)
        xbmcplugin.endOfDirectory(handle)
        return

    xbmcplugin.setPluginCategory(handle, f"{plugin_name}: Neueste Folgen")
    _set_content(handle, "episodes")

    try:
        with _busy_dialog():
            entries = list(getter(page) or [])
    except Exception as exc:
        _log(f"Neueste Folgen fehlgeschlagen ({plugin_name}): {exc}", xbmc.LOGWARNING)
        xbmcgui.Dialog().notification("Neueste Folgen", "Abruf fehlgeschlagen.", xbmcgui.NOTIFICATION_INFO, 3000)
        xbmcplugin.endOfDirectory(handle)
        return

    for entry in entries:
        # Entries are duck-typed; anything that cannot be coerced is dropped.
        try:
            title = str(getattr(entry, "series_title", "") or "").strip()
            season_number = int(getattr(entry, "season", 0) or 0)
            episode_number = int(getattr(entry, "episode", 0) or 0)
            url = str(getattr(entry, "url", "") or "").strip()
            airdate = str(getattr(entry, "airdate", "") or "").strip()
        except Exception:
            continue
        # Season 0 (specials) is allowed; episode must be positive.
        if not title or not url or season_number < 0 or episode_number <= 0:
            continue

        # Labels must match the format used elsewhere so playstate keys line up.
        season_label = f"Staffel {season_number}"
        episode_label = f"Episode {episode_number}"
        key = _playstate_key(plugin_name=plugin_name, title=title, season=season_label, episode=episode_label)
        playstate = _get_playstate(key)

        label = f"{title} - S{season_number:02d}E{episode_number:02d}"
        if airdate:
            label = f"{label} ({airdate})"
        label = _label_with_playstate(label, playstate)

        info_labels: dict[str, object] = {
            "title": f"{title} - S{season_number:02d}E{episode_number:02d}",
            "tvshowtitle": title,
            "season": season_number,
            "episode": episode_number,
            "mediatype": "episode",
        }
        info_labels = _apply_playstate_to_info(info_labels, playstate)

        _add_directory_item(
            handle,
            label,
            "play_episode_url",
            {
                "plugin": plugin_name,
                "title": title,
                "season": str(season_number),
                "episode": str(episode_number),
                "url": url,
            },
            is_folder=False,
            info_labels=info_labels,
        )

    xbmcplugin.endOfDirectory(handle)


def _show_genre_series_group(plugin_name: str, genre: str, group_code: str, page: int = 1) -> None:
    """Show one alphabetical group of a genre's titles, paged locally (10/page)."""
    handle = _get_handle()
    page_size = 10
    page = max(1, int(page or 1))

    try:
        titles = _get_genre_titles(plugin_name, genre)
    except Exception as exc:
        _log(f"Genre-Serien konnten nicht geladen werden ({plugin_name}): {exc}", xbmc.LOGWARNING)
        xbmcgui.Dialog().notification("Genres", "Serien konnten nicht geladen werden.", xbmcgui.NOTIFICATION_INFO, 3000)
        xbmcplugin.endOfDirectory(handle)
        return

    filtered = [title for title in titles if _group_matches(group_code, title)]
    total = len(filtered)
    total_pages = max(1, (total + page_size - 1) // page_size)
    page =
min(page, total_pages) + xbmcplugin.setPluginCategory(handle, f"{genre} [{group_code}] ({page}/{total_pages})") + + if total_pages > 1 and page > 1: + _add_directory_item( + handle, + "Vorherige Seite", + "genre_series_group", + {"plugin": plugin_name, "genre": genre, "group": group_code, "page": str(page - 1)}, + is_folder=True, + ) + + start = (page - 1) * page_size + end = start + page_size + page_items = filtered[start:end] + show_tmdb = _get_setting_bool("tmdb_genre_metadata", default=False) + + if page_items: + if show_tmdb: + with _busy_dialog(): + tmdb_prefetched = _tmdb_labels_and_art_bulk(page_items) + for title in page_items: + info_labels, art, cast = tmdb_prefetched.get(title, _tmdb_labels_and_art(title)) + info_labels = dict(info_labels or {}) + info_labels.setdefault("mediatype", "tvshow") + if (info_labels.get("mediatype") or "").strip().casefold() == "tvshow": + info_labels.setdefault("tvshowtitle", title) + playstate = _title_playstate(plugin_name, title) + info_labels = _apply_playstate_to_info(dict(info_labels), playstate) + display_label = _label_with_duration(title, info_labels) + display_label = _label_with_playstate(display_label, playstate) + _add_directory_item( + handle, + display_label, + "seasons", + {"plugin": plugin_name, "title": title}, + is_folder=True, + info_labels=info_labels, + art=art, + cast=cast, + ) + else: + for title in page_items: + playstate = _title_playstate(plugin_name, title) + _add_directory_item( + handle, + _label_with_playstate(title, playstate), + "seasons", + {"plugin": plugin_name, "title": title}, + is_folder=True, + info_labels=_apply_playstate_to_info({"title": title}, playstate), + ) + + if total_pages > 1 and page < total_pages: + _add_directory_item( + handle, + "Nächste Seite", + "genre_series_group", + {"plugin": plugin_name, "genre": genre, "group": group_code, "page": str(page + 1)}, + is_folder=True, + ) + xbmcplugin.endOfDirectory(handle) + +def _open_settings() -> None: + """Oeffnet das 
Kodi-Addon-Settings-Dialog.""" + if xbmcaddon is None: # pragma: no cover - outside Kodi + raise RuntimeError("xbmcaddon ist nicht verfuegbar (KodiStub).") + addon = xbmcaddon.Addon() + addon.openSettings() + + +def _extract_first_int(value: str) -> int | None: + match = re.search(r"(\d+)", value or "") + if not match: + return None + try: + return int(match.group(1)) + except Exception: + return None + + +def _duration_label(duration_seconds: int) -> str: + try: + duration_seconds = int(duration_seconds or 0) + except Exception: + duration_seconds = 0 + if duration_seconds <= 0: + return "" + total_minutes = max(0, duration_seconds // 60) + hours = max(0, total_minutes // 60) + minutes = max(0, total_minutes % 60) + return f"{hours:02d}:{minutes:02d} Laufzeit" + + +def _label_with_duration(label: str, info_labels: dict[str, str] | None) -> str: + return label + + +def _play_final_link( + link: str, + *, + display_title: str | None = None, + info_labels: dict[str, str] | None = None, + art: dict[str, str] | None = None, + cast: list[TmdbCastMember] | None = None, + resolve_handle: int | None = None, +) -> None: + list_item = xbmcgui.ListItem(label=display_title or "", path=link) + try: + list_item.setProperty("IsPlayable", "true") + except Exception: + pass + merged_info: dict[str, object] = dict(info_labels or {}) + if display_title: + merged_info["title"] = display_title + _apply_video_info(list_item, merged_info, cast) + if art: + setter = getattr(list_item, "setArt", None) + if callable(setter): + try: + setter(art) + except Exception: + pass + + # Bei Plugin-Play-Items sollte Kodi via setResolvedUrl() die Wiedergabe starten. + # player.play() kann dazu führen, dass Kodi den Item-Callback nochmal triggert (Hoster-Auswahl doppelt). 
def _track_playback_and_update_state(key: str) -> None:
    """Blockingly monitor the current playback and persist its play state.

    Waits up to ~15s (30 x 0.5s) for video playback to start, then polls
    position/total once per second until playback ends, and finally stores a
    watched/resume record under ``key`` plus aggregated title/season keys.
    Returns silently on Kodi abort or if playback never starts.
    """
    if not key:
        return
    # Monitor may be missing outside Kodi (KodiStub); fall back to plain polling.
    monitor = xbmc.Monitor() if xbmc is not None and hasattr(xbmc, "Monitor") else None
    player = xbmc.Player()

    # Wait for playback start.
    started = False
    for _ in range(30):
        try:
            if player.isPlayingVideo():
                started = True
                break
        except Exception:
            pass
        # waitForAbort returning True means Kodi is shutting down.
        if monitor and monitor.waitForAbort(0.5):
            return
    if not started:
        return

    # Poll until playback stops; keep the last seen position/total.
    last_pos = 0.0
    total = 0.0
    while True:
        try:
            if not player.isPlayingVideo():
                break
            last_pos = float(player.getTime() or 0.0)
            total = float(player.getTotalTime() or 0.0)
        except Exception:
            pass
        if monitor and monitor.waitForAbort(1.0):
            return

    if total <= 0.0:
        return
    percent = max(0.0, min(1.0, last_pos / total))
    state: dict[str, object] = {"last_position": int(last_pos), "resume_total": int(total), "percent": percent}
    if percent >= WATCHED_THRESHOLD:
        # Finished: mark watched and clear any resume point.
        state["watched"] = True
        state["resume_position"] = 0
    elif last_pos > 0:
        state["watched"] = False
        state["resume_position"] = int(last_pos)
    _set_playstate(key, state)

    # Additionally store the state aggregated per title and per season so
    # title/season listings can show watched/resume markers (movies and
    # series alike). The key format is plugin\ttitle\tseason\tepisode.
    try:
        parts = str(key).split("\t")
        if len(parts) == 4:
            plugin_name, title, season, _episode = parts
            plugin_name = (plugin_name or "").strip()
            title = (title or "").strip()
            season = (season or "").strip()
            if plugin_name and title:
                _set_playstate(_playstate_key(plugin_name=plugin_name, title=title, season="", episode=""), state)
            if season:
                _set_playstate(_playstate_key(plugin_name=plugin_name, title=title, season=season, episode=""), state)
    except Exception:
        pass
def _play_episode_url(
    plugin_name: str,
    *,
    title: str,
    season_number: int,
    episode_number: int,
    episode_url: str,
    resolve_handle: int | None = None,
) -> None:
    """Resolve and play an episode identified directly by its page URL.

    Optionally lets the user pick a hoster (when the plugin exposes
    ``available_hosters_for_url``), temporarily restricts the plugin to that
    hoster, resolves the final stream URL via ``stream_link_for_url`` and
    ``resolve_stream_link``, starts playback, then tracks the play state.
    """
    season_label = f"Staffel {season_number}" if season_number > 0 else ""
    episode_label = f"Episode {episode_number}" if episode_number > 0 else ""
    _log(f"Play (URL) anfordern: {plugin_name} / {title} / {season_label} / {episode_label} / {episode_url}")
    plugin = _discover_plugins().get(plugin_name)
    if plugin is None:
        xbmcgui.Dialog().notification("Play", "Plugin nicht gefunden.", xbmcgui.NOTIFICATION_INFO, 3000)
        return

    # Hoster enumeration is optional; failures only degrade to "no selection".
    available_hosters: list[str] = []
    hoster_getter = getattr(plugin, "available_hosters_for_url", None)
    if callable(hoster_getter):
        try:
            with _busy_dialog():
                available_hosters = list(hoster_getter(episode_url) or [])
        except Exception as exc:
            _log(f"Hoster laden fehlgeschlagen ({plugin_name}): {exc}", xbmc.LOGWARNING)

    selected_hoster: str | None = None
    if available_hosters:
        if len(available_hosters) == 1:
            selected_hoster = available_hosters[0]
        else:
            selected_index = xbmcgui.Dialog().select("Hoster wählen", available_hosters)
            if selected_index is None or selected_index < 0:
                # User cancelled the hoster dialog -> abort playback.
                _log("Play abgebrochen (kein Hoster gewählt).", xbmc.LOGDEBUG)
                return
            selected_hoster = available_hosters[selected_index]

    # Some plugins optionally allow a temporary restriction to one hoster;
    # remember the previous preference so it can be restored afterwards.
    preferred_setter = getattr(plugin, "set_preferred_hosters", None)
    restore_hosters: list[str] | None = None
    if selected_hoster and callable(preferred_setter):
        current = getattr(plugin, "_preferred_hosters", None)
        if isinstance(current, list):
            restore_hosters = list(current)
        preferred_setter([selected_hoster])

    try:
        link_getter = getattr(plugin, "stream_link_for_url", None)
        if not callable(link_getter):
            xbmcgui.Dialog().notification("Play", "Nicht unterstützt.", xbmcgui.NOTIFICATION_INFO, 3000)
            return
        link = link_getter(episode_url)
        if not link:
            _log("Kein Stream-Link gefunden.", xbmc.LOGWARNING)
            xbmcgui.Dialog().notification("Play", "Kein Stream-Link gefunden.", xbmcgui.NOTIFICATION_INFO, 3000)
            return
        _log(f"Stream-Link: {link}", xbmc.LOGDEBUG)
        final_link = plugin.resolve_stream_link(link) or link
    finally:
        # Always restore the hoster preference, even on early return/exception.
        if restore_hosters is not None and callable(preferred_setter):
            preferred_setter(restore_hosters)

    display_title = f"{title} - S{season_number:02d}E{episode_number:02d}" if season_number and episode_number else title
    info_labels, art, cast = _tmdb_labels_and_art(title)
    info_labels = dict(info_labels or {})
    info_labels.setdefault("mediatype", "episode")
    info_labels.setdefault("tvshowtitle", title)
    if season_number > 0:
        info_labels["season"] = str(season_number)
    if episode_number > 0:
        info_labels["episode"] = str(episode_number)
    display_title = _label_with_duration(display_title, info_labels)
    _play_final_link(
        final_link,
        display_title=display_title,
        info_labels=info_labels,
        art=art,
        cast=cast,
        resolve_handle=resolve_handle,
    )
    _track_playback_and_update_state(
        _playstate_key(plugin_name=plugin_name, title=title, season=season_label, episode=episode_label)
    )
info_labels = dict(info_labels or {}) + info_labels.setdefault("mediatype", "episode") + info_labels.setdefault("tvshowtitle", title) + if season_number > 0: + info_labels["season"] = str(season_number) + if episode_number > 0: + info_labels["episode"] = str(episode_number) + display_title = _label_with_duration(display_title, info_labels) + _play_final_link( + final_link, + display_title=display_title, + info_labels=info_labels, + art=art, + cast=cast, + resolve_handle=resolve_handle, + ) + _track_playback_and_update_state( + _playstate_key(plugin_name=plugin_name, title=title, season=season_label, episode=episode_label) + ) + + +def _parse_params() -> dict[str, str]: + """Parst Kodi-Plugin-Parameter aus `sys.argv[2]`.""" + if len(sys.argv) <= 2 or not sys.argv[2]: + return {} + raw_params = parse_qs(sys.argv[2].lstrip("?"), keep_blank_values=True) + return {key: values[0] for key, values in raw_params.items()} + + +def run() -> None: + params = _parse_params() + action = params.get("action") + _log(f"Action: {action}", xbmc.LOGDEBUG) + if action == "search": + _show_search() + elif action == "plugin_menu": + _show_plugin_menu(params.get("plugin", "")) + elif action == "plugin_search": + _show_plugin_search(params.get("plugin", "")) + elif action == "genre_sources": + _show_genre_sources() + elif action == "genres": + _show_genres(params.get("plugin", "")) + elif action == "new_titles": + _show_new_titles( + params.get("plugin", ""), + _parse_positive_int(params.get("page", "1"), default=1), + ) + elif action == "latest_episodes": + _show_latest_episodes( + params.get("plugin", ""), + _parse_positive_int(params.get("page", "1"), default=1), + ) + elif action == "genre_series": + _show_genre_series( + params.get("plugin", ""), + params.get("genre", ""), + ) + elif action == "genre_titles_page": + _show_genre_titles_page( + params.get("plugin", ""), + params.get("genre", ""), + _parse_positive_int(params.get("page", "1"), default=1), + ) + elif action == 
"genre_series_group": + _show_genre_series_group( + params.get("plugin", ""), + params.get("genre", ""), + params.get("group", ""), + _parse_positive_int(params.get("page", "1"), default=1), + ) + elif action == "popular": + _show_popular( + params.get("plugin") or None, + _parse_positive_int(params.get("page", "1"), default=1), + ) + elif action == "settings": + _open_settings() + elif action == "seasons": + _show_seasons(params.get("plugin", ""), params.get("title", "")) + elif action == "episodes": + _show_episodes( + params.get("plugin", ""), + params.get("title", ""), + params.get("season", ""), + ) + elif action == "play_episode": + _play_episode( + params.get("plugin", ""), + params.get("title", ""), + params.get("season", ""), + params.get("episode", ""), + resolve_handle=_get_handle(), + ) + elif action == "play_movie": + plugin_name = params.get("plugin", "") + title = params.get("title", "") + # Einschalten liefert Filme (keine Staffeln/Episoden). Für Playback nutzen wir: + # -> Stream -> . 
+ if (plugin_name or "").casefold() == "einschalten": + _play_episode( + plugin_name, + title, + "Stream", + title, + resolve_handle=_get_handle(), + ) + else: + _play_episode( + plugin_name, + title, + "Film", + "Stream", + resolve_handle=_get_handle(), + ) + elif action == "play_episode_url": + _play_episode_url( + params.get("plugin", ""), + title=params.get("title", ""), + season_number=_parse_positive_int(params.get("season", "0"), default=0), + episode_number=_parse_positive_int(params.get("episode", "0"), default=0), + episode_url=params.get("url", ""), + resolve_handle=_get_handle(), + ) + elif action == "play": + link = params.get("url", "") + if link: + _play_final_link(link, resolve_handle=_get_handle()) + else: + _show_root_menu() + + +if __name__ == "__main__": + run() diff --git a/addon/http_session_pool.py b/addon/http_session_pool.py new file mode 100644 index 0000000..725fa43 --- /dev/null +++ b/addon/http_session_pool.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python3 +"""Shared requests.Session pooling for plugins. + +Goal: reuse TCP connections/cookies across multiple HTTP calls within a Kodi session. 
+""" + +from __future__ import annotations + +from typing import Any, Dict, Optional + +try: # pragma: no cover - optional dependency + import requests +except Exception: # pragma: no cover + requests = None + +_SESSIONS: Dict[str, Any] = {} + + +def get_requests_session(key: str, *, headers: Optional[dict[str, str]] = None): + """Return a cached `requests.Session()` for the given key.""" + if requests is None: + raise RuntimeError("requests ist nicht verfuegbar.") + key = (key or "").strip() or "default" + session = _SESSIONS.get(key) + if session is None: + session = requests.Session() + _SESSIONS[key] = session + if headers: + try: + session.headers.update({str(k): str(v) for k, v in headers.items() if k and v}) + except Exception: + pass + return session + diff --git a/addon/icon.png b/addon/icon.png new file mode 100644 index 0000000..9e65f73 Binary files /dev/null and b/addon/icon.png differ diff --git a/addon/plugin_helpers.py b/addon/plugin_helpers.py new file mode 100644 index 0000000..ef634c0 --- /dev/null +++ b/addon/plugin_helpers.py @@ -0,0 +1,128 @@ +#!/usr/bin/env python3 +"""Shared helpers for ViewIt plugins. + +Focus: +- Kodi addon settings access (string/bool) +- Optional URL notifications +- Optional URL logging +- Optional HTML response dumps + +Designed to work both in Kodi and outside Kodi (for linting/tests). 
+""" + +from __future__ import annotations + +from datetime import datetime +import hashlib +import os +from typing import Optional + +try: # pragma: no cover - Kodi runtime + import xbmcaddon # type: ignore[import-not-found] + import xbmcvfs # type: ignore[import-not-found] + import xbmcgui # type: ignore[import-not-found] +except ImportError: # pragma: no cover - allow importing outside Kodi + xbmcaddon = None + xbmcvfs = None + xbmcgui = None + + +def get_setting_string(addon_id: str, setting_id: str, *, default: str = "") -> str: + if xbmcaddon is None: + return default + try: + addon = xbmcaddon.Addon(addon_id) + getter = getattr(addon, "getSettingString", None) + if getter is not None: + return str(getter(setting_id) or "").strip() + return str(addon.getSetting(setting_id) or "").strip() + except Exception: + return default + + +def get_setting_bool(addon_id: str, setting_id: str, *, default: bool = False) -> bool: + if xbmcaddon is None: + return default + try: + addon = xbmcaddon.Addon(addon_id) + getter = getattr(addon, "getSettingBool", None) + if getter is not None: + return bool(getter(setting_id)) + raw = addon.getSetting(setting_id) + return str(raw).strip().lower() in {"1", "true", "yes", "on"} + except Exception: + return default + + +def notify_url(addon_id: str, *, heading: str, url: str, enabled_setting_id: str) -> None: + if xbmcgui is None: + return + if not get_setting_bool(addon_id, enabled_setting_id, default=False): + return + try: + xbmcgui.Dialog().notification(heading, url, xbmcgui.NOTIFICATION_INFO, 3000) + except Exception: + return + + +def _profile_logs_dir(addon_id: str) -> Optional[str]: + if xbmcaddon is None or xbmcvfs is None: + return None + try: + addon = xbmcaddon.Addon(addon_id) + profile = xbmcvfs.translatePath(addon.getAddonInfo("profile")) + log_dir = os.path.join(profile, "logs") + if not xbmcvfs.exists(log_dir): + xbmcvfs.mkdirs(log_dir) + return log_dir + except Exception: + return None + + +def _append_text_file(path: 
def log_url(addon_id: str, *, enabled_setting_id: str, log_filename: str, url: str, kind: str = "VISIT") -> None:
    """Append a tab-separated ``timestamp<TAB>kind<TAB>url`` line to a debug log.

    Logging is gated by the boolean addon setting ``enabled_setting_id``.
    The line goes to ``<profile>/logs/<log_filename>`` when the profile
    directory is available, otherwise to a file next to this module.
    """
    if not get_setting_bool(addon_id, enabled_setting_id, default=False):
        return
    # datetime.utcnow() is deprecated since Python 3.12; use an aware UTC time
    # and drop the tzinfo so the "...Z" suffix format stays byte-identical.
    from datetime import timezone

    timestamp = datetime.now(timezone.utc).replace(tzinfo=None).isoformat(timespec="seconds") + "Z"
    line = f"{timestamp}\t{kind}\t{url}\n"
    log_dir = _profile_logs_dir(addon_id)
    target_dir = log_dir if log_dir else os.path.dirname(__file__)
    _append_text_file(os.path.join(target_dir, log_filename), line)
class BasisPlugin(ABC):
    """Abstract base class for all site integrations (plugins).

    Required methods: ``search_titles``, ``seasons_for``, ``episodes_for``.
    Everything else is optional and has a safe no-op default, so plugins only
    implement the features they actually support.
    """

    # Human-readable plugin name; used by the router as the registry key.
    name: str

    @abstractmethod
    async def search_titles(self, query: str) -> List[str]:
        """Return the list of all titles matching the search query."""

    @abstractmethod
    def seasons_for(self, title: str) -> List[str]:
        """Return all seasons of a title."""

    @abstractmethod
    def episodes_for(self, title: str, season: str) -> List[str]:
        """Return all episodes of a season."""

    def stream_link_for(self, title: str, season: str, episode: str) -> Optional[str]:
        """Optional: return the stream link for a concrete episode."""
        return None

    def resolve_stream_link(self, link: str) -> Optional[str]:
        """Optional: follow a stream link and return the final URL."""
        return None

    def genres(self) -> List[str]:
        """Optional: return a list of genres (if available)."""
        return []

    def titles_for_genre(self, genre: str) -> List[str]:
        """Optional: return all series titles of a genre."""
        return []

    def capabilities(self) -> Set[str]:
        """Optional: return a set of features/capabilities of this plugin.

        Examples:
        - `popular_series`: plugin can deliver a list of popular series.
        """

        return set()

    def popular_series(self) -> List[str]:
        """Optional: return a list of popular series (as title strings)."""

        return []
class TemplatePlugin(BasisPlugin):
    """Template for a streaming-site integration.

    A plugin may optionally declare capabilities (e.g. `popular_series`) so
    the router can offer matching menu entries.
    """

    name = "Template"

    def __init__(self) -> None:
        # The HTTP session is created lazily on first use (see _get_session).
        self._session: RequestsSession | None = None

    @property
    def is_available(self) -> bool:
        # True when the optional requests/bs4 dependencies imported cleanly.
        return REQUESTS_AVAILABLE

    @property
    def unavailable_reason(self) -> str:
        # Empty string when available; otherwise the import error details.
        if REQUESTS_AVAILABLE:
            return ""
        return f"requests/bs4 nicht verfuegbar: {REQUESTS_IMPORT_ERROR}"

    def _get_session(self) -> RequestsSession:
        # Reuse a single session (cookies/keep-alive) for all requests.
        if requests is None:
            raise RuntimeError(self.unavailable_reason)
        if self._session is None:
            session = requests.Session()
            session.headers.update(HEADERS)
            self._session = session
        return self._session

    async def search_titles(self, query: str) -> List[str]:
        """TODO: implement the search on the target site."""
        _ = query
        return []

    def seasons_for(self, title: str) -> List[str]:
        """TODO: return the seasons for a title."""
        _ = title
        return []

    def episodes_for(self, title: str, season: str) -> List[str]:
        """TODO: return the episodes for title+season."""
        _ = (title, season)
        return []

    def capabilities(self) -> set[str]:
        """Optional: declare this plugin's capabilities.

        Examples:
        - `popular_series`: plugin can deliver popular series
        - `genres`: plugin supports the genre browser
        """

        return set()

    def popular_series(self) -> List[str]:
        """Optional: list of popular series (only when `popular_series` is declared)."""
        return []

    def stream_link_for(self, title: str, season: str, episode: str) -> Optional[str]:
        """Optional: embed/hoster link for an episode."""
        _ = (title, season, episode)
        return None

    def resolve_stream_link(self, link: str) -> Optional[str]:
        """Optional: redirect/mirror resolution; identity by default."""
        return link
def _absolute_url(href: str) -> str:
    """Resolve a site-relative href against BASE_URL; absolute URLs pass through."""
    if href.startswith("/"):
        return f"{BASE_URL}{href}"
    return href
str) -> None: + _log_url(url, kind="PARSE") + + +def _log_response_html(url: str, body: str) -> None: + dump_response_html( + ADDON_ID, + enabled_setting_id=GLOBAL_SETTING_DUMP_HTML, + url=url, + body=body, + filename_prefix="aniworld_response", + ) + + +def _normalize_search_text(value: str) -> str: + value = (value or "").casefold() + value = re.sub(r"[^a-z0-9]+", " ", value) + value = re.sub(r"\s+", " ", value).strip() + return value + + +def _strip_html(text: str) -> str: + if not text: + return "" + return re.sub(r"<[^>]+>", "", text) + + +def _matches_query(query: str, *, title: str) -> bool: + normalized_query = _normalize_search_text(query) + if not normalized_query: + return False + haystack = _normalize_search_text(title) + if not haystack: + return False + return normalized_query in haystack + + +def _ensure_requests() -> None: + if requests is None or BeautifulSoup is None: + raise RuntimeError("requests/bs4 sind nicht verfuegbar.") + + +def _looks_like_cloudflare_challenge(body: str) -> bool: + lower = body.lower() + markers = ( + "cf-browser-verification", + "cf-challenge", + "cf_chl", + "challenge-platform", + "attention required! | cloudflare", + "just a moment...", + "cloudflare ray id", + ) + return any(marker in lower for marker in markers) + + +def _get_soup(url: str, *, session: Optional[RequestsSession] = None) -> BeautifulSoupT: + _ensure_requests() + _log_visit(url) + sess = session or get_requests_session("aniworld", headers=HEADERS) + response = sess.get(url, headers=HEADERS, timeout=DEFAULT_TIMEOUT) + response.raise_for_status() + if response.url and response.url != url: + _log_url(response.url, kind="REDIRECT") + _log_response_html(url, response.text) + if _looks_like_cloudflare_challenge(response.text): + raise RuntimeError("Cloudflare-Schutz erkannt. requests reicht ggf. 
nicht aus.") + return BeautifulSoup(response.text, "html.parser") + + +def _get_soup_simple(url: str) -> BeautifulSoupT: + _ensure_requests() + _log_visit(url) + sess = get_requests_session("aniworld", headers=HEADERS) + response = sess.get(url, headers=HEADERS, timeout=DEFAULT_TIMEOUT) + response.raise_for_status() + if response.url and response.url != url: + _log_url(response.url, kind="REDIRECT") + _log_response_html(url, response.text) + if _looks_like_cloudflare_challenge(response.text): + raise RuntimeError("Cloudflare-Schutz erkannt. requests reicht ggf. nicht aus.") + return BeautifulSoup(response.text, "html.parser") + + +def _post_json(url: str, *, payload: Dict[str, str], session: Optional[RequestsSession] = None) -> Any: + _ensure_requests() + _log_visit(url) + sess = session or get_requests_session("aniworld", headers=HEADERS) + response = sess.post(url, data=payload, headers=HEADERS, timeout=DEFAULT_TIMEOUT) + response.raise_for_status() + if response.url and response.url != url: + _log_url(response.url, kind="REDIRECT") + _log_response_html(url, response.text) + if _looks_like_cloudflare_challenge(response.text): + raise RuntimeError("Cloudflare-Schutz erkannt. requests reicht ggf. 
nicht aus.") + try: + return response.json() + except Exception: + return None + + +def _extract_canonical_url(soup: BeautifulSoupT, fallback: str) -> str: + canonical = soup.select_one('link[rel="canonical"][href]') + href = (canonical.get("href") if canonical else "") or "" + href = href.strip() + if href.startswith("http://") or href.startswith("https://"): + return href.rstrip("/") + return fallback.rstrip("/") + + +def _series_root_url(url: str) -> str: + normalized = (url or "").strip().rstrip("/") + normalized = re.sub(r"/staffel-\d+(?:/.*)?$", "", normalized) + normalized = re.sub(r"/episode-\d+(?:/.*)?$", "", normalized) + return normalized.rstrip("/") + + +def _extract_season_links(soup: BeautifulSoupT) -> List[Tuple[int, str]]: + season_links: List[Tuple[int, str]] = [] + seen_numbers: set[int] = set() + for anchor in soup.select('.hosterSiteDirectNav a[href*="/staffel-"]'): + href = anchor.get("href") or "" + if "/episode-" in href: + continue + match = re.search(STAFFEL_NUM_IN_URL, href) + if match: + number = int(match.group(1)) + else: + label = anchor.get_text(strip=True) + if not label.isdigit(): + continue + number = int(label) + if number in seen_numbers: + continue + seen_numbers.add(number) + season_url = _absolute_url(href) + if season_url: + _log_parsed_url(season_url) + season_links.append((number, season_url)) + season_links.sort(key=lambda item: item[0]) + return season_links + + +def _extract_number_of_seasons(soup: BeautifulSoupT) -> Optional[int]: + tag = soup.select_one('meta[itemprop="numberOfSeasons"]') + if not tag: + return None + content = (tag.get("content") or "").strip() + if not content.isdigit(): + return None + count = int(content) + return count if count > 0 else None + + +def _extract_episodes(soup: BeautifulSoupT) -> List[EpisodeInfo]: + episodes: List[EpisodeInfo] = [] + rows = soup.select("table.seasonEpisodesList tbody tr") + for index, row in enumerate(rows): + cells = row.find_all("td") + if not cells: + continue + 
episode_cell = cells[0] + number_text = episode_cell.get_text(strip=True) + digits = "".join(ch for ch in number_text if ch.isdigit()) + number = int(digits) if digits else index + 1 + link = episode_cell.find("a") + href = link.get("href") if link else "" + url = _absolute_url(href or "") + if url: + _log_parsed_url(url) + + title_tag = row.select_one(".seasonEpisodeTitle strong") + original_tag = row.select_one(".seasonEpisodeTitle span") + title = title_tag.get_text(strip=True) if title_tag else "" + original_title = original_tag.get_text(strip=True) if original_tag else "" + + if url: + episodes.append(EpisodeInfo(number=number, title=title, original_title=original_title, url=url)) + return episodes + + +_LATEST_EPISODE_TAG_RE = re.compile(SEASON_EPISODE_TAG, re.IGNORECASE) +_LATEST_EPISODE_URL_RE = re.compile(SEASON_EPISODE_URL, re.IGNORECASE) + + +def _extract_latest_episodes(soup: BeautifulSoupT) -> List[LatestEpisode]: + episodes: List[LatestEpisode] = [] + seen: set[str] = set() + + for anchor in soup.select(".newEpisodeList a[href]"): + href = (anchor.get("href") or "").strip() + if not href or "/anime/stream/" not in href: + continue + url = _absolute_url(href) + if not url: + continue + + title_tag = anchor.select_one("strong") + series_title = (title_tag.get_text(strip=True) if title_tag else "").strip() + if not series_title: + continue + + season_number: Optional[int] = None + episode_number: Optional[int] = None + + match = _LATEST_EPISODE_URL_RE.search(href) + if match: + season_number = int(match.group(1)) + episode_number = int(match.group(2)) + + if season_number is None or episode_number is None: + tag_node = ( + anchor.select_one("span.listTag.bigListTag.blue2") + or anchor.select_one("span.listTag.blue2") + or anchor.select_one("span.blue2") + ) + tag_text = (tag_node.get_text(" ", strip=True) if tag_node else "").strip() + match = _LATEST_EPISODE_TAG_RE.search(tag_text) + if not match: + continue + season_number = int(match.group(1)) + 
episode_number = int(match.group(2)) + + if season_number is None or episode_number is None: + continue + + airdate_node = anchor.select_one("span.elementFloatRight") + airdate = (airdate_node.get_text(" ", strip=True) if airdate_node else "").strip() + + key = f"{url}\t{season_number}\t{episode_number}" + if key in seen: + continue + seen.add(key) + + _log_parsed_url(url) + episodes.append( + LatestEpisode( + series_title=series_title, + season=season_number, + episode=episode_number, + url=url, + airdate=airdate, + ) + ) + + return episodes + + +def scrape_anime_detail(anime_identifier: str, max_seasons: Optional[int] = None) -> List[SeasonInfo]: + _ensure_requests() + anime_url = _series_root_url(_absolute_url(anime_identifier)) + _log_url(anime_url, kind="ANIME") + session = get_requests_session("aniworld", headers=HEADERS) + try: + _get_soup(BASE_URL, session=session) + except Exception: + pass + soup = _get_soup(anime_url, session=session) + + base_anime_url = _series_root_url(_extract_canonical_url(soup, anime_url)) + season_links = _extract_season_links(soup) + season_count = _extract_number_of_seasons(soup) + if season_count and (not season_links or len(season_links) < season_count): + existing = {number for number, _ in season_links} + for number in range(1, season_count + 1): + if number in existing: + continue + season_url = f"{base_anime_url}/staffel-{number}" + _log_parsed_url(season_url) + season_links.append((number, season_url)) + season_links.sort(key=lambda item: item[0]) + if max_seasons is not None: + season_links = season_links[:max_seasons] + + seasons: List[SeasonInfo] = [] + for number, url in season_links: + season_soup = _get_soup(url, session=session) + episodes = _extract_episodes(season_soup) + seasons.append(SeasonInfo(number=number, url=url, episodes=episodes)) + seasons.sort(key=lambda s: s.number) + return seasons + + +def resolve_redirect(target_url: str) -> Optional[str]: + _ensure_requests() + normalized_url = 
_absolute_url(target_url) + _log_visit(normalized_url) + session = get_requests_session("aniworld", headers=HEADERS) + _get_soup(BASE_URL, session=session) + response = session.get(normalized_url, headers=HEADERS, timeout=DEFAULT_TIMEOUT, allow_redirects=True) + if response.url: + _log_url(response.url, kind="RESOLVED") + return response.url if response.url else None + + +def fetch_episode_hoster_names(episode_url: str) -> List[str]: + _ensure_requests() + normalized_url = _absolute_url(episode_url) + session = get_requests_session("aniworld", headers=HEADERS) + _get_soup(BASE_URL, session=session) + soup = _get_soup(normalized_url, session=session) + names: List[str] = [] + seen: set[str] = set() + for anchor in soup.select(".hosterSiteVideo a.watchEpisode"): + title = anchor.select_one("h4") + name = title.get_text(strip=True) if title else "" + if not name: + name = anchor.get_text(" ", strip=True) + name = (name or "").strip() + if name.lower().startswith("hoster "): + name = name[7:].strip() + href = anchor.get("href") or "" + url = _absolute_url(href) + if url: + _log_parsed_url(url) + key = name.casefold().strip() + if not key or key in seen: + continue + seen.add(key) + names.append(name) + if names: + _log_url(f"{normalized_url}#hosters={','.join(names)}", kind="HOSTERS") + return names + + +def fetch_episode_stream_link( + episode_url: str, + *, + preferred_hosters: Optional[List[str]] = None, +) -> Optional[str]: + _ensure_requests() + normalized_url = _absolute_url(episode_url) + preferred = [hoster.lower() for hoster in (preferred_hosters or DEFAULT_PREFERRED_HOSTERS)] + session = get_requests_session("aniworld", headers=HEADERS) + _get_soup(BASE_URL, session=session) + soup = _get_soup(normalized_url, session=session) + candidates: List[Tuple[str, str]] = [] + for anchor in soup.select(".hosterSiteVideo a.watchEpisode"): + name_tag = anchor.select_one("h4") + name = name_tag.get_text(strip=True) if name_tag else "" + href = anchor.get("href") or "" + 
url = _absolute_url(href) + if url: + _log_parsed_url(url) + if name and url: + candidates.append((name, url)) + if not candidates: + return None + candidates.sort(key=lambda item: item[0].casefold()) + selected_url = None + for wanted in preferred: + for name, url in candidates: + if wanted in name.casefold(): + selected_url = url + break + if selected_url: + break + if not selected_url: + selected_url = candidates[0][1] + resolved = resolve_redirect(selected_url) or selected_url + return resolved + + +def search_animes(query: str) -> List[SeriesResult]: + _ensure_requests() + query = (query or "").strip() + if not query: + return [] + session = get_requests_session("aniworld", headers=HEADERS) + try: + session.get(BASE_URL, headers=HEADERS, timeout=DEFAULT_TIMEOUT) + except Exception: + pass + data = _post_json(SEARCH_API_URL, payload={"keyword": query}, session=session) + results: List[SeriesResult] = [] + seen: set[str] = set() + if isinstance(data, list): + for entry in data: + if not isinstance(entry, dict): + continue + title = _strip_html((entry.get("title") or "").strip()) + if not title or not _matches_query(query, title=title): + continue + link = (entry.get("link") or "").strip() + if not link.startswith("/anime/stream/"): + continue + if "/staffel-" in link or "/episode-" in link: + continue + if link.rstrip("/") == "/anime/stream": + continue + url = _absolute_url(link) if link else "" + if url: + _log_parsed_url(url) + key = title.casefold().strip() + if key in seen: + continue + seen.add(key) + description = (entry.get("description") or "").strip() + results.append(SeriesResult(title=title, description=description, url=url)) + return results + + soup = _get_soup_simple(SEARCH_URL.format(query=requests.utils.quote(query))) + for anchor in soup.select("a[href^='/anime/stream/'][href]"): + href = (anchor.get("href") or "").strip() + if not href or "/staffel-" in href or "/episode-" in href: + continue + url = _absolute_url(href) + if url: + 
_log_parsed_url(url) + title_node = anchor.select_one("h3") or anchor.select_one("strong") + title = (title_node.get_text(" ", strip=True) if title_node else anchor.get_text(" ", strip=True)).strip() + if not title: + continue + if not _matches_query(query, title=title): + continue + key = title.casefold().strip() + if key in seen: + continue + seen.add(key) + results.append(SeriesResult(title=title, description="", url=url)) + return results + + +class AniworldPlugin(BasisPlugin): + name = "AniWorld (aniworld.to)" + + def __init__(self) -> None: + self._anime_results: Dict[str, SeriesResult] = {} + self._season_cache: Dict[str, List[SeasonInfo]] = {} + self._episode_label_cache: Dict[Tuple[str, str], Dict[str, EpisodeInfo]] = {} + self._popular_cache: Optional[List[SeriesResult]] = None + self._genre_cache: Optional[Dict[str, List[SeriesResult]]] = None + self._latest_cache: Dict[int, List[LatestEpisode]] = {} + self._latest_hoster_cache: Dict[str, List[str]] = {} + self._requests_available = REQUESTS_AVAILABLE + self._default_preferred_hosters: List[str] = list(DEFAULT_PREFERRED_HOSTERS) + self._preferred_hosters: List[str] = list(self._default_preferred_hosters) + self._hoster_cache: Dict[Tuple[str, str, str], List[str]] = {} + self.is_available = True + self.unavailable_reason: Optional[str] = None + if not self._requests_available: # pragma: no cover - optional dependency + self.is_available = False + self.unavailable_reason = "requests/bs4 fehlen. Installiere 'requests' und 'beautifulsoup4'." 
+ if REQUESTS_IMPORT_ERROR: + print(f"AniworldPlugin Importfehler: {REQUESTS_IMPORT_ERROR}") + + def capabilities(self) -> set[str]: + return {"popular_series", "genres", "latest_episodes"} + + def _find_series_by_title(self, title: str) -> Optional[SeriesResult]: + title = (title or "").strip() + if not title: + return None + + direct = self._anime_results.get(title) + if direct: + return direct + + wanted = title.casefold().strip() + + for candidate in self._anime_results.values(): + if candidate.title and candidate.title.casefold().strip() == wanted: + return candidate + + try: + for entry in self._ensure_popular(): + if entry.title and entry.title.casefold().strip() == wanted: + self._anime_results[entry.title] = entry + return entry + except Exception: + pass + + try: + for entries in self._ensure_genres().values(): + for entry in entries: + if entry.title and entry.title.casefold().strip() == wanted: + self._anime_results[entry.title] = entry + return entry + except Exception: + pass + + try: + for entry in search_animes(title): + if entry.title and entry.title.casefold().strip() == wanted: + self._anime_results[entry.title] = entry + return entry + except Exception: + pass + + return None + + def _ensure_popular(self) -> List[SeriesResult]: + if self._popular_cache is not None: + return list(self._popular_cache) + soup = _get_soup_simple(POPULAR_ANIMES_URL) + results: List[SeriesResult] = [] + seen: set[str] = set() + for anchor in soup.select("div.seriesListContainer a[href^='/anime/stream/']"): + href = (anchor.get("href") or "").strip() + if not href or "/staffel-" in href or "/episode-" in href: + continue + url = _absolute_url(href) + if url: + _log_parsed_url(url) + title_node = anchor.select_one("h3") + title = (title_node.get_text(" ", strip=True) if title_node else "").strip() + if not title: + continue + description = "" + desc_node = anchor.select_one("small") + if desc_node: + description = desc_node.get_text(" ", strip=True).strip() + key = 
title.casefold().strip() + if key in seen: + continue + seen.add(key) + results.append(SeriesResult(title=title, description=description, url=url)) + self._popular_cache = list(results) + return list(results) + + def popular_series(self) -> List[str]: + if not self._requests_available: + return [] + entries = self._ensure_popular() + self._anime_results.update({entry.title: entry for entry in entries if entry.title}) + return [entry.title for entry in entries if entry.title] + + def latest_episodes(self, page: int = 1) -> List[LatestEpisode]: + if not self._requests_available: + return [] + try: + page = int(page or 1) + except Exception: + page = 1 + page = max(1, page) + + cached = self._latest_cache.get(page) + if cached is not None: + return list(cached) + + url = LATEST_EPISODES_URL + if page > 1: + url = f"{url}?page={page}" + + soup = _get_soup_simple(url) + episodes = _extract_latest_episodes(soup) + self._latest_cache[page] = list(episodes) + return list(episodes) + + def _ensure_genres(self) -> Dict[str, List[SeriesResult]]: + if self._genre_cache is not None: + return {key: list(value) for key, value in self._genre_cache.items()} + soup = _get_soup_simple(GENRES_URL) + results: Dict[str, List[SeriesResult]] = {} + genre_blocks = soup.select("#seriesContainer div.genre") + if not genre_blocks: + genre_blocks = soup.select("div.genre") + for genre_block in genre_blocks: + name_node = genre_block.select_one(".seriesGenreList h3") + genre_name = (name_node.get_text(" ", strip=True) if name_node else "").strip() + if not genre_name: + continue + entries: List[SeriesResult] = [] + seen: set[str] = set() + for anchor in genre_block.select("ul li a[href]"): + href = (anchor.get("href") or "").strip() + if not href or "/staffel-" in href or "/episode-" in href: + continue + url = _absolute_url(href) + if url: + _log_parsed_url(url) + title = (anchor.get_text(" ", strip=True) or "").strip() + if not title: + continue + key = title.casefold().strip() + if key in 
seen: + continue + seen.add(key) + entries.append(SeriesResult(title=title, description="", url=url)) + if entries: + results[genre_name] = entries + self._genre_cache = {key: list(value) for key, value in results.items()} + # Für spätere Auflösung (Seasons/Episoden) die Titel->URL Zuordnung auffüllen. + for entries in results.values(): + for entry in entries: + if not entry.title: + continue + if entry.title not in self._anime_results: + self._anime_results[entry.title] = entry + return {key: list(value) for key, value in results.items()} + + def genres(self) -> List[str]: + if not self._requests_available: + return [] + genres = list(self._ensure_genres().keys()) + return [g for g in genres if g] + + def titles_for_genre(self, genre: str) -> List[str]: + genre = (genre or "").strip() + if not genre or not self._requests_available: + return [] + mapping = self._ensure_genres() + entries = mapping.get(genre) + if entries is None: + wanted = genre.casefold() + for key, value in mapping.items(): + if key.casefold() == wanted: + entries = value + break + if not entries: + return [] + # Zusätzlich sicherstellen, dass die Titel im Cache sind. 
+ self._anime_results.update({entry.title: entry for entry in entries if entry.title and entry.title not in self._anime_results}) + return [entry.title for entry in entries if entry.title] + + def _season_label(self, number: int) -> str: + return f"Staffel {number}" + + def _parse_season_number(self, season_label: str) -> Optional[int]: + match = re.search(DIGITS, season_label or "") + return int(match.group(1)) if match else None + + def _episode_label(self, info: EpisodeInfo) -> str: + title = (info.title or "").strip() + if title: + return f"Episode {info.number} - {title}" + return f"Episode {info.number}" + + def _cache_episode_labels(self, title: str, season_label: str, season_info: SeasonInfo) -> None: + cache_key = (title, season_label) + self._episode_label_cache[cache_key] = {self._episode_label(info): info for info in season_info.episodes} + + def _lookup_episode(self, title: str, season_label: str, episode_label: str) -> Optional[EpisodeInfo]: + cache_key = (title, season_label) + cached = self._episode_label_cache.get(cache_key) + if cached: + return cached.get(episode_label) + seasons = self._ensure_seasons(title) + number = self._parse_season_number(season_label) + if number is None: + return None + for season_info in seasons: + if season_info.number == number: + self._cache_episode_labels(title, season_label, season_info) + return self._episode_label_cache.get(cache_key, {}).get(episode_label) + return None + + async def search_titles(self, query: str) -> List[str]: + query = (query or "").strip() + if not query: + self._anime_results.clear() + self._season_cache.clear() + self._episode_label_cache.clear() + self._popular_cache = None + return [] + if not self._requests_available: + raise RuntimeError("AniworldPlugin kann ohne requests/bs4 nicht suchen.") + try: + results = search_animes(query) + except Exception as exc: # pragma: no cover + self._anime_results.clear() + self._season_cache.clear() + self._episode_label_cache.clear() + raise 
RuntimeError(f"AniWorld-Suche fehlgeschlagen: {exc}") from exc + self._anime_results = {result.title: result for result in results} + self._season_cache.clear() + self._episode_label_cache.clear() + return [result.title for result in results] + + def _ensure_seasons(self, title: str) -> List[SeasonInfo]: + if title in self._season_cache: + return self._season_cache[title] + anime = self._find_series_by_title(title) + if not anime: + return [] + seasons = scrape_anime_detail(anime.url) + self._season_cache[title] = list(seasons) + return list(seasons) + + def seasons_for(self, title: str) -> List[str]: + seasons = self._ensure_seasons(title) + return [self._season_label(season.number) for season in seasons if season.episodes] + + def episodes_for(self, title: str, season: str) -> List[str]: + seasons = self._ensure_seasons(title) + number = self._parse_season_number(season) + if number is None: + return [] + for season_info in seasons: + if season_info.number == number: + labels = [self._episode_label(info) for info in season_info.episodes] + self._cache_episode_labels(title, season, season_info) + return labels + return [] + + def stream_link_for(self, title: str, season: str, episode: str) -> Optional[str]: + if not self._requests_available: + raise RuntimeError("AniworldPlugin kann ohne requests/bs4 keine Stream-Links liefern.") + episode_info = self._lookup_episode(title, season, episode) + if not episode_info: + return None + link = fetch_episode_stream_link(episode_info.url, preferred_hosters=self._preferred_hosters) + if link: + _log_url(link, kind="FOUND") + return link + + def available_hosters_for(self, title: str, season: str, episode: str) -> List[str]: + if not self._requests_available: + raise RuntimeError("AniworldPlugin kann ohne requests/bs4 keine Hoster laden.") + cache_key = (title, season, episode) + cached = self._hoster_cache.get(cache_key) + if cached is not None: + return list(cached) + episode_info = self._lookup_episode(title, season, 
episode) + if not episode_info: + return [] + names = fetch_episode_hoster_names(episode_info.url) + self._hoster_cache[cache_key] = list(names) + return list(names) + + def available_hosters_for_url(self, episode_url: str) -> List[str]: + if not self._requests_available: + raise RuntimeError("AniworldPlugin kann ohne requests/bs4 keine Hoster laden.") + normalized = _absolute_url(episode_url) + cached = self._latest_hoster_cache.get(normalized) + if cached is not None: + return list(cached) + names = fetch_episode_hoster_names(normalized) + self._latest_hoster_cache[normalized] = list(names) + return list(names) + + def stream_link_for_url(self, episode_url: str) -> Optional[str]: + if not self._requests_available: + raise RuntimeError("AniworldPlugin kann ohne requests/bs4 keine Stream-Links liefern.") + normalized = _absolute_url(episode_url) + link = fetch_episode_stream_link(normalized, preferred_hosters=self._preferred_hosters) + if link: + _log_url(link, kind="FOUND") + return link + + def resolve_stream_link(self, link: str) -> Optional[str]: + if not self._requests_available: + raise RuntimeError("AniworldPlugin kann ohne requests/bs4 keine Stream-Links aufloesen.") + resolved = resolve_redirect(link) + if not resolved: + return None + try: + from resolveurl_backend import resolve as resolve_with_resolveurl + except Exception: + resolve_with_resolveurl = None + if callable(resolve_with_resolveurl): + resolved_by_resolveurl = resolve_with_resolveurl(resolved) + if resolved_by_resolveurl: + _log_url("ResolveURL", kind="HOSTER_RESOLVER") + _log_url(resolved_by_resolveurl, kind="MEDIA") + return resolved_by_resolveurl + _log_url(resolved, kind="FINAL") + return resolved + + def set_preferred_hosters(self, hosters: List[str]) -> None: + normalized = [hoster.strip().lower() for hoster in hosters if hoster.strip()] + if normalized: + self._preferred_hosters = normalized + + def reset_preferred_hosters(self) -> None: + self._preferred_hosters = 
list(self._default_preferred_hosters) + + +Plugin = AniworldPlugin diff --git a/addon/plugins/einschalten_plugin.py b/addon/plugins/einschalten_plugin.py new file mode 100644 index 0000000..7b4795a --- /dev/null +++ b/addon/plugins/einschalten_plugin.py @@ -0,0 +1,1052 @@ +"""Einschalten Plugin. + +Optionales Debugging wie bei Serienstream: +- URL-Logging +- HTML-Dumps +- On-Screen URL-Info +""" + +from __future__ import annotations + +import json +import re +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Set +from urllib.parse import urlencode, urljoin, urlsplit + +try: # pragma: no cover - optional dependency (Kodi dependency) + import requests +except ImportError as exc: # pragma: no cover + requests = None + REQUESTS_AVAILABLE = False + REQUESTS_IMPORT_ERROR = exc +else: + REQUESTS_AVAILABLE = True + REQUESTS_IMPORT_ERROR = None + +try: # pragma: no cover - optional Kodi helpers + import xbmcaddon # type: ignore[import-not-found] +except ImportError: # pragma: no cover - allow running outside Kodi + xbmcaddon = None + +from plugin_interface import BasisPlugin +from plugin_helpers import dump_response_html, get_setting_bool, log_url, notify_url + +ADDON_ID = "plugin.video.viewit" +SETTING_BASE_URL = "einschalten_base_url" +SETTING_INDEX_PATH = "einschalten_index_path" +SETTING_NEW_TITLES_PATH = "einschalten_new_titles_path" +SETTING_SEARCH_PATH = "einschalten_search_path" +SETTING_GENRES_PATH = "einschalten_genres_path" +SETTING_ENABLE_PLAYBACK = "einschalten_enable_playback" +SETTING_WATCH_PATH_TEMPLATE = "einschalten_watch_path_template" +GLOBAL_SETTING_LOG_URLS = "debug_log_urls" +GLOBAL_SETTING_DUMP_HTML = "debug_dump_html" +GLOBAL_SETTING_SHOW_URL_INFO = "debug_show_url_info" + +DEFAULT_BASE_URL = "" +DEFAULT_INDEX_PATH = "/" +DEFAULT_NEW_TITLES_PATH = "/movies/new" +DEFAULT_SEARCH_PATH = "/search" +DEFAULT_GENRES_PATH = "/genres" +DEFAULT_WATCH_PATH_TEMPLATE = "/api/movies/{id}/watch" + +HEADERS = { + "User-Agent": 
"Mozilla/5.0 (Kodi; ViewIt) AppleWebKit/537.36 (KHTML, like Gecko)", + "Accept": "text/html,application/xhtml+xml,application/json;q=0.9,*/*;q=0.8", + "Accept-Language": "de-DE,de;q=0.9,en;q=0.8", + "Connection": "keep-alive", +} + + +@dataclass(frozen=True) +class MovieItem: + id: int + title: str + release_date: str = "" + poster_path: str = "" + vote_average: float | None = None + collection_id: int | None = None + + +@dataclass(frozen=True) +class MovieDetail: + id: int + title: str + tagline: str = "" + overview: str = "" + release_date: str = "" + runtime_minutes: int | None = None + poster_path: str = "" + backdrop_path: str = "" + vote_average: float | None = None + vote_count: int | None = None + homepage: str = "" + imdb_id: str = "" + wikidata_id: str = "" + genres: List[str] | None = None + + +def _normalize_search_text(value: str) -> str: + value = (value or "").casefold() + value = re.sub(r"[^a-z0-9]+", " ", value) + value = re.sub(r"\s+", " ", value).strip() + return value + + +def _matches_query(query: str, *, title: str) -> bool: + normalized_query = _normalize_search_text(query) + if not normalized_query: + return False + haystack = f" {_normalize_search_text(title)} " + return f" {normalized_query} " in haystack + + +def _filter_movies_by_title(query: str, movies: List[MovieItem]) -> List[MovieItem]: + query = (query or "").strip() + if not query: + return [] + return [movie for movie in movies if _matches_query(query, title=movie.title)] + + +def _get_setting_text(setting_id: str, *, default: str = "") -> str: + if xbmcaddon is None: + return default + try: + addon = xbmcaddon.Addon(ADDON_ID) + getter = getattr(addon, "getSettingString", None) + if getter is not None: + return str(getter(setting_id) or "").strip() + return str(addon.getSetting(setting_id) or "").strip() + except Exception: + return default + + +def _get_setting_bool(setting_id: str, *, default: bool = False) -> bool: + return get_setting_bool(ADDON_ID, setting_id, 
default=default) + + +def _ensure_requests() -> None: + if requests is None: + raise RuntimeError(f"requests ist nicht verfuegbar: {REQUESTS_IMPORT_ERROR}") + + +def _extract_ng_state_payload(html: str) -> Dict[str, Any]: + """Extrahiert JSON aus ``.""" + html = html or "" + # Regex ist hier ausreichend und vermeidet bs4-Abhängigkeit. + match = re.search( + r']*id=["\\\']ng-state["\\\'][^>]*>(.*?)', + html, + flags=re.IGNORECASE | re.DOTALL, + ) + if not match: + return {} + raw = (match.group(1) or "").strip() + if not raw: + return {} + try: + data = json.loads(raw) + except Exception: + return {} + return data if isinstance(data, dict) else {} + + +def _notify_url(url: str) -> None: + notify_url(ADDON_ID, heading="einschalten", url=url, enabled_setting_id=GLOBAL_SETTING_SHOW_URL_INFO) + + +def _log_url(url: str, *, kind: str = "VISIT") -> None: + log_url(ADDON_ID, enabled_setting_id=GLOBAL_SETTING_LOG_URLS, log_filename="einschalten_urls.log", url=url, kind=kind) + + +def _log_debug_line(message: str) -> None: + try: + log_url(ADDON_ID, enabled_setting_id=GLOBAL_SETTING_LOG_URLS, log_filename="einschalten_debug.log", url=message, kind="DEBUG") + except Exception: + pass + + +def _log_titles(items: list[MovieItem], *, context: str) -> None: + if not items: + return + try: + log_url( + ADDON_ID, + enabled_setting_id=GLOBAL_SETTING_LOG_URLS, + log_filename="einschalten_titles.log", + url=f"{context}:count={len(items)}", + kind="TITLE", + ) + for item in items: + log_url( + ADDON_ID, + enabled_setting_id=GLOBAL_SETTING_LOG_URLS, + log_filename="einschalten_titles.log", + url=f"{context}:id={item.id} title={item.title}", + kind="TITLE", + ) + except Exception: + pass + + +def _log_response_html(url: str, body: str) -> None: + dump_response_html( + ADDON_ID, + enabled_setting_id=GLOBAL_SETTING_DUMP_HTML, + url=url, + body=body, + filename_prefix="einschalten_response", + ) + +def _u_matches(value: Any, expected_path: str) -> bool: + raw = (value or "").strip() + if 
def _movie_item_from_dict(item: Any) -> MovieItem | None:
    """Builds a MovieItem from one raw ng-state list entry.

    Returns None when the entry is not a dict, has no usable integer `id`,
    or has an empty `title`. Optional numeric fields (`voteAverage`,
    `collectionId`) fall back to None when they cannot be coerced.
    """
    if not isinstance(item, dict):
        return None
    try:
        movie_id = int(item.get("id"))
    except Exception:
        return None
    title = str(item.get("title") or "").strip()
    if not title:
        return None
    vote_average = item.get("voteAverage")
    try:
        vote_average_f = float(vote_average) if vote_average is not None else None
    except Exception:
        vote_average_f = None
    collection_id = item.get("collectionId")
    try:
        collection_id_i = int(collection_id) if collection_id is not None else None
    except Exception:
        collection_id_i = None
    return MovieItem(
        id=movie_id,
        title=title,
        release_date=str(item.get("releaseDate") or ""),
        poster_path=str(item.get("posterPath") or ""),
        vote_average=vote_average_f,
        collection_id=collection_id_i,
    )


def _parse_ng_state_movies(payload: Dict[str, Any]) -> List[MovieItem]:
    """Collects movie items from all ng-state blocks with `u: "/api/movies"`."""
    movies: List[MovieItem] = []
    for value in (payload or {}).values():
        if not isinstance(value, dict):
            continue
        # In ng-state payload, `u` (URL) is a sibling of `b` (body), not nested inside `b`.
        if not _u_matches(value.get("u"), "/api/movies"):
            continue
        block = value.get("b")
        if not isinstance(block, dict):
            continue
        data = block.get("data")
        if not isinstance(data, list):
            continue
        for item in data:
            movie = _movie_item_from_dict(item)
            if movie is not None:
                movies.append(movie)
    return movies


def _parse_ng_state_movies_with_pagination(payload: Dict[str, Any]) -> tuple[List[MovieItem], bool | None, int | None]:
    """Parses ng-state for `u: "/api/movies"` where `b` contains `{data:[...], pagination:{...}}`.

    Returns: (movies, has_more, current_page)
    """
    movies: List[MovieItem] = []
    has_more: bool | None = None
    current_page: int | None = None

    for value in (payload or {}).values():
        if not isinstance(value, dict):
            continue
        if not _u_matches(value.get("u"), "/api/movies"):
            continue
        block = value.get("b")
        if not isinstance(block, dict):
            continue

        pagination = block.get("pagination")
        if isinstance(pagination, dict):
            # `has_more` stays None when the key is absent so callers can
            # distinguish "unknown" from "no more pages".
            if "hasMore" in pagination:
                has_more = pagination.get("hasMore") is True
            try:
                current_page = int(pagination.get("currentPage")) if pagination.get("currentPage") is not None else None
            except Exception:
                current_page = None

        data = block.get("data")
        if not isinstance(data, list):
            continue

        for item in data:
            movie = _movie_item_from_dict(item)
            if movie is not None:
                movies.append(movie)

        # Stop after first matching block (genre pages should only have one).
        break

    return movies, has_more, current_page


def _parse_ng_state_search_results(payload: Dict[str, Any]) -> List[MovieItem]:
    """Collects movie items from all ng-state blocks with `u: "/api/search"`."""
    movies: List[MovieItem] = []
    for value in (payload or {}).values():
        if not isinstance(value, dict):
            continue
        if not _u_matches(value.get("u"), "/api/search"):
            continue
        block = value.get("b")
        if not isinstance(block, dict):
            continue
        data = block.get("data")
        if not isinstance(data, list):
            continue
        for item in data:
            movie = _movie_item_from_dict(item)
            if movie is not None:
                movies.append(movie)
    return movies


def _parse_ng_state_movie_detail(payload: Dict[str, Any], *, movie_id: int) -> MovieDetail | None:
    """Extracts the MovieDetail for `u: "/api/movies/<movie_id>"` from ng-state.

    Returns None when the id is invalid, no matching block exists, the block's
    id does not match, or the title is empty.
    """
    movie_id = int(movie_id or 0)
    if movie_id <= 0:
        return None
    expected_u = f"/api/movies/{movie_id}"
    for value in (payload or {}).values():
        if not isinstance(value, dict):
            continue
        if not _u_matches(value.get("u"), expected_u):
            continue
        block = value.get("b")
        if not isinstance(block, dict):
            continue
        try:
            parsed_id = int(block.get("id"))
        except Exception:
            continue
        if parsed_id != movie_id:
            continue
        title = str(block.get("title") or "").strip()
        if not title:
            continue
        runtime = block.get("runtime")
        try:
            runtime_i = int(runtime) if runtime is not None else None
        except Exception:
            runtime_i = None
        vote_average = block.get("voteAverage")
        try:
            vote_average_f = float(vote_average) if vote_average is not None else None
        except Exception:
            vote_average_f = None
        vote_count = block.get("voteCount")
        try:
            vote_count_i = int(vote_count) if vote_count is not None else None
        except Exception:
            vote_count_i = None
        genres_raw = block.get("genres")
        genres: List[str] | None = None
        if isinstance(genres_raw, list):
            # Genre entries are dicts like {"id":.., "name":..}; keep names only.
            names: List[str] = []
            for g in genres_raw:
                if isinstance(g, dict):
                    name = str(g.get("name") or "").strip()
                    if name:
                        names.append(name)
            genres = names
        return MovieDetail(
            id=movie_id,
            title=title,
            tagline=str(block.get("tagline") or "").strip(),
            overview=str(block.get("overview") or "").strip(),
            release_date=str(block.get("releaseDate") or "").strip(),
            runtime_minutes=runtime_i,
            poster_path=str(block.get("posterPath") or "").strip(),
            backdrop_path=str(block.get("backdropPath") or "").strip(),
            vote_average=vote_average_f,
            vote_count=vote_count_i,
            homepage=str(block.get("homepage") or "").strip(),
            imdb_id=str(block.get("imdbId") or "").strip(),
            wikidata_id=str(block.get("wikidataId") or "").strip(),
            genres=genres,
        )
    return None


def _parse_ng_state_genres(payload: Dict[str, Any]) -> Dict[str, int]:
    """Parses ng-state for `u: "/api/genres"` where `b` is a list of {id,name}."""
    genres: Dict[str, int] = {}
    for value in (payload or {}).values():
        if not isinstance(value, dict):
            continue
        if not _u_matches(value.get("u"), "/api/genres"):
            continue
        block = value.get("b")
        if not isinstance(block, list):
            continue
        for item in block:
            if not isinstance(item, dict):
                continue
            name = str(item.get("name") or "").strip()
            if not name:
                continue
            try:
                gid = int(item.get("id"))
            except Exception:
                continue
            if gid > 0:
                genres[name] = gid
    return genres


class EinschaltenPlugin(BasisPlugin):
    """Metadata plugin for an authorized source."""

    name = "einschalten"
"einschalten" + + def __init__(self) -> None: + self.is_available = REQUESTS_AVAILABLE + self.unavailable_reason = None if REQUESTS_AVAILABLE else f"requests fehlt: {REQUESTS_IMPORT_ERROR}" + self._session = None + self._id_by_title: Dict[str, int] = {} + self._detail_html_by_id: Dict[int, str] = {} + self._detail_by_id: Dict[int, MovieDetail] = {} + self._genre_id_by_name: Dict[str, int] = {} + self._genre_has_more_by_id_page: Dict[tuple[int, int], bool] = {} + self._new_titles_has_more_by_page: Dict[int, bool] = {} + + def _get_session(self): + _ensure_requests() + if self._session is None: + self._session = requests.Session() + return self._session + + def _get_base_url(self) -> str: + base = _get_setting_text(SETTING_BASE_URL, default=DEFAULT_BASE_URL).strip() + return base.rstrip("/") + + def _index_url(self) -> str: + base = self._get_base_url() + if not base: + return "" + path = _get_setting_text(SETTING_INDEX_PATH, default=DEFAULT_INDEX_PATH).strip() or "/" + return urljoin(base + "/", path.lstrip("/")) + + def _new_titles_url(self) -> str: + base = self._get_base_url() + if not base: + return "" + path = _get_setting_text(SETTING_NEW_TITLES_PATH, default=DEFAULT_NEW_TITLES_PATH).strip() or "/movies/new" + return urljoin(base + "/", path.lstrip("/")) + + def _genres_url(self) -> str: + base = self._get_base_url() + if not base: + return "" + path = _get_setting_text(SETTING_GENRES_PATH, default=DEFAULT_GENRES_PATH).strip() or "/genres" + return urljoin(base + "/", path.lstrip("/")) + + def _api_genres_url(self) -> str: + base = self._get_base_url() + if not base: + return "" + return urljoin(base + "/", "api/genres") + + def _search_url(self, query: str) -> str: + base = self._get_base_url() + if not base: + return "" + path = _get_setting_text(SETTING_SEARCH_PATH, default=DEFAULT_SEARCH_PATH).strip() or "/search" + url = urljoin(base + "/", path.lstrip("/")) + return f"{url}?{urlencode({'query': query})}" + + def _api_movies_url(self, *, with_genres: int, 
page: int = 1) -> str: + base = self._get_base_url() + if not base: + return "" + params: Dict[str, str] = {"withGenres": str(int(with_genres))} + if page and int(page) > 1: + params["page"] = str(int(page)) + return urljoin(base + "/", "api/movies") + f"?{urlencode(params)}" + + def _genre_page_url(self, *, genre_id: int, page: int = 1) -> str: + """Genre title pages are rendered server-side and embed the movie list in ng-state. + + Example: + - `/genres/` contains ng-state with `u: "/api/movies"` and `b.data` + `b.pagination`. + """ + + base = self._get_base_url() + if not base: + return "" + genre_root = self._genres_url().rstrip("/") + if not genre_root: + return "" + page = max(1, int(page or 1)) + url = urljoin(genre_root + "/", str(int(genre_id))) + if page > 1: + url = f"{url}?{urlencode({'page': str(page)})}" + return url + + def _movie_detail_url(self, movie_id: int) -> str: + base = self._get_base_url() + if not base: + return "" + return urljoin(base + "/", f"movies/{int(movie_id)}") + + def _watch_url(self, movie_id: int) -> str: + base = self._get_base_url() + if not base: + return "" + template = _get_setting_text(SETTING_WATCH_PATH_TEMPLATE, default=DEFAULT_WATCH_PATH_TEMPLATE).strip() + if not template: + template = DEFAULT_WATCH_PATH_TEMPLATE + try: + path = template.format(id=int(movie_id)) + except Exception: + path = DEFAULT_WATCH_PATH_TEMPLATE.format(id=int(movie_id)) + return urljoin(base + "/", path.lstrip("/")) + + def _ensure_title_id(self, title: str) -> int | None: + title = (title or "").strip() + if not title: + return None + cached = self._id_by_title.get(title) + if isinstance(cached, int) and cached > 0: + return cached + # Fallback: scan index ng-state again to rebuild mapping. + for movie in self._load_movies(): + if movie.title == title: + self._id_by_title[title] = movie.id + return movie.id + # Kodi startet das Plugin pro Navigation neu -> RAM-Cache geht verloren. + # Für Titel, die nicht auf der Index-Seite sind (z.B. 
/movies/new), lösen wir die ID + # über die Suchseite auf, die ebenfalls `id` + `title` im ng-state liefert. + try: + normalized = title.casefold().strip() + for movie in self._fetch_search_movies(title): + if (movie.title or "").casefold().strip() == normalized: + self._id_by_title[title] = movie.id + return movie.id + except Exception: + pass + return None + + def _fetch_movie_detail(self, movie_id: int) -> str: + movie_id = int(movie_id or 0) + if movie_id <= 0: + return "" + cached = self._detail_html_by_id.get(movie_id) + if isinstance(cached, str) and cached: + return cached + url = self._movie_detail_url(movie_id) + if not url: + return "" + try: + _log_url(url, kind="GET") + _notify_url(url) + sess = self._get_session() + resp = sess.get(url, headers=HEADERS, timeout=20) + resp.raise_for_status() + _log_url(resp.url or url, kind="OK") + _log_response_html(resp.url or url, resp.text) + self._detail_html_by_id[movie_id] = resp.text or "" + return resp.text or "" + except Exception: + return "" + + def _fetch_watch_payload(self, movie_id: int) -> dict[str, object]: + movie_id = int(movie_id or 0) + if movie_id <= 0: + return {} + url = self._watch_url(movie_id) + if not url: + return {} + try: + _log_url(url, kind="GET") + _notify_url(url) + sess = self._get_session() + resp = sess.get(url, headers=HEADERS, timeout=20) + resp.raise_for_status() + _log_url(resp.url or url, kind="OK") + # Some backends may return JSON with a JSON content-type; for debugging we still dump text. 
+ _log_response_html(resp.url or url, resp.text) + data = resp.json() + return dict(data) if isinstance(data, dict) else {} + except Exception: + return {} + + def _watch_stream_url(self, movie_id: int) -> str: + payload = self._fetch_watch_payload(movie_id) + stream_url = payload.get("streamUrl") + return str(stream_url).strip() if isinstance(stream_url, str) and stream_url.strip() else "" + + def metadata_for(self, title: str) -> tuple[dict[str, str], dict[str, str], list[object] | None]: + """Optional hook for the UI layer (default.py) to attach metadata/art without TMDB.""" + title = (title or "").strip() + movie_id = self._ensure_title_id(title) + if movie_id is None: + return {}, {}, None + + detail = self._detail_by_id.get(movie_id) + if detail is None: + html = self._fetch_movie_detail(movie_id) + payload = _extract_ng_state_payload(html) + parsed = _parse_ng_state_movie_detail(payload, movie_id=movie_id) + if parsed is not None: + self._detail_by_id[movie_id] = parsed + detail = parsed + + info: dict[str, str] = {"mediatype": "movie", "title": title} + art: dict[str, str] = {} + if detail is None: + return info, art, None + + if detail.overview: + info["plot"] = detail.overview + if detail.tagline: + info["tagline"] = detail.tagline + if detail.release_date: + info["premiered"] = detail.release_date + if len(detail.release_date) >= 4 and detail.release_date[:4].isdigit(): + info["year"] = detail.release_date[:4] + if detail.runtime_minutes is not None and detail.runtime_minutes > 0: + info["duration"] = str(int(detail.runtime_minutes) * 60) + if detail.vote_average is not None: + info["rating"] = str(detail.vote_average) + if detail.vote_count is not None: + info["votes"] = str(detail.vote_count) + if detail.genres: + info["genre"] = " / ".join(detail.genres) + + base = self._get_base_url() + if base: + if detail.poster_path: + poster = urljoin(base + "/", f"api/image/poster/{detail.poster_path.lstrip('/')}") + art.update({"thumb": poster, "poster": 
poster}) + if detail.backdrop_path: + backdrop = urljoin(base + "/", f"api/image/backdrop/{detail.backdrop_path.lstrip('/')}") + art.setdefault("fanart", backdrop) + art.setdefault("landscape", backdrop) + + return info, art, None + + def _fetch_index_movies(self) -> List[MovieItem]: + url = self._index_url() + if not url: + return [] + try: + _log_url(url, kind="GET") + _notify_url(url) + sess = self._get_session() + resp = sess.get(url, headers=HEADERS, timeout=20) + resp.raise_for_status() + _log_url(resp.url or url, kind="OK") + _log_response_html(resp.url or url, resp.text) + payload = _extract_ng_state_payload(resp.text) + return _parse_ng_state_movies(payload) + except Exception: + return [] + + def _fetch_new_titles_movies(self) -> List[MovieItem]: + # "Neue Filme" lives at `/movies/new` and embeds the list in ng-state (`u: "/api/movies"`). + url = self._new_titles_url() + if not url: + return [] + try: + _log_url(url, kind="GET") + _notify_url(url) + sess = self._get_session() + resp = sess.get(url, headers=HEADERS, timeout=20) + resp.raise_for_status() + _log_url(resp.url or url, kind="OK") + _log_response_html(resp.url or url, resp.text) + payload = _extract_ng_state_payload(resp.text) + movies = _parse_ng_state_movies(payload) + _log_debug_line(f"parse_ng_state_movies:count={len(movies)}") + if movies: + _log_titles(movies, context="new_titles") + return movies + return [] + except Exception: + return [] + + def _fetch_new_titles_movies_page(self, page: int) -> List[MovieItem]: + page = max(1, int(page or 1)) + url = self._new_titles_url() + if not url: + return [] + if page > 1: + url = f"{url}?{urlencode({'page': str(page)})}" + try: + _log_url(url, kind="GET") + _notify_url(url) + sess = self._get_session() + resp = sess.get(url, headers=HEADERS, timeout=20) + resp.raise_for_status() + _log_url(resp.url or url, kind="OK") + _log_response_html(resp.url or url, resp.text) + payload = _extract_ng_state_payload(resp.text) + movies, has_more, current_page 
= _parse_ng_state_movies_with_pagination(payload) + _log_debug_line(f"parse_ng_state_movies_page:page={page} count={len(movies)}") + if has_more is not None: + self._new_titles_has_more_by_page[page] = bool(has_more) + elif current_page is not None and int(current_page) != page: + self._new_titles_has_more_by_page[page] = False + if movies: + _log_titles(movies, context=f"new_titles_page={page}") + return movies + self._new_titles_has_more_by_page[page] = False + return [] + except Exception: + return [] + + def new_titles_page(self, page: int) -> List[str]: + """Paged variant: returns titles for `/movies/new?page=`.""" + if not REQUESTS_AVAILABLE: + return [] + if not self._get_base_url(): + return [] + page = max(1, int(page or 1)) + movies = self._fetch_new_titles_movies_page(page) + titles: List[str] = [] + seen: set[str] = set() + for movie in movies: + if movie.title in seen: + continue + seen.add(movie.title) + self._id_by_title[movie.title] = movie.id + titles.append(movie.title) + return titles + + def new_titles_has_more(self, page: int) -> bool: + """Tells the UI whether `/movies/new` has a next page after `page`.""" + page = max(1, int(page or 1)) + cached = self._new_titles_has_more_by_page.get(page) + if cached is not None: + return bool(cached) + # Load page to fill cache. + _ = self._fetch_new_titles_movies_page(page) + return bool(self._new_titles_has_more_by_page.get(page, False)) + + def _fetch_search_movies(self, query: str) -> List[MovieItem]: + query = (query or "").strip() + if not query: + return [] + + # Parse ng-state from /search page HTML. 
    def _load_movies(self) -> List[MovieItem]:
        """Default movie source: the index page's embedded movie list."""
        return self._fetch_index_movies()

    def _ensure_genre_index(self) -> None:
        """Populates the genre name->id map once (JSON API first, HTML ng-state fallback).

        Best-effort: any error leaves the map unchanged; callers treat an empty
        map as "no genres available".
        """
        if self._genre_id_by_name:
            return
        # Prefer direct JSON API (simpler): GET /api/genres -> [{"id":..,"name":..}, ...]
        api_url = self._api_genres_url()
        if api_url:
            try:
                _log_url(api_url, kind="GET")
                _notify_url(api_url)
                sess = self._get_session()
                resp = sess.get(api_url, headers=HEADERS, timeout=20)
                resp.raise_for_status()
                _log_url(resp.url or api_url, kind="OK")
                payload = resp.json()
                if isinstance(payload, list):
                    parsed: Dict[str, int] = {}
                    for item in payload:
                        if not isinstance(item, dict):
                            continue
                        name = str(item.get("name") or "").strip()
                        if not name:
                            continue
                        try:
                            gid = int(item.get("id"))
                        except Exception:
                            continue
                        if gid > 0:
                            parsed[name] = gid
                    if parsed:
                        # Replace the map atomically only when the API yielded data.
                        self._genre_id_by_name.clear()
                        self._genre_id_by_name.update(parsed)
                        return
            except Exception:
                pass

        # Fallback: parse ng-state from HTML /genres page.
        url = self._genres_url()
        if not url:
            return
        try:
            _log_url(url, kind="GET")
            _notify_url(url)
            sess = self._get_session()
            resp = sess.get(url, headers=HEADERS, timeout=20)
            resp.raise_for_status()
            _log_url(resp.url or url, kind="OK")
            _log_response_html(resp.url or url, resp.text)
            payload = _extract_ng_state_payload(resp.text)
            parsed = _parse_ng_state_genres(payload)
            if parsed:
                self._genre_id_by_name.clear()
                self._genre_id_by_name.update(parsed)
        except Exception:
            return

    async def search_titles(self, query: str) -> List[str]:
        """Searches titles; falls back to filtering the index list when search is empty.

        NOTE(review): declared async but performs blocking `requests` I/O —
        presumably the caller runs it off the UI thread; confirm against default.py.
        """
        if not REQUESTS_AVAILABLE:
            return []
        query = (query or "").strip()
        if not query:
            return []
        if not self._get_base_url():
            return []

        movies = self._fetch_search_movies(query)
        if not movies:
            movies = _filter_movies_by_title(query, self._load_movies())
        titles: List[str] = []
        seen: set[str] = set()
        for movie in movies:
            if movie.title in seen:
                continue
            seen.add(movie.title)
            # Cache the id so later navigation does not need another lookup.
            self._id_by_title[movie.title] = movie.id
            titles.append(movie.title)
        titles.sort(key=lambda value: value.casefold())
        return titles

    def genres(self) -> List[str]:
        """Returns all known genre names, sorted case-insensitively."""
        if not REQUESTS_AVAILABLE:
            return []
        if not self._get_base_url():
            return []
        self._ensure_genre_index()
        return sorted(self._genre_id_by_name.keys(), key=lambda value: value.casefold())

    def titles_for_genre(self, genre: str) -> List[str]:
        # Backwards compatible (first page only); paging handled via titles_for_genre_page().
        titles = self.titles_for_genre_page(genre, 1)
        titles.sort(key=lambda value: value.casefold())
        return titles
    def titles_for_genre_page(self, genre: str, page: int) -> List[str]:
        """Returns movie titles for `genre` at 1-based `page`.

        Side effects: fills the title->id cache and records the genre's
        has-more flag for `page` (consumed by `genre_has_more`).
        """
        if not REQUESTS_AVAILABLE:
            return []
        genre = (genre or "").strip()
        if not genre:
            return []
        if not self._get_base_url():
            return []
        self._ensure_genre_index()
        genre_id = self._genre_id_by_name.get(genre)
        if not genre_id:
            return []
        # Do NOT use `/api/movies?withGenres=...` directly: on some deployments it returns
        # a mismatched/unfiltered dataset. Instead parse the server-rendered genre page
        # `/genres/` which embeds the correct data in ng-state.
        url = self._genre_page_url(genre_id=int(genre_id), page=max(1, int(page or 1)))
        if not url:
            return []
        try:
            _log_url(url, kind="GET")
            _notify_url(url)
            sess = self._get_session()
            resp = sess.get(url, headers=HEADERS, timeout=20)
            resp.raise_for_status()
            _log_url(resp.url or url, kind="OK")
            _log_response_html(resp.url or url, resp.text)
            payload = _extract_ng_state_payload(resp.text)
        except Exception:
            return []
        if not isinstance(payload, dict):
            return []

        movies, has_more, current_page = _parse_ng_state_movies_with_pagination(payload)
        page = max(1, int(page or 1))
        if has_more is not None:
            self._genre_has_more_by_id_page[(int(genre_id), page)] = bool(has_more)
        elif current_page is not None and int(current_page) != page:
            # Defensive: if the page param wasn't honored, avoid showing "next".
            self._genre_has_more_by_id_page[(int(genre_id), page)] = False

        titles: List[str] = []
        seen: set[str] = set()
        for movie in movies:
            title = (movie.title or "").strip()
            if not title or title in seen:
                continue
            seen.add(title)
            if movie.id > 0:
                self._id_by_title[title] = int(movie.id)
            titles.append(title)
        return titles

    def genre_has_more(self, genre: str, page: int) -> bool:
        """Optional: tells the UI whether a genre has more pages after `page`."""
        genre = (genre or "").strip()
        if not genre:
            return False
        self._ensure_genre_index()
        genre_id = self._genre_id_by_name.get(genre)
        if not genre_id:
            return False
        page = max(1, int(page or 1))
        cached = self._genre_has_more_by_id_page.get((int(genre_id), page))
        if cached is not None:
            return bool(cached)
        # If the page wasn't loaded yet, load it (fills the cache) and then report.
        _ = self.titles_for_genre_page(genre, page)
        return bool(self._genre_has_more_by_id_page.get((int(genre_id), page), False))

    def seasons_for(self, title: str) -> List[str]:
        """Returns the pseudo-"season" folders shown for a movie title.

        Opening a title warms the detail-HTML cache so the following
        navigation step is fast.
        """
        # When a title is opened: fetch the detail page (HTML) by id and cache it.
        title = (title or "").strip()
        if not title:
            return []
        movie_id = self._ensure_title_id(title)
        if movie_id is not None:
            self._fetch_movie_detail(movie_id)
        if _get_setting_bool(SETTING_ENABLE_PLAYBACK, default=False):
            # Playback: expose a single "Stream" folder (inside: 1 playable item = Filmtitel).
            return ["Stream"]
        return ["Details"]
+ return ["Stream"] + return ["Details"] + + def episodes_for(self, title: str, season: str) -> List[str]: + season = (season or "").strip() + if season.casefold() == "stream" and _get_setting_bool(SETTING_ENABLE_PLAYBACK, default=False): + title = (title or "").strip() + return [title] if title else [] + return [] + + def stream_link_for(self, title: str, season: str, episode: str) -> Optional[str]: + if not _get_setting_bool(SETTING_ENABLE_PLAYBACK, default=False): + return None + title = (title or "").strip() + season = (season or "").strip() + episode = (episode or "").strip() + # Backwards compatible: + # - old: Film / Stream + # - new: Stream / + if not title: + return None + if season.casefold() == "film" and episode.casefold() == "stream": + pass + elif season.casefold() == "stream" and (episode == title or episode.casefold() == "stream"): + pass + else: + return None + movie_id = self._ensure_title_id(title) + if movie_id is None: + return None + stream_url = self._watch_stream_url(movie_id) + return stream_url or None + + def resolve_stream_link(self, link: str) -> Optional[str]: + try: + from resolveurl_backend import resolve as resolve_with_resolveurl + except Exception: + resolve_with_resolveurl = None + if callable(resolve_with_resolveurl): + return resolve_with_resolveurl(link) or link + return link + + def capabilities(self) -> Set[str]: + return {"new_titles", "genres"} + + def new_titles(self) -> List[str]: + if not REQUESTS_AVAILABLE: + return [] + if not self._get_base_url(): + return [] + # Backwards compatible: first page only. UI uses paging via `new_titles_page`. + return self.new_titles_page(1) diff --git a/addon/plugins/serienstream_plugin.py b/addon/plugins/serienstream_plugin.py new file mode 100644 index 0000000..8f139dc --- /dev/null +++ b/addon/plugins/serienstream_plugin.py @@ -0,0 +1,966 @@ +"""Serienstream (s.to) Integration als Downloader-Plugin. + +Hinweise: +- Diese Integration nutzt optional `requests` + `beautifulsoup4` (bs4). 
+- In Kodi koennen zusaetzliche Debug-Funktionen ueber Addon-Settings aktiviert werden + (URL-Logging, HTML-Dumps, Benachrichtigungen). +""" + +from __future__ import annotations + +from dataclasses import dataclass, field +from datetime import datetime +import hashlib +import os +import re +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, TypeAlias + +try: # pragma: no cover - optional dependency + import requests + from bs4 import BeautifulSoup # type: ignore[import-not-found] +except ImportError as exc: # pragma: no cover - optional dependency + requests = None + BeautifulSoup = None + REQUESTS_AVAILABLE = False + REQUESTS_IMPORT_ERROR = exc +else: + REQUESTS_AVAILABLE = True + REQUESTS_IMPORT_ERROR = None + +try: # pragma: no cover - optional Kodi helpers + import xbmcaddon # type: ignore[import-not-found] + import xbmcvfs # type: ignore[import-not-found] + import xbmcgui # type: ignore[import-not-found] +except ImportError: # pragma: no cover - allow running outside Kodi + xbmcaddon = None + xbmcvfs = None + xbmcgui = None + +from plugin_interface import BasisPlugin +from plugin_helpers import dump_response_html, get_setting_bool, log_url, notify_url +from http_session_pool import get_requests_session +from regex_patterns import SEASON_EPISODE_TAG, SEASON_EPISODE_URL + +if TYPE_CHECKING: # pragma: no cover + from requests import Session as RequestsSession + from bs4 import BeautifulSoup as BeautifulSoupT # type: ignore[import-not-found] +else: # pragma: no cover + RequestsSession: TypeAlias = Any + BeautifulSoupT: TypeAlias = Any + + +BASE_URL = "https://s.to" +SERIES_BASE_URL = f"{BASE_URL}/serie/stream" +POPULAR_SERIES_URL = f"{BASE_URL}/beliebte-serien" +LATEST_EPISODES_URL = f"{BASE_URL}" +DEFAULT_PREFERRED_HOSTERS = ["voe"] +DEFAULT_TIMEOUT = 20 +ADDON_ID = "plugin.video.viewit" +GLOBAL_SETTING_LOG_URLS = "debug_log_urls" +GLOBAL_SETTING_DUMP_HTML = "debug_dump_html" +GLOBAL_SETTING_SHOW_URL_INFO = "debug_show_url_info" +HEADERS = { + 
"User-Agent": "Mozilla/5.0 (Kodi; ViewIt) AppleWebKit/537.36 (KHTML, like Gecko)", + "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", + "Accept-Language": "de-DE,de;q=0.9,en;q=0.8", + "Connection": "keep-alive", +} + + +@dataclass +class SeriesResult: + title: str + description: str + url: str + + +@dataclass +class EpisodeInfo: + number: int + title: str + original_title: str + url: str + season_label: str = "" + languages: List[str] = field(default_factory=list) + hosters: List[str] = field(default_factory=list) + + +@dataclass +class LatestEpisode: + series_title: str + season: int + episode: int + url: str + airdate: str + + +@dataclass +class SeasonInfo: + number: int + url: str + episodes: List[EpisodeInfo] + + +def _absolute_url(href: str) -> str: + return f"{BASE_URL}{href}" if href.startswith("/") else href + + +def _normalize_series_url(identifier: str) -> str: + if identifier.startswith("http://") or identifier.startswith("https://"): + return identifier.rstrip("/") + slug = identifier.strip("/") + return f"{SERIES_BASE_URL}/{slug}" + + +def _series_root_url(url: str) -> str: + """Normalisiert eine Serien-URL auf die Root-URL (ohne /staffel-x oder /episode-x).""" + normalized = (url or "").strip().rstrip("/") + normalized = re.sub(r"/staffel-\d+(?:/.*)?$", "", normalized) + normalized = re.sub(r"/episode-\d+(?:/.*)?$", "", normalized) + return normalized.rstrip("/") + + +def _log_visit(url: str) -> None: + _log_url(url, kind="VISIT") + _notify_url(url) + if xbmcaddon is None: + print(f"Visiting: {url}") + + +def _normalize_text(value: str) -> str: + """Legacy normalization (kept for backwards compatibility).""" + value = value.casefold() + value = re.sub(r"[^a-z0-9]+", "", value) + return value + + +def _normalize_search_text(value: str) -> str: + """Normalisiert Text für die Suche ohne Wortgrenzen zu "verschmelzen". + + Wichtig: Wir ersetzen Nicht-Alphanumerisches durch Leerzeichen, statt es zu entfernen. 
+ Dadurch entstehen keine künstlichen Treffer über Wortgrenzen hinweg (z.B. "an" + "na" -> "anna"). + """ + + value = (value or "").casefold() + value = re.sub(r"[^a-z0-9]+", " ", value) + value = re.sub(r"\s+", " ", value).strip() + return value + + +def _get_setting_bool(setting_id: str, *, default: bool = False) -> bool: + return get_setting_bool(ADDON_ID, setting_id, default=default) + + +def _notify_url(url: str) -> None: + notify_url(ADDON_ID, heading="Serienstream", url=url, enabled_setting_id=GLOBAL_SETTING_SHOW_URL_INFO) + + +def _log_url(url: str, *, kind: str = "VISIT") -> None: + log_url(ADDON_ID, enabled_setting_id=GLOBAL_SETTING_LOG_URLS, log_filename="serienstream_urls.log", url=url, kind=kind) + + +def _log_parsed_url(url: str) -> None: + _log_url(url, kind="PARSE") + + +def _log_response_html(url: str, body: str) -> None: + dump_response_html( + ADDON_ID, + enabled_setting_id=GLOBAL_SETTING_DUMP_HTML, + url=url, + body=body, + filename_prefix="s_to_response", + ) + + +def _ensure_requests() -> None: + if requests is None or BeautifulSoup is None: + raise RuntimeError("requests/bs4 sind nicht verfuegbar.") + + +def _looks_like_cloudflare_challenge(body: str) -> bool: + lower = body.lower() + markers = ( + "cf-browser-verification", + "cf-challenge", + "cf_chl", + "challenge-platform", + "attention required! | cloudflare", + "just a moment...", + "cloudflare ray id", + ) + return any(marker in lower for marker in markers) + + +def _get_soup(url: str, *, session: Optional[RequestsSession] = None) -> BeautifulSoupT: + _ensure_requests() + _log_visit(url) + sess = session or get_requests_session("serienstream", headers=HEADERS) + response = sess.get(url, headers=HEADERS, timeout=DEFAULT_TIMEOUT) + response.raise_for_status() + if response.url and response.url != url: + _log_url(response.url, kind="REDIRECT") + _log_response_html(url, response.text) + if _looks_like_cloudflare_challenge(response.text): + raise RuntimeError("Cloudflare-Schutz erkannt. 
def _get_soup_simple(url: str) -> BeautifulSoupT:
    """Fetches and parses `url` with the shared pooled session (no session override).

    Raises for HTTP errors and when a Cloudflare challenge page is detected.
    """
    _ensure_requests()
    _log_visit(url)
    sess = get_requests_session("serienstream", headers=HEADERS)
    response = sess.get(url, headers=HEADERS, timeout=DEFAULT_TIMEOUT)
    response.raise_for_status()
    if response.url and response.url != url:
        _log_url(response.url, kind="REDIRECT")
    _log_response_html(url, response.text)
    if _looks_like_cloudflare_challenge(response.text):
        raise RuntimeError("Cloudflare-Schutz erkannt. requests reicht ggf. nicht aus.")
    return BeautifulSoup(response.text, "html.parser")


def search_series(query: str) -> List[SeriesResult]:
    """Searches the /serien catalog (genre listing) by title or alternative title."""
    _ensure_requests()
    normalized_query = _normalize_search_text(query)
    if not normalized_query:
        return []
    # Direct fetch as in fetch_serien.py.
    catalog_url = f"{BASE_URL}/serien?by=genre"
    soup = _get_soup_simple(catalog_url)
    results: List[SeriesResult] = []
    for series in parse_series_catalog(soup).values():
        for entry in series:
            if not entry.title:
                continue
            # Fix: also match the `data-search` text (alternative titles), as the
            # docstring promises; previously only the display title was searched.
            haystack = _normalize_search_text(f"{entry.title} {entry.description}")
            if normalized_query in haystack:
                results.append(entry)
    return results


def parse_series_catalog(soup: BeautifulSoupT) -> Dict[str, List[SeriesResult]]:
    """Parses the series overview (/serien) into a genre -> series-list mapping."""
    catalog: Dict[str, List[SeriesResult]] = {}

    # New layout (as of 2026-01): group header + list.
    # - Header: `div.background-1 ...` with `h3`
    # - Entries: `ul.series-list` -> `li.series-item[data-search]` -> `a[href]`
    for header in soup.select("div.background-1 h3"):
        group = (header.get_text(strip=True) or "").strip()
        if not group:
            continue
        list_node = header.parent.find_next_sibling("ul", class_="series-list")
        if not list_node:
            continue
        series: List[SeriesResult] = []
        for item in list_node.select("li.series-item"):
            anchor = item.find("a", href=True)
            if not anchor:
                continue
            href = (anchor.get("href") or "").strip()
            url = _absolute_url(href)
            if url:
                _log_parsed_url(url)
            # Keep only series root pages (no season/episode deep links).
            if ("/serie/" not in url) or "/staffel-" in url or "/episode-" in url:
                continue
            title = (anchor.get_text(" ", strip=True) or "").strip()
            description = (item.get("data-search") or "").strip()
            if title:
                series.append(SeriesResult(title=title, description=description, url=url))
        if series:
            catalog[group] = series

    return catalog


def _extract_season_links(soup: BeautifulSoupT) -> List[Tuple[int, str]]:
    """Collects (season_number, absolute_url) pairs from the season pill nav, sorted."""
    season_links: List[Tuple[int, str]] = []
    seen_numbers: set[int] = set()
    anchors = soup.select("ul.nav.list-items-nav a[data-season-pill][href]")
    for anchor in anchors:
        href = anchor.get("href") or ""
        if "/episode-" in href:
            continue
        data_number = (anchor.get("data-season-pill") or "").strip()
        # Season number: URL wins, then the data attribute, then the link label.
        match = re.search(r"/staffel-(\d+)", href)
        if match:
            number = int(match.group(1))
        elif data_number.isdigit():
            number = int(data_number)
        else:
            label = anchor.get_text(strip=True)
            if not label.isdigit():
                continue
            number = int(label)
        if number in seen_numbers:
            continue
        seen_numbers.add(number)
        season_url = _absolute_url(href)
        if season_url:
            _log_parsed_url(season_url)
        season_links.append((number, season_url))
    season_links.sort(key=lambda item: item[0])
    return season_links


def _extract_number_of_seasons(soup: BeautifulSoupT) -> Optional[int]:
    """Reads the `numberOfSeasons` itemprop meta tag; None when missing/invalid."""
    tag = soup.select_one('meta[itemprop="numberOfSeasons"]')
    if not tag:
        return None
    content = (tag.get("content") or "").strip()
    if not content.isdigit():
        return None
    count = int(content)
    return count if count > 0 else None
+ if not tag: + return None + content = (tag.get("content") or "").strip() + if not content.isdigit(): + return None + count = int(content) + return count if count > 0 else None + + +def _extract_canonical_url(soup: BeautifulSoupT, fallback: str) -> str: + canonical = soup.select_one('link[rel="canonical"][href]') + href = (canonical.get("href") if canonical else "") or "" + href = href.strip() + if href.startswith("http://") or href.startswith("https://"): + return href.rstrip("/") + return fallback.rstrip("/") + + +def _extract_episodes(soup: BeautifulSoupT) -> List[EpisodeInfo]: + episodes: List[EpisodeInfo] = [] + season_label = "" + season_header = soup.select_one("section.episode-section h2") or soup.select_one("h2.h3") + if season_header: + season_label = (season_header.get_text(" ", strip=True) or "").strip() + + language_map = { + "german": "DE", + "english": "EN", + "japanese": "JP", + "turkish": "TR", + "spanish": "ES", + "italian": "IT", + "french": "FR", + "korean": "KO", + "russian": "RU", + "polish": "PL", + "portuguese": "PT", + "chinese": "ZH", + "arabic": "AR", + "thai": "TH", + } + # Neues Layout (Stand: 2026-01): Episoden-Tabelle mit Zeilen und onclick-URL. 
+ rows = soup.select("table.episode-table tbody tr.episode-row") + for index, row in enumerate(rows): + onclick = (row.get("onclick") or "").strip() + url = "" + if onclick: + match = re.search(r"location=['\\\"]([^'\\\"]+)['\\\"]", onclick) + if match: + url = _absolute_url(match.group(1)) + if not url: + anchor = row.find("a", href=True) + url = _absolute_url(anchor.get("href")) if anchor else "" + if url: + _log_parsed_url(url) + + number_tag = row.select_one(".episode-number-cell") + number_text = (number_tag.get_text(strip=True) if number_tag else "").strip() + match = re.search(r"/episode-(\d+)", url) if url else None + if match: + number = int(match.group(1)) + else: + digits = "".join(ch for ch in number_text if ch.isdigit()) + number = int(digits) if digits else index + 1 + + title_tag = row.select_one(".episode-title-ger") + original_tag = row.select_one(".episode-title-eng") + title = (title_tag.get_text(strip=True) if title_tag else "").strip() + original_title = (original_tag.get_text(strip=True) if original_tag else "").strip() + if not title: + title = f"Episode {number}" + + hosters: List[str] = [] + for img in row.select(".episode-watch-cell img"): + label = (img.get("alt") or img.get("title") or "").strip() + if label and label not in hosters: + hosters.append(label) + + languages: List[str] = [] + for flag in row.select(".episode-language-cell .watch-language"): + classes = flag.get("class") or [] + if isinstance(classes, str): + classes = classes.split() + for cls in classes: + if cls.startswith("svg-flag-"): + key = cls.replace("svg-flag-", "").strip() + if not key: + continue + value = language_map.get(key, key.upper()) + if value and value not in languages: + languages.append(value) + + episodes.append( + EpisodeInfo( + number=number, + title=title, + original_title=original_title, + url=url, + season_label=season_label, + languages=languages, + hosters=hosters, + ) + ) + if episodes: + return episodes + return episodes + + +def 
fetch_episode_stream_link( + episode_url: str, + *, + preferred_hosters: Optional[List[str]] = None, +) -> Optional[str]: + _ensure_requests() + normalized_url = _absolute_url(episode_url) + preferred = [hoster.lower() for hoster in (preferred_hosters or DEFAULT_PREFERRED_HOSTERS)] + session = get_requests_session("serienstream", headers=HEADERS) + # Preflight optional: Startseite kann 5xx liefern, Zielseite aber funktionieren. + try: + _get_soup(BASE_URL, session=session) + except Exception: + pass + soup = _get_soup(normalized_url, session=session) + candidates: List[Tuple[str, str]] = [] + for button in soup.select("button.link-box[data-play-url]"): + play_url = (button.get("data-play-url") or "").strip() + provider = (button.get("data-provider-name") or "").strip() + url = _absolute_url(play_url) + if url: + _log_parsed_url(url) + if provider and url: + candidates.append((provider, url)) + if not candidates: + return None + for preferred_name in preferred: + for name, url in candidates: + if name.lower() == preferred_name: + return url + return candidates[0][1] + + +def fetch_episode_hoster_names(episode_url: str) -> List[str]: + """Liest die verfügbaren Hoster-Namen für eine Episode aus.""" + _ensure_requests() + normalized_url = _absolute_url(episode_url) + session = get_requests_session("serienstream", headers=HEADERS) + # Preflight optional: Startseite kann 5xx liefern, Zielseite aber funktionieren. 
+ try: + _get_soup(BASE_URL, session=session) + except Exception: + pass + soup = _get_soup(normalized_url, session=session) + names: List[str] = [] + seen: set[str] = set() + for button in soup.select("button.link-box[data-provider-name]"): + name = (button.get("data-provider-name") or "").strip() + play_url = (button.get("data-play-url") or "").strip() + url = _absolute_url(play_url) + if url: + _log_parsed_url(url) + key = name.casefold().strip() + if not key or key in seen: + continue + seen.add(key) + names.append(name) + _log_url(name, kind="HOSTER") + if names: + _log_url(f"{normalized_url}#hosters={','.join(names)}", kind="HOSTERS") + return names + + +_LATEST_EPISODE_TAG_RE = re.compile(SEASON_EPISODE_TAG, re.IGNORECASE) +_LATEST_EPISODE_URL_RE = re.compile(SEASON_EPISODE_URL, re.IGNORECASE) + + +def _extract_latest_episodes(soup: BeautifulSoupT) -> List[LatestEpisode]: + """Parst die neuesten Episoden von der Startseite.""" + episodes: List[LatestEpisode] = [] + seen: set[str] = set() + + for anchor in soup.select("a.latest-episode-row[href]"): + href = (anchor.get("href") or "").strip() + if not href or "/serie/" not in href: + continue + url = _absolute_url(href) + if not url: + continue + + title_node = anchor.select_one(".ep-title") + series_title = (title_node.get("title") if title_node else "") or "" + series_title = series_title.strip() or (title_node.get_text(strip=True) if title_node else "").strip() + if not series_title: + continue + + season_text = (anchor.select_one(".ep-season").get_text(strip=True) if anchor.select_one(".ep-season") else "").strip() + episode_text = (anchor.select_one(".ep-episode").get_text(strip=True) if anchor.select_one(".ep-episode") else "").strip() + season_number: Optional[int] = None + episode_number: Optional[int] = None + match = re.search(r"S\\s*(\\d+)", season_text, re.IGNORECASE) + if match: + season_number = int(match.group(1)) + match = re.search(r"E\\s*(\\d+)", episode_text, re.IGNORECASE) + if match: + 
episode_number = int(match.group(1)) + if season_number is None or episode_number is None: + match = _LATEST_EPISODE_URL_RE.search(href) + if match: + season_number = int(match.group(1)) + episode_number = int(match.group(2)) + if season_number is None or episode_number is None: + continue + + airdate_node = anchor.select_one(".ep-time") + airdate = (airdate_node.get_text(" ", strip=True) if airdate_node else "").strip() + + key = f"{url}\\t{season_number}\\t{episode_number}" + if key in seen: + continue + seen.add(key) + + _log_parsed_url(url) + episodes.append( + LatestEpisode( + series_title=series_title, + season=int(season_number), + episode=int(episode_number), + url=url, + airdate=airdate, + ) + ) + + return episodes + + +def resolve_redirect(target_url: str) -> Optional[str]: + _ensure_requests() + normalized_url = _absolute_url(target_url) + _log_visit(normalized_url) + session = get_requests_session("serienstream", headers=HEADERS) + # Preflight optional: Startseite kann 5xx liefern, Zielseite aber funktionieren. + try: + _get_soup(BASE_URL, session=session) + except Exception: + pass + response = session.get( + normalized_url, + headers=HEADERS, + timeout=DEFAULT_TIMEOUT, + allow_redirects=True, + ) + if response.url: + _log_url(response.url, kind="RESOLVED") + return response.url if response.url else None + + +def scrape_series_detail( + series_identifier: str, + max_seasons: Optional[int] = None, +) -> List[SeasonInfo]: + _ensure_requests() + series_url = _series_root_url(_normalize_series_url(series_identifier)) + _log_url(series_url, kind="SERIES") + _notify_url(series_url) + session = get_requests_session("serienstream", headers=HEADERS) + # Preflight ist optional; manche Umgebungen/Provider leiten die Startseite um. 
+ try: + _get_soup(BASE_URL, session=session) + except Exception: + pass + soup = _get_soup(series_url, session=session) + + base_series_url = _series_root_url(_extract_canonical_url(soup, series_url)) + season_links = _extract_season_links(soup) + season_count = _extract_number_of_seasons(soup) + if season_count and (not season_links or len(season_links) < season_count): + existing = {number for number, _ in season_links} + for number in range(1, season_count + 1): + if number in existing: + continue + season_url = f"{base_series_url}/staffel-{number}" + _log_parsed_url(season_url) + season_links.append((number, season_url)) + season_links.sort(key=lambda item: item[0]) + if max_seasons is not None: + season_links = season_links[:max_seasons] + seasons: List[SeasonInfo] = [] + for number, url in season_links: + season_soup = _get_soup(url, session=session) + episodes = _extract_episodes(season_soup) + seasons.append(SeasonInfo(number=number, url=url, episodes=episodes)) + seasons.sort(key=lambda s: s.number) + return seasons + + +class SerienstreamPlugin(BasisPlugin): + """Downloader-Plugin, das Serien von s.to ueber requests/bs4 bereitstellt.""" + + name = "Serienstream (s.to)" + POPULAR_GENRE_LABEL = "⭐ Beliebte Serien" + + def __init__(self) -> None: + self._series_results: Dict[str, SeriesResult] = {} + self._season_cache: Dict[str, List[SeasonInfo]] = {} + self._episode_label_cache: Dict[Tuple[str, str], Dict[str, EpisodeInfo]] = {} + self._catalog_cache: Optional[Dict[str, List[SeriesResult]]] = None + self._popular_cache: Optional[List[SeriesResult]] = None + self._requests_available = REQUESTS_AVAILABLE + self._default_preferred_hosters: List[str] = list(DEFAULT_PREFERRED_HOSTERS) + self._preferred_hosters: List[str] = list(self._default_preferred_hosters) + self._hoster_cache: Dict[Tuple[str, str, str], List[str]] = {} + self._latest_cache: Dict[int, List[LatestEpisode]] = {} + self._latest_hoster_cache: Dict[str, List[str]] = {} + self.is_available = 
True + self.unavailable_reason: Optional[str] = None + if not self._requests_available: # pragma: no cover - optional dependency + self.is_available = False + self.unavailable_reason = ( + "requests/bs4 fehlen. Installiere 'requests' und 'beautifulsoup4'." + ) + print( + "SerienstreamPlugin deaktiviert: requests/bs4 fehlen. " + "Installiere 'requests' und 'beautifulsoup4'." + ) + if REQUESTS_IMPORT_ERROR: + print(f"Importfehler: {REQUESTS_IMPORT_ERROR}") + return + + def _ensure_catalog(self) -> Dict[str, List[SeriesResult]]: + if self._catalog_cache is not None: + return self._catalog_cache + # Stand: 2026-01 liefert `?by=genre` konsistente Gruppen für `genres()`. + catalog_url = f"{BASE_URL}/serien?by=genre" + soup = _get_soup_simple(catalog_url) + self._catalog_cache = parse_series_catalog(soup) + return self._catalog_cache + + def genres(self) -> List[str]: + """Optional: Liefert alle Genres aus dem Serien-Katalog.""" + if not self._requests_available: + return [] + catalog = self._ensure_catalog() + return sorted(catalog.keys(), key=str.casefold) + + def capabilities(self) -> set[str]: + """Meldet unterstützte Features für Router-Menüs.""" + return {"popular_series", "genres", "latest_episodes"} + + def popular_series(self) -> List[str]: + """Liefert die Titel der beliebten Serien (Quelle: `/beliebte-serien`).""" + if not self._requests_available: + return [] + entries = self._ensure_popular() + self._series_results.update({entry.title: entry for entry in entries if entry.title}) + return [entry.title for entry in entries if entry.title] + + def titles_for_genre(self, genre: str) -> List[str]: + """Optional: Liefert Titel für ein Genre.""" + if not self._requests_available: + return [] + genre = (genre or "").strip() + if not genre: + return [] + if genre == self.POPULAR_GENRE_LABEL: + return self.popular_series() + catalog = self._ensure_catalog() + entries = catalog.get(genre, []) + self._series_results.update({entry.title: entry for entry in entries if 
entry.title}) + return [entry.title for entry in entries if entry.title] + + def _ensure_popular(self) -> List[SeriesResult]: + """Laedt und cached die Liste der beliebten Serien aus `/beliebte-serien`.""" + if self._popular_cache is not None: + return list(self._popular_cache) + soup = _get_soup_simple(POPULAR_SERIES_URL) + results: List[SeriesResult] = [] + seen: set[str] = set() + + # Neues Layout (Stand: 2026-01): Abschnitt "Meistgesehen" hat Karten mit + # `a.show-card` und Titel im `img alt=...`. + anchors = None + for section in soup.select("div.mb-5"): + h2 = section.select_one("h2") + label = (h2.get_text(" ", strip=True) if h2 else "").casefold() + if "meistgesehen" in label: + anchors = section.select("a.show-card[href]") + break + if anchors is None: + anchors = soup.select("a.show-card[href]") + + for anchor in anchors: + href = (anchor.get("href") or "").strip() + if not href or "/serie/" not in href: + continue + img = anchor.select_one("img[alt]") + title = ((img.get("alt") if img else "") or "").strip() + if not title or title in seen: + continue + url = _absolute_url(href).split("#", 1)[0].split("?", 1)[0].rstrip("/") + url = re.sub(r"/staffel-\\d+(?:/.*)?$", "", url).rstrip("/") + if not url: + continue + _log_parsed_url(url) + seen.add(title) + results.append(SeriesResult(title=title, description="", url=url)) + + + self._popular_cache = list(results) + return list(results) + + @staticmethod + def _season_label(number: int) -> str: + return f"Staffel {number}" + + @staticmethod + def _episode_label(info: EpisodeInfo) -> str: + suffix_parts: List[str] = [] + if info.original_title: + suffix_parts.append(info.original_title) + # Staffel nicht im Episoden-Label anzeigen (wird im UI bereits gesetzt). 
+ suffix = f" ({' | '.join(suffix_parts)})" if suffix_parts else "" + + return f"Episode {info.number}: {info.title}{suffix}" + + @staticmethod + def _parse_season_number(label: str) -> Optional[int]: + digits = "".join(ch for ch in label if ch.isdigit()) + if not digits: + return None + return int(digits) + + def _clear_episode_cache_for_title(self, title: str) -> None: + keys_to_remove = [key for key in self._episode_label_cache if key[0] == title] + for key in keys_to_remove: + self._episode_label_cache.pop(key, None) + keys_to_remove = [key for key in self._hoster_cache if key[0] == title] + for key in keys_to_remove: + self._hoster_cache.pop(key, None) + + def _cache_episode_labels(self, title: str, season_label: str, season_info: SeasonInfo) -> None: + cache_key = (title, season_label) + self._episode_label_cache[cache_key] = { + self._episode_label(info): info for info in season_info.episodes + } + + def _lookup_episode(self, title: str, season_label: str, episode_label: str) -> Optional[EpisodeInfo]: + cache_key = (title, season_label) + cached = self._episode_label_cache.get(cache_key) + if cached: + return cached.get(episode_label) + + seasons = self._ensure_seasons(title) + number = self._parse_season_number(season_label) + if number is None: + return None + + for season_info in seasons: + if season_info.number == number: + self._cache_episode_labels(title, season_label, season_info) + return self._episode_label_cache.get(cache_key, {}).get(episode_label) + return None + + async def search_titles(self, query: str) -> List[str]: + query = query.strip() + if not query: + self._series_results.clear() + self._season_cache.clear() + self._episode_label_cache.clear() + self._catalog_cache = None + return [] + if not self._requests_available: + raise RuntimeError("SerienstreamPlugin kann ohne requests/bs4 nicht suchen.") + try: + # Nutzt den Katalog (/serien), der jetzt nach Genres gruppiert ist. 
+ # Alternativ gäbe es ein Ajax-Endpoint, aber der ist nicht immer zuverlässig erreichbar. + results = search_series(query) + except Exception as exc: # pragma: no cover - defensive logging + self._series_results.clear() + self._season_cache.clear() + self._episode_label_cache.clear() + self._catalog_cache = None + raise RuntimeError(f"Serienstream-Suche fehlgeschlagen: {exc}") from exc + self._series_results = {result.title: result for result in results} + self._season_cache.clear() + self._episode_label_cache.clear() + return [result.title for result in results] + + def _ensure_seasons(self, title: str) -> List[SeasonInfo]: + if title in self._season_cache: + seasons = self._season_cache[title] + # Auch bei Cache-Treffern die URLs loggen, damit nachvollziehbar bleibt, + # welche Seiten für Staffel-/Episodenlisten relevant sind. + if _get_setting_bool(GLOBAL_SETTING_LOG_URLS, default=False): + series = self._series_results.get(title) + if series and series.url: + _log_url(series.url, kind="CACHE") + for season in seasons: + if season.url: + _log_url(season.url, kind="CACHE") + return seasons + series = self._series_results.get(title) + if not series: + # Kodi startet das Plugin pro Navigation neu -> Such-Cache im RAM geht verloren. + # Daher den Titel erneut im Katalog auflösen, um die Serien-URL zu bekommen. 
+ catalog = self._ensure_catalog() + lookup_key = title.casefold().strip() + for entries in catalog.values(): + for entry in entries: + if entry.title.casefold().strip() == lookup_key: + series = entry + self._series_results[entry.title] = entry + break + if series: + break + if not series: + return [] + try: + seasons = scrape_series_detail(series.url) + except Exception as exc: # pragma: no cover - defensive logging + raise RuntimeError(f"Serienstream-Staffeln konnten nicht geladen werden: {exc}") from exc + self._clear_episode_cache_for_title(title) + self._season_cache[title] = seasons + return seasons + + def seasons_for(self, title: str) -> List[str]: + seasons = self._ensure_seasons(title) + # Serienstream liefert gelegentlich Staffeln ohne Episoden (z.B. Parsing-/Layoutwechsel). + # Diese sollen im UI nicht als auswählbarer Menüpunkt erscheinen. + return [self._season_label(season.number) for season in seasons if season.episodes] + + def episodes_for(self, title: str, season: str) -> List[str]: + seasons = self._ensure_seasons(title) + number = self._parse_season_number(season) + if number is None: + return [] + for season_info in seasons: + if season_info.number == number: + labels = [self._episode_label(info) for info in season_info.episodes] + self._cache_episode_labels(title, season, season_info) + return labels + return [] + + def stream_link_for(self, title: str, season: str, episode: str) -> Optional[str]: + if not self._requests_available: + raise RuntimeError("SerienstreamPlugin kann ohne requests/bs4 keine Stream-Links liefern.") + episode_info = self._lookup_episode(title, season, episode) + if not episode_info: + return None + try: + link = fetch_episode_stream_link( + episode_info.url, + preferred_hosters=self._preferred_hosters, + ) + if link: + _log_url(link, kind="FOUND") + return link + except Exception as exc: # pragma: no cover - defensive logging + raise RuntimeError(f"Stream-Link konnte nicht geladen werden: {exc}") from exc + + def 
available_hosters_for(self, title: str, season: str, episode: str) -> List[str]: + if not self._requests_available: + raise RuntimeError("SerienstreamPlugin kann ohne requests/bs4 keine Hoster laden.") + cache_key = (title, season, episode) + cached = self._hoster_cache.get(cache_key) + if cached is not None: + return list(cached) + + episode_info = self._lookup_episode(title, season, episode) + if not episode_info: + return [] + try: + names = fetch_episode_hoster_names(episode_info.url) + except Exception as exc: # pragma: no cover - defensive logging + raise RuntimeError(f"Hoster konnten nicht geladen werden: {exc}") from exc + self._hoster_cache[cache_key] = list(names) + return list(names) + + def latest_episodes(self, page: int = 1) -> List[LatestEpisode]: + """Liefert die neuesten Episoden aus `/neue-episoden`.""" + if not self._requests_available: + return [] + try: + page = int(page or 1) + except Exception: + page = 1 + page = max(1, page) + cached = self._latest_cache.get(page) + if cached is not None: + return list(cached) + + url = LATEST_EPISODES_URL + if page > 1: + url = f"{url}?page={page}" + soup = _get_soup_simple(url) + episodes = _extract_latest_episodes(soup) + self._latest_cache[page] = list(episodes) + return list(episodes) + + def available_hosters_for_url(self, episode_url: str) -> List[str]: + if not self._requests_available: + raise RuntimeError("SerienstreamPlugin kann ohne requests/bs4 keine Hoster laden.") + normalized = _absolute_url(episode_url) + cached = self._latest_hoster_cache.get(normalized) + if cached is not None: + return list(cached) + try: + names = fetch_episode_hoster_names(normalized) + except Exception as exc: # pragma: no cover - defensive logging + raise RuntimeError(f"Hoster konnten nicht geladen werden: {exc}") from exc + self._latest_hoster_cache[normalized] = list(names) + return list(names) + + def stream_link_for_url(self, episode_url: str) -> Optional[str]: + if not self._requests_available: + raise 
RuntimeError("SerienstreamPlugin kann ohne requests/bs4 keine Stream-Links liefern.") + normalized = _absolute_url(episode_url) + try: + link = fetch_episode_stream_link( + normalized, + preferred_hosters=self._preferred_hosters, + ) + if link: + _log_url(link, kind="FOUND") + return link + except Exception as exc: # pragma: no cover - defensive logging + raise RuntimeError(f"Stream-Link konnte nicht geladen werden: {exc}") from exc + + def resolve_stream_link(self, link: str) -> Optional[str]: + if not self._requests_available: + raise RuntimeError("SerienstreamPlugin kann ohne requests/bs4 keine Stream-Links aufloesen.") + try: + resolved = resolve_redirect(link) + if not resolved: + return None + try: + from resolveurl_backend import resolve as resolve_with_resolveurl + except Exception: + resolve_with_resolveurl = None + if callable(resolve_with_resolveurl): + resolved_by_resolveurl = resolve_with_resolveurl(resolved) + if resolved_by_resolveurl: + _log_url("ResolveURL", kind="HOSTER_RESOLVER") + _log_url(resolved_by_resolveurl, kind="MEDIA") + return resolved_by_resolveurl + _log_url(resolved, kind="FINAL") + return resolved + except Exception as exc: # pragma: no cover - defensive logging + raise RuntimeError(f"Stream-Link konnte nicht verfolgt werden: {exc}") from exc + + def set_preferred_hosters(self, hosters: List[str]) -> None: + normalized = [hoster.strip().lower() for hoster in hosters if hoster.strip()] + if normalized: + self._preferred_hosters = normalized + + def reset_preferred_hosters(self) -> None: + self._preferred_hosters = list(self._default_preferred_hosters) + + +# Alias für die automatische Plugin-Erkennung. +Plugin = SerienstreamPlugin diff --git a/addon/plugins/topstreamfilm_plugin.py b/addon/plugins/topstreamfilm_plugin.py new file mode 100644 index 0000000..7e03ebc --- /dev/null +++ b/addon/plugins/topstreamfilm_plugin.py @@ -0,0 +1,1027 @@ +"""HTML-basierte Integration fuer eine Streaming-/Mediathek-Seite (Template). 
+ +Dieses Plugin ist als Startpunkt gedacht, um eine eigene/autorisiert betriebene +Seite mit einer HTML-Suche in ViewIt einzubinden. + +Hinweise: +- Nutzt optional `requests` + `beautifulsoup4` (bs4). +- `search_titles` liefert eine Trefferliste (Titel-Strings). +- `seasons_for` / `episodes_for` können für Filme als Single-Season/Single-Episode + modelliert werden (z.B. Staffel 1, Episode 1) oder komplett leer bleiben, + solange nur Serien unterstützt werden. +""" + +from __future__ import annotations + +from dataclasses import dataclass +from datetime import datetime +import hashlib +import os +import re +import json +from typing import TYPE_CHECKING, Any, Dict, List, Optional, TypeAlias +from urllib.parse import urlencode, urljoin + +try: # pragma: no cover - optional dependency + import requests + from bs4 import BeautifulSoup # type: ignore[import-not-found] +except ImportError as exc: # pragma: no cover - optional dependency + requests = None + BeautifulSoup = None + REQUESTS_AVAILABLE = False + REQUESTS_IMPORT_ERROR = exc +else: + REQUESTS_AVAILABLE = True + REQUESTS_IMPORT_ERROR = None + +try: # pragma: no cover - optional Kodi helpers + import xbmcaddon # type: ignore[import-not-found] + import xbmcvfs # type: ignore[import-not-found] + import xbmcgui # type: ignore[import-not-found] +except ImportError: # pragma: no cover - allow running outside Kodi + xbmcaddon = None + xbmcvfs = None + xbmcgui = None + +from plugin_interface import BasisPlugin +from plugin_helpers import dump_response_html, get_setting_bool, log_url, notify_url +from regex_patterns import DIGITS + +if TYPE_CHECKING: # pragma: no cover + from requests import Session as RequestsSession + from bs4 import BeautifulSoup as BeautifulSoupT # type: ignore[import-not-found] +else: # pragma: no cover + RequestsSession: TypeAlias = Any + BeautifulSoupT: TypeAlias = Any + + +ADDON_ID = "plugin.video.viewit" +SETTING_BASE_URL = "topstream_base_url" +DEFAULT_BASE_URL = "https://www.meineseite" 
+GLOBAL_SETTING_LOG_URLS = "debug_log_urls" +GLOBAL_SETTING_DUMP_HTML = "debug_dump_html" +GLOBAL_SETTING_SHOW_URL_INFO = "debug_show_url_info" +SETTING_GENRE_MAX_PAGES = "topstream_genre_max_pages" +DEFAULT_TIMEOUT = 20 +DEFAULT_PREFERRED_HOSTERS = ["supervideo", "dropload", "voe"] +MEINECLOUD_HOST = "meinecloud.click" +DEFAULT_GENRE_MAX_PAGES = 20 +HARD_MAX_GENRE_PAGES = 200 +HEADERS = { + "User-Agent": "Mozilla/5.0 (Kodi; ViewIt) AppleWebKit/537.36 (KHTML, like Gecko)", + "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", + "Accept-Language": "de-DE,de;q=0.9,en;q=0.8", + "Connection": "keep-alive", +} + + +@dataclass(frozen=True) +class SearchHit: + """Interner Treffer mit Title + URL.""" + + title: str + url: str + description: str = "" + + +def _normalize_search_text(value: str) -> str: + """Normalisiert Text für robuste, wortbasierte Suche/Filter. + + Wir ersetzen Nicht-Alphanumerisches durch Leerzeichen und kollabieren Whitespace. + Dadurch kann z.B. "Star Trek: Lower Decks – Der Film" sauber auf Tokens gematcht werden. + """ + + value = (value or "").casefold() + value = re.sub(r"[^a-z0-9]+", " ", value) + value = re.sub(r"\s+", " ", value).strip() + return value + + +def _matches_query(query: str, *, title: str, description: str) -> bool: + normalized_query = _normalize_search_text(query) + if not normalized_query: + return False + haystack = _normalize_search_text(title) + if not haystack: + return False + return normalized_query in haystack + + +def _strip_der_film_suffix(title: str) -> str: + """Entfernt den Suffix 'Der Film' am Ende, z.B. 
'Star Trek – Der Film'.""" + title = (title or "").strip() + if not title: + return "" + title = re.sub(r"\s*[-–]\s*der\s+film\s*$", "", title, flags=re.IGNORECASE).strip() + return title + + +class TopstreamfilmPlugin(BasisPlugin): + """Integration fuer eine HTML-basierte Suchseite.""" + + name = "TopStreamFilm" + + def __init__(self) -> None: + self._session: RequestsSession | None = None + self._title_to_url: Dict[str, str] = {} + self._genre_to_url: Dict[str, str] = {} + self._movie_iframe_url: Dict[str, str] = {} + self._movie_title_hint: set[str] = set() + self._genre_last_page: Dict[str, int] = {} + self._season_cache: Dict[str, List[str]] = {} + self._episode_cache: Dict[tuple[str, str], List[str]] = {} + self._episode_to_url: Dict[tuple[str, str, str], str] = {} + self._episode_to_hosters: Dict[tuple[str, str, str], Dict[str, str]] = {} + self._season_to_episode_numbers: Dict[tuple[str, str], List[int]] = {} + self._episode_title_by_number: Dict[tuple[str, int, int], str] = {} + self._detail_html_cache: Dict[str, str] = {} + self._popular_cache: List[str] | None = None + self._default_preferred_hosters: List[str] = list(DEFAULT_PREFERRED_HOSTERS) + self._preferred_hosters: List[str] = list(self._default_preferred_hosters) + self.is_available = REQUESTS_AVAILABLE + self.unavailable_reason = None if REQUESTS_AVAILABLE else f"requests/bs4 fehlen: {REQUESTS_IMPORT_ERROR}" + self._load_title_url_cache() + self._load_genre_cache() + + def _cache_dir(self) -> str: + if xbmcaddon and xbmcvfs: + try: + addon = xbmcaddon.Addon(ADDON_ID) + profile = xbmcvfs.translatePath(addon.getAddonInfo("profile")) + if not xbmcvfs.exists(profile): + xbmcvfs.mkdirs(profile) + return profile + except Exception: + pass + return os.path.dirname(__file__) + + def _title_url_cache_path(self) -> str: + return os.path.join(self._cache_dir(), "topstream_title_url_cache.json") + + def _load_title_url_cache(self) -> None: + path = self._title_url_cache_path() + try: + if xbmcvfs and 
xbmcvfs.exists(path): + handle = xbmcvfs.File(path) + raw = handle.read() + handle.close() + elif os.path.exists(path): + with open(path, "r", encoding="utf-8") as handle: + raw = handle.read() + else: + return + loaded = json.loads(raw or "{}") + if isinstance(loaded, dict): + # New format: {base_url: {title: url}} + base_url = self._get_base_url() + if base_url in loaded and isinstance(loaded.get(base_url), dict): + loaded = loaded.get(base_url) or {} + # Backwards compatible: {title: url} + for title, url in (loaded or {}).items(): + if isinstance(title, str) and isinstance(url, str) and title.strip() and url.strip(): + self._title_to_url.setdefault(title.strip(), url.strip()) + except Exception: + return + + def _save_title_url_cache(self) -> None: + path = self._title_url_cache_path() + try: + base_url = self._get_base_url() + store: Dict[str, Dict[str, str]] = {} + # merge with existing + try: + if xbmcvfs and xbmcvfs.exists(path): + handle = xbmcvfs.File(path) + existing_raw = handle.read() + handle.close() + elif os.path.exists(path): + with open(path, "r", encoding="utf-8") as handle: + existing_raw = handle.read() + else: + existing_raw = "" + existing = json.loads(existing_raw or "{}") + if isinstance(existing, dict): + if all(isinstance(k, str) and isinstance(v, dict) for k, v in existing.items()): + store = {k: dict(v) for k, v in existing.items()} # type: ignore[arg-type] + except Exception: + store = {} + + store[base_url] = dict(self._title_to_url) + payload = json.dumps(store, ensure_ascii=False, sort_keys=True) + except Exception: + return + try: + if xbmcaddon and xbmcvfs: + directory = os.path.dirname(path) + if directory and not xbmcvfs.exists(directory): + xbmcvfs.mkdirs(directory) + handle = xbmcvfs.File(path, "w") + handle.write(payload) + handle.close() + else: + with open(path, "w", encoding="utf-8") as handle: + handle.write(payload) + except Exception: + return + + def _genre_cache_path(self) -> str: + return 
os.path.join(self._cache_dir(), "topstream_genres_cache.json") + + def _load_genre_cache(self) -> None: + path = self._genre_cache_path() + try: + if xbmcvfs and xbmcvfs.exists(path): + handle = xbmcvfs.File(path) + raw = handle.read() + handle.close() + elif os.path.exists(path): + with open(path, "r", encoding="utf-8") as handle: + raw = handle.read() + else: + return + loaded = json.loads(raw or "{}") + if isinstance(loaded, dict): + base_url = self._get_base_url() + mapping = loaded.get(base_url) + if isinstance(mapping, dict): + for genre, url in mapping.items(): + if isinstance(genre, str) and isinstance(url, str) and genre.strip() and url.strip(): + self._genre_to_url.setdefault(genre.strip(), url.strip()) + except Exception: + return + + def _save_genre_cache(self) -> None: + path = self._genre_cache_path() + try: + base_url = self._get_base_url() + store: Dict[str, Dict[str, str]] = {} + try: + if xbmcvfs and xbmcvfs.exists(path): + handle = xbmcvfs.File(path) + existing_raw = handle.read() + handle.close() + elif os.path.exists(path): + with open(path, "r", encoding="utf-8") as handle: + existing_raw = handle.read() + else: + existing_raw = "" + existing = json.loads(existing_raw or "{}") + if isinstance(existing, dict): + if all(isinstance(k, str) and isinstance(v, dict) for k, v in existing.items()): + store = {k: dict(v) for k, v in existing.items()} # type: ignore[arg-type] + except Exception: + store = {} + store[base_url] = dict(self._genre_to_url) + payload = json.dumps(store, ensure_ascii=False, sort_keys=True) + except Exception: + return + try: + if xbmcaddon and xbmcvfs: + directory = os.path.dirname(path) + if directory and not xbmcvfs.exists(directory): + xbmcvfs.mkdirs(directory) + handle = xbmcvfs.File(path, "w") + handle.write(payload) + handle.close() + else: + with open(path, "w", encoding="utf-8") as handle: + handle.write(payload) + except Exception: + return + + def _get_session(self) -> RequestsSession: + if requests is None: + raise 
def _get_base_url(self) -> str:
    """Return the configured site base URL, normalized.

    Reads the addon setting when Kodi is available, falls back to
    DEFAULT_BASE_URL otherwise; guarantees a scheme and strips any
    trailing slash.
    """
    base_url = DEFAULT_BASE_URL
    if xbmcaddon is not None:
        try:
            addon = xbmcaddon.Addon(ADDON_ID)
            configured = (addon.getSetting(SETTING_BASE_URL) or "").strip()
            if configured:
                base_url = configured
        except Exception:
            # Settings access can fail outside Kodi; keep the default.
            pass
    base_url = (base_url or "").strip()
    if not base_url:
        return DEFAULT_BASE_URL
    if not base_url.startswith(("http://", "https://")):
        base_url = "https://" + base_url
    return base_url.rstrip("/")

def _absolute_url(self, href: str) -> str:
    """Resolve *href* against the configured base URL."""
    return urljoin(self._get_base_url() + "/", href or "")

@staticmethod
def _absolute_external_url(href: str, *, base: str = "") -> str:
    """Normalize an external link.

    Scheme-relative links (``//host/...``) get ``https:`` prepended,
    absolute links pass through, relative links are joined onto *base*
    (when given).
    """
    candidate = (href or "").strip()
    if not candidate:
        return ""
    if candidate.startswith("//"):
        return "https:" + candidate
    if candidate.startswith(("http://", "https://")):
        return candidate
    if base:
        joined_base = base if base.endswith("/") else base + "/"
        return urljoin(joined_base, candidate)
    return candidate

def _get_setting_bool(self, setting_id: str, *, default: bool = False) -> bool:
    """Read a boolean addon setting via the shared helper."""
    return get_setting_bool(ADDON_ID, setting_id, default=default)

def _get_setting_int(self, setting_id: str, *, default: int) -> int:
    """Read an integer addon setting, tolerating missing APIs and bad values."""
    if xbmcaddon is None:
        return default
    try:
        addon = xbmcaddon.Addon(ADDON_ID)
        int_getter = getattr(addon, "getSettingInt", None)
        if callable(int_getter):
            return int(int_getter(setting_id))
        raw_value = str(addon.getSetting(setting_id) or "").strip()
        return int(raw_value) if raw_value else default
    except Exception:
        return default

def _notify_url(self, url: str) -> None:
    """Show a Kodi notification for *url* when the debug setting enables it."""
    notify_url(ADDON_ID, heading=self.name, url=url, enabled_setting_id=GLOBAL_SETTING_SHOW_URL_INFO)

def _log_url(self, url: str, *, kind: str = "VISIT") -> None:
    """Append *url* (tagged with *kind*) to the debug URL log when enabled."""
    log_url(ADDON_ID, enabled_setting_id=GLOBAL_SETTING_LOG_URLS, log_filename="topstream_urls.log", url=url, kind=kind)
def capabilities(self) -> set[str]:
    """Optional plugin features supported by this provider."""
    return {"genres", "popular_series"}

def _popular_url(self) -> str:
    """Absolute URL of the 'popular movies' listing page."""
    return self._absolute_url("/beliebte-filme-online.html")

def popular_series(self) -> List[str]:
    """Return the "most watched"/"popular movies" titles.

    Source: ``/beliebte-filme-online.html`` (TopStreamFilm template).
    The result is memoized in ``self._popular_cache``; title→URL
    mappings are recorded for later navigation.
    """
    if self._popular_cache is not None:
        return list(self._popular_cache)
    if not REQUESTS_AVAILABLE or BeautifulSoup is None:
        self._popular_cache = []
        return []
    try:
        soup = self._get_soup(self._popular_url())
    except Exception:
        self._popular_cache = []
        return []

    listing = self._parse_listing_titles(soup)
    collected: List[str] = []
    seen_titles: set[str] = set()
    for entry in listing:
        if not entry.title or entry.title in seen_titles:
            continue
        seen_titles.add(entry.title)
        self._title_to_url[entry.title] = entry.url
        collected.append(entry.title)
    if collected:
        self._save_title_url_cache()
    self._popular_cache = list(collected)
    return list(collected)
def _extract_first_int(self, value: str) -> Optional[int]:
    """Return the first run of digits in *value* as int, or None."""
    # Inlined digit pattern (equivalent to the shared DIGITS constant).
    found = re.search(r"(\d+)", value or "")
    return int(found.group(1)) if found else None

def _strip_links_text(self, node: Any) -> str:
    """Extract a node's text with all anchor (link) texts removed."""
    if BeautifulSoup is None:
        return ""
    try:
        fragment = BeautifulSoup(str(node), "html.parser")
        for link in fragment.select("a"):
            link.extract()
        return (fragment.get_text(" ", strip=True) or "").strip()
    except Exception:
        return ""

def _clear_stream_index_for_title(self, title: str) -> None:
    """Drop every cached season/episode/hoster entry belonging to *title*."""
    for index_key in list(self._season_to_episode_numbers.keys()):
        if index_key[0] == title:
            self._season_to_episode_numbers.pop(index_key, None)
    for index_key in list(self._episode_to_hosters.keys()):
        if index_key[0] == title:
            self._episode_to_hosters.pop(index_key, None)
    for index_key in list(self._episode_title_by_number.keys()):
        if index_key[0] == title:
            self._episode_title_by_number.pop(index_key, None)

def _parse_stream_accordion(self, soup: BeautifulSoupT, *, title: str) -> None:
    """Parse season/episode/hoster links from a detail page accordion.

    Rows look like ``<season>x<episode> <episode title> - <hoster links>``;
    only rows whose season number matches the enclosing spoiler's season
    are indexed.
    """
    if not soup or not title:
        return

    accordion = soup.select_one("#se-accordion") or soup.select_one(".su-accordion#se-accordion")
    if accordion is None:
        return

    self._clear_stream_index_for_title(title)

    for spoiler in accordion.select(".su-spoiler"):
        heading = spoiler.select_one(".su-spoiler-title")
        if not heading:
            continue

        heading_text = (heading.get_text(" ", strip=True) or "").strip()
        season_number = self._extract_first_int(heading_text)
        if season_number is None:
            continue
        season_label = f"Staffel {season_number}"

        data_target = (heading.get("data-target") or "").strip()
        body = spoiler.select_one(data_target) if data_target.startswith("#") else None
        if body is None:
            body = spoiler.select_one(".su-spoiler-content")
        if body is None:
            continue

        found_episodes: set[int] = set()
        for row in body.select(".cu-ss"):
            row_text = (self._strip_links_text(row) or "").strip()
            if not row_text:
                continue

            parsed = re.search(
                r"(?P<s>\d+)\s*x\s*(?P<e>\d+)\s*(?P<rest>.*)$",
                row_text,
                flags=re.IGNORECASE,
            )
            if not parsed:
                continue
            row_season = int(parsed.group("s"))
            episode_number = int(parsed.group("e"))
            if row_season != season_number:
                continue

            remainder = (parsed.group("rest") or "").strip().replace("\u2013", "-")
            # Hoster links are anchors in the HTML, so usually only
            # "Episode X -" remains here after stripping links.
            if "-" in remainder:
                remainder = remainder.split("-", 1)[0].strip()
            remainder = re.sub(r"\bepisode\s*\d+\b", "", remainder, flags=re.IGNORECASE).strip()
            remainder = re.sub(r"^\W+|\W+$", "", remainder).strip()
            if remainder:
                self._episode_title_by_number[(title, season_number, episode_number)] = remainder

            hosters: Dict[str, str] = {}
            for link in row.select("a[href]"):
                hoster_name = (link.get_text(" ", strip=True) or "").strip()
                hoster_href = (link.get("href") or "").strip()
                if not hoster_name or not hoster_href:
                    continue
                hosters[hoster_name] = hoster_href
            if not hosters:
                continue

            episode_label = f"Episode {episode_number}"
            ep_title = self._episode_title_by_number.get((title, season_number, episode_number), "")
            if ep_title:
                episode_label = f"Episode {episode_number}: {ep_title}"

            self._episode_to_hosters[(title, season_label, episode_label)] = hosters
            found_episodes.add(episode_number)

        self._season_to_episode_numbers[(title, season_label)] = sorted(found_episodes)
def _get_soup(self, url: str) -> BeautifulSoupT:
    """Fetch *url* and return it parsed with BeautifulSoup.

    Logs and notifies the request/response URLs; raises RuntimeError when
    requests/bs4 are unavailable, and propagates HTTP errors.
    """
    if BeautifulSoup is None or not REQUESTS_AVAILABLE:
        raise RuntimeError("requests/bs4 sind nicht verfuegbar.")
    session = self._get_session()
    self._log_url(url, kind="VISIT")
    self._notify_url(url)
    response = session.get(url, timeout=DEFAULT_TIMEOUT)
    response.raise_for_status()
    self._log_url(response.url, kind="OK")
    self._log_response_html(response.url, response.text)
    return BeautifulSoup(response.text, "html.parser")

def _get_detail_soup(self, title: str) -> Optional[BeautifulSoupT]:
    """Return the parsed detail page for *title*, using the HTML cache.

    Requires a previously recorded title→URL mapping; returns None when the
    title is unknown or requests/bs4 are unavailable.
    """
    title = (title or "").strip()
    if not title:
        return None
    detail_url = self._title_to_url.get(title)
    if not detail_url:
        return None
    if BeautifulSoup is None or not REQUESTS_AVAILABLE:
        return None
    cached_html = self._detail_html_cache.get(title)
    if cached_html:
        return BeautifulSoup(cached_html, "html.parser")
    soup = self._get_soup(detail_url)
    try:
        self._detail_html_cache[title] = str(soup)
    except Exception:
        pass
    return soup

def _detect_movie_iframe_url(self, soup: BeautifulSoupT) -> str:
    """Detect a movie detail page by its embedded MeineCloud iframe.

    Returns the iframe src when one points at MEINECLOUD_HOST, else "".
    """
    if not soup:
        return ""
    for frame in soup.select("iframe[src]"):
        frame_src = (frame.get("src") or "").strip()
        if frame_src and MEINECLOUD_HOST in frame_src:
            return frame_src
    return ""

def _parse_meinecloud_hosters(self, soup: BeautifulSoupT, *, page_url: str) -> Dict[str, str]:
    """Parse hoster mirrors from a MeineCloud movie page.

    Expected markup: ``ul._player-mirrors`` with ``li[data-link]`` entries
    whose text is the hoster name (e.g. "supervideo", "dropload",
    "4K Server") and whose ``data-link`` holds the (possibly relative)
    target URL. When an entry points back to another MeineCloud
    ``/fullhd/`` page, it is expanded once by fetching and parsing that
    nested page.
    """
    mirrors: Dict[str, str] = {}
    if not soup:
        return mirrors

    for entry in soup.select("ul._player-mirrors li[data-link]"):
        raw_target = (entry.get("data-link") or "").strip()
        if not raw_target:
            continue
        mirror_name = (entry.get_text(" ", strip=True) or "").strip() or "Hoster"
        mirror_url = self._absolute_external_url(raw_target, base=page_url)
        if not mirror_url:
            continue
        mirrors[mirror_name] = mirror_url

    # If e.g. "4K Server" points at another MeineCloud page, expand it once.
    expanded: Dict[str, str] = {}
    for mirror_name, mirror_url in list(mirrors.items()):
        if MEINECLOUD_HOST in mirror_url and "/fullhd/" in mirror_url:
            try:
                nested_soup = self._get_soup(mirror_url)
            except Exception:
                continue
            for nested_name, nested_url in self._parse_meinecloud_hosters(
                nested_soup, page_url=mirror_url
            ).items():
                expanded.setdefault(nested_name, nested_url)
    if expanded:
        mirrors.update(expanded)

    return mirrors

def _extract_last_page(self, soup: BeautifulSoupT) -> int:
    """Read the highest page number from ``div.wp-pagenavi`` pagination."""
    if not soup:
        return 1
    page_numbers: List[int] = []
    for anchor in soup.select("div.wp-pagenavi a"):
        label = (anchor.get_text(" ", strip=True) or "").strip()
        if label.isdigit():
            try:
                page_numbers.append(int(label))
            except Exception:
                continue
    return max(page_numbers) if page_numbers else 1
def is_movie(self, title: str) -> bool:
    """Quick hint (without an extra detail request when possible) whether *title* is a movie."""
    title = (title or "").strip()
    if not title:
        return False
    if title in self._movie_iframe_url or title in self._movie_title_hint:
        return True
    # Robust path: inspect the detail page. In the TopStream layout, series
    # pages carry a `div.serie-menu` (season navigation); without it we
    # treat the title as a movie.
    soup = self._get_detail_soup(title)
    if soup is None:
        return False
    has_seasons = bool(soup.select_one("div.serie-menu") or soup.select_one(".serie-menu"))
    return not has_seasons

def genre_page_count(self, genre: str) -> int:
    """Optional: return the last pagination page of a genre listing."""
    if not REQUESTS_AVAILABLE or BeautifulSoup is None:
        return 1
    genre = (genre or "").strip()
    if not genre:
        return 1
    if genre in self._genre_last_page:
        return max(1, int(self._genre_last_page[genre] or 1))
    if not self._genre_to_url:
        self.genres()
    genre_url = self._genre_to_url.get(genre)
    if not genre_url:
        return 1
    try:
        soup = self._get_soup(genre_url)
    except Exception:
        return 1
    last_page = self._extract_last_page(soup)
    self._genre_last_page[genre] = max(1, int(last_page or 1))
    return self._genre_last_page[genre]

def titles_for_genre_page(self, genre: str, page: int) -> List[str]:
    """Optional: return the titles of one concrete page of a genre listing."""
    if not REQUESTS_AVAILABLE or BeautifulSoup is None:
        return []
    genre = (genre or "").strip()
    if not genre:
        return []
    if not self._genre_to_url:
        self.genres()
    genre_base = self._genre_to_url.get(genre)
    if not genre_base:
        return []

    page = max(1, int(page or 1))
    if page == 1:
        page_url = genre_base
    else:
        page_url = urljoin(genre_base.rstrip("/") + "/", f"page/{page}/")

    try:
        soup = self._get_soup(page_url)
    except Exception:
        return []

    collected: List[str] = []
    seen_titles: set[str] = set()
    for entry in self._parse_listing_titles(soup):
        if entry.title in seen_titles:
            continue
        seen_titles.add(entry.title)
        self._title_to_url[entry.title] = entry.url
        collected.append(entry.title)
    if collected:
        self._save_title_url_cache()
    return collected

def _ensure_title_index(self, title: str) -> None:
    """Make sure movie/series info for *title* has been parsed.

    Movies (detected via MeineCloud iframe) are modelled as a single
    "Film"/"Stream" season/episode so ViewIt can navigate them; series
    are indexed via the streams accordion.
    """
    title = (title or "").strip()
    if not title:
        return

    # Already indexed?
    if title in self._movie_iframe_url:
        return
    if any(index_key[0] == title for index_key in self._season_to_episode_numbers.keys()):
        return

    soup = self._get_detail_soup(title)
    if soup is None:
        return

    iframe_url = self._detect_movie_iframe_url(soup)
    if iframe_url:
        self._movie_iframe_url[title] = iframe_url
        season_label = "Film"
        episode_label = "Stream"
        self._season_cache[title] = [season_label]
        self._episode_cache[(title, season_label)] = [episode_label]
        try:
            meinecloud_soup = self._get_soup(iframe_url)
            hosters = self._parse_meinecloud_hosters(meinecloud_soup, page_url=iframe_url)
        except Exception:
            hosters = {}
        self._episode_to_hosters[(title, season_label, episode_label)] = hosters or {"MeineCloud": iframe_url}
        return

    # Otherwise: parse the series streams accordion (if present).
    self._parse_stream_accordion(soup, title=title)

async def search_titles(self, query: str) -> List[str]:
    """Search titles via the site's HTML search.

    Expected HTML: hits as `li.TPostMv a[href]`, title in `h3.Title`.
    Rebuilds the title→URL mapping for navigation from the hits.
    """
    if not REQUESTS_AVAILABLE:
        return []
    query = (query or "").strip()
    if not query:
        return []

    session = self._get_session()
    search_url = self._get_base_url() + "/"
    params = {"story": query, "do": "search", "subaction": "search"}
    request_url = f"{search_url}?{urlencode(params)}"
    self._log_url(request_url, kind="GET")
    self._notify_url(request_url)
    response = session.get(
        search_url,
        params=params,
        timeout=DEFAULT_TIMEOUT,
    )
    response.raise_for_status()
    self._log_url(response.url, kind="OK")
    self._log_response_html(response.url, response.text)

    if BeautifulSoup is None:
        return []
    soup = BeautifulSoup(response.text, "html.parser")

    matches: List[SearchHit] = []
    for item in soup.select("li.TPostMv"):
        anchor = item.select_one("a[href]")
        if not anchor:
            continue
        href = (anchor.get("href") or "").strip()
        if not href:
            continue
        title_tag = anchor.select_one("h3.Title")
        raw_title = title_tag.get_text(" ", strip=True) if title_tag else anchor.get_text(" ", strip=True)
        raw_title = (raw_title or "").strip()
        is_movie_hint = bool(re.search(r"\bder\s+film\b", raw_title, flags=re.IGNORECASE))
        clean_title = _strip_der_film_suffix(raw_title)
        if not clean_title:
            continue
        if is_movie_hint:
            self._movie_title_hint.add(clean_title)
        description_tag = item.select_one(".TPMvCn .Description")
        description = description_tag.get_text(" ", strip=True) if description_tag else ""
        candidate = SearchHit(title=clean_title, url=self._absolute_url(href), description=description)
        if _matches_query(query, title=candidate.title, description=candidate.description):
            matches.append(candidate)

    # Dedup + rebuild the navigation mapping.
    self._title_to_url.clear()
    collected: List[str] = []
    seen_titles: set[str] = set()
    for candidate in matches:
        if candidate.title in seen_titles:
            continue
        seen_titles.add(candidate.title)
        self._title_to_url[candidate.title] = candidate.url
        collected.append(candidate.title)
    self._save_title_url_cache()
    return collected
def genres(self) -> List[str]:
    """Return all known genres, case-insensitively sorted.

    Uses the in-memory genre→URL map when populated; otherwise scrapes
    the home page, refreshes the map, and persists it to the cache file.
    """
    if not REQUESTS_AVAILABLE or BeautifulSoup is None:
        return []
    if self._genre_to_url:
        return sorted(self._genre_to_url.keys(), key=lambda name: name.casefold())

    try:
        home_soup = self._get_soup(self._get_base_url() + "/")
    except Exception:
        return []
    scraped = self._parse_genres_from_home(home_soup)
    self._genre_to_url.clear()
    self._genre_to_url.update(scraped)
    self._save_genre_cache()
    return sorted(self._genre_to_url.keys(), key=lambda name: name.casefold())

def titles_for_genre(self, genre: str) -> List[str]:
    """Return the titles of a genre's first page, alphabetically sorted.

    Backwards compatible: only page 1 is fetched here; paging goes
    through titles_for_genre_page().
    """
    if not REQUESTS_AVAILABLE or BeautifulSoup is None:
        return []
    genre = (genre or "").strip()
    if not genre:
        return []
    if not self._genre_to_url:
        self.genres()
    if not self._genre_to_url.get(genre):
        return []

    first_page = self.titles_for_genre_page(genre, 1)
    first_page.sort(key=lambda name: name.casefold())
    return first_page
def episodes_for(self, title: str, season: str) -> List[str]:
    """Return the episode labels of *season* for *title*.

    Movies answer ["Stream"] for the synthetic "Film" season; series
    labels come from the streams accordion index and are memoized.
    """
    title = (title or "").strip()
    season = (season or "").strip()
    if not title or not season or not REQUESTS_AVAILABLE or BeautifulSoup is None:
        return []

    self._ensure_title_index(title)
    if title in self._movie_iframe_url and season == "Film":
        return ["Stream"]

    cache_key = (title, season)
    memoized = self._episode_cache.get(cache_key)
    if memoized is not None:
        return list(memoized)

    self._ensure_stream_index(title)
    numbers = self._season_to_episode_numbers.get((title, season), [])
    season_number = self._extract_first_int(season) or 0
    labels: List[str] = []
    for episode_number in numbers:
        episode_name = self._episode_title_by_number.get((title, season_number, episode_number), "")
        if episode_name:
            labels.append(f"Episode {episode_number}: {episode_name}")
        else:
            labels.append(f"Episode {episode_number}")

    self._episode_cache[cache_key] = list(labels)
    return list(labels)

def available_hosters_for(self, title: str, season: str, episode: str) -> List[str]:
    """Return hoster names for an episode, case-insensitively sorted."""
    title = (title or "").strip()
    season = (season or "").strip()
    episode = (episode or "").strip()
    if not title or not season or not episode:
        return []
    if not REQUESTS_AVAILABLE or BeautifulSoup is None:
        return []

    self._ensure_title_index(title)
    self._ensure_stream_index(title)
    hosters = self._episode_to_hosters.get((title, season, episode), {})
    return sorted(hosters.keys(), key=lambda name: name.casefold())

def set_preferred_hosters(self, hosters: List[str]) -> None:
    """Set the hoster preference order (lowercased, blanks dropped).

    An effectively empty list leaves the current preference untouched.
    """
    cleaned = [entry.strip().lower() for entry in hosters if entry and entry.strip()]
    if cleaned:
        self._preferred_hosters = cleaned

def reset_preferred_hosters(self) -> None:
    """Restore the default hoster preference order."""
    self._preferred_hosters = list(self._default_preferred_hosters)

def stream_link_for(self, title: str, season: str, episode: str) -> Optional[str]:
    """Pick a hoster URL for an episode.

    Honors the preferred-hoster order (case-insensitive name match);
    otherwise deterministically returns the alphabetically first hoster.
    """
    title = (title or "").strip()
    season = (season or "").strip()
    episode = (episode or "").strip()
    if not title or not season or not episode:
        return None
    if not REQUESTS_AVAILABLE or BeautifulSoup is None:
        return None

    self._ensure_title_index(title)
    self._ensure_stream_index(title)
    hosters = self._episode_to_hosters.get((title, season, episode), {})
    if not hosters:
        return None

    preference = [name.casefold() for name in (self._preferred_hosters or [])]
    for wanted in preference:
        for hoster_name, hoster_url in hosters.items():
            if hoster_name.casefold() == wanted:
                return hoster_url

    # No preference matched: deterministically take the first.
    first_name = sorted(hosters.keys(), key=lambda name: name.casefold())[0]
    return hosters.get(first_name)

def resolve_stream_link(self, link: str) -> Optional[str]:
    """Resolve *link* via the optional ResolveURL backend, else pass through."""
    try:
        from resolveurl_backend import resolve as resolve_with_resolveurl
    except Exception:
        resolve_with_resolveurl = None
    if callable(resolve_with_resolveurl):
        resolved = resolve_with_resolveurl(link)
        return resolved or link
    return link
+Plugin = TopstreamfilmPlugin diff --git a/addon/regex_patterns.py b/addon/regex_patterns.py new file mode 100644 index 0000000..c3c0b08 --- /dev/null +++ b/addon/regex_patterns.py @@ -0,0 +1,11 @@ +#!/usr/bin/env python3 +"""Shared regex pattern constants. + +Keep common patterns in one place to avoid accidental double-escaping (e.g. \"\\\\d\"). +""" + +SEASON_EPISODE_TAG = r"S\s*(\d+)\s*E\s*(\d+)" +SEASON_EPISODE_URL = r"/staffel-(\d+)/episode-(\d+)" +STAFFEL_NUM_IN_URL = r"/staffel-(\d+)" +DIGITS = r"(\d+)" + diff --git a/addon/requirements.txt b/addon/requirements.txt new file mode 100644 index 0000000..e35775c --- /dev/null +++ b/addon/requirements.txt @@ -0,0 +1,2 @@ +beautifulsoup4>=4.12 +requests>=2.31 diff --git a/addon/resolveurl_backend.py b/addon/resolveurl_backend.py new file mode 100644 index 0000000..5b9a17a --- /dev/null +++ b/addon/resolveurl_backend.py @@ -0,0 +1,43 @@ +"""Optionales ResolveURL-Backend für das Kodi-Addon. + +Wenn `script.module.resolveurl` installiert ist, kann damit eine Hoster-URL +zu einer abspielbaren Media-URL (inkl. evtl. Header-Suffix) aufgelöst werden. 
def resolve(url: str) -> Optional[str]:
    """Resolve a hoster URL to a playable media URL via ResolveURL.

    Returns None when the `resolveurl` module is unavailable, the URL is
    empty or unsupported, or resolution fails.
    """
    if not url:
        return None
    try:
        import resolveurl  # type: ignore
    except Exception:
        return None

    # Preferred API: the HostedMediaFile wrapper.
    try:
        hmf_cls = getattr(resolveurl, "HostedMediaFile", None)
        if callable(hmf_cls):
            media = hmf_cls(url)
            validity_check = getattr(media, "valid_url", None)
            if callable(validity_check) and not validity_check():
                return None
            resolve_method = getattr(media, "resolve", None)
            if callable(resolve_method):
                outcome = resolve_method()
                return str(outcome) if outcome else None
    except Exception:
        pass

    # Fallback: module-level resolveurl.resolve().
    try:
        direct_resolver = getattr(resolveurl, "resolve", None)
        if callable(direct_resolver):
            outcome = direct_resolver(url)
            return str(outcome) if outcome else None
    except Exception:
        return None

    return None
def _get_tmdb_session() -> "requests.Session | None":
    """Return a per-thread shared requests Session.

    Thread-local storage is used because ViewIt prefetches TMDB metadata
    from worker threads; `requests.Session` is not guaranteed to be
    thread-safe, but reusing a session within one thread keeps
    connections warm.
    """
    if requests is None:
        return None
    sess = getattr(_TMDB_THREAD_LOCAL, "session", None)
    if sess is None:
        sess = requests.Session()
        setattr(_TMDB_THREAD_LOCAL, "session", sess)
    return sess


@dataclass(frozen=True)
class TmdbCastMember:
    """One cast entry: actor name, character role, and profile-image URL ("" when absent)."""

    name: str
    role: str
    thumb: str


@dataclass(frozen=True)
class TmdbShowMeta:
    """Aggregated TMDB metadata for a TV show."""

    tmdb_id: int
    plot: str
    poster: str
    fanart: str
    rating: float
    votes: int
    cast: List[TmdbCastMember]


def _image_url(path: str, *, size: str) -> str:
    """Build a full TMDB image URL for *path* at *size*; "" for an empty path."""
    path = (path or "").strip()
    if not path:
        return ""
    return f"{TMDB_IMAGE_BASE}/{size}{path}"


def _fetch_credits(
    *,
    kind: str,
    tmdb_id: int,
    api_key: str,
    language: str,
    timeout: int,
    log: Callable[[str], None] | None,
    log_responses: bool,
) -> List[TmdbCastMember]:
    """Fetch /{kind}/{id}/credits and return the parsed cast list.

    Parsing/filtering is delegated to _parse_cast_payload so all credit
    endpoints rank image-bearing cast members identically (previously this
    duplicated that logic inline). Returns [] on any error.
    """
    if requests is None or not tmdb_id:
        return []
    params = {"api_key": api_key, "language": (language or "de-DE").strip()}
    url = f"{TMDB_API_BASE}/{kind}/{tmdb_id}/credits?{urlencode(params)}"
    if callable(log):
        log(f"TMDB GET {url}")
    try:
        response = requests.get(url, timeout=timeout)
    except Exception as exc:  # pragma: no cover
        if callable(log):
            log(f"TMDB ERROR /{kind}/{{id}}/credits request_failed error={exc!r}")
        return []
    status = getattr(response, "status_code", None)
    if callable(log):
        log(f"TMDB RESPONSE /{kind}/{{id}}/credits status={status}")
    if status != 200:
        return []
    try:
        payload = response.json() or {}
    except Exception:
        return []
    if callable(log) and log_responses:
        try:
            dumped = json.dumps(payload, ensure_ascii=False)
        except Exception:
            dumped = str(payload)
        log(f"TMDB RESPONSE_BODY /{kind}/{{id}}/credits body={dumped[:2000]}")

    cast_payload = payload.get("cast") or []
    if callable(log):
        log(f"TMDB CREDITS /{kind}/{{id}}/credits cast={len(cast_payload)}")
    # Single source of truth for cast filtering/ordering.
    return _parse_cast_payload(cast_payload)


def _parse_cast_payload(cast_payload: object) -> List[TmdbCastMember]:
    """Convert a raw TMDB "cast" list into TmdbCastMember objects.

    Entries without a name (or that are not dicts) are dropped. Many Kodi
    skins show placeholder heads for missing thumbnails, so members WITH a
    profile image are preferred; names without images are only returned
    when no entry has an image. At most 30 members are returned.
    """
    if not isinstance(cast_payload, list):
        return []
    with_images: List[TmdbCastMember] = []
    without_images: List[TmdbCastMember] = []
    for entry in cast_payload:
        if not isinstance(entry, dict):
            continue
        name = (entry.get("name") or "").strip()
        role = (entry.get("character") or "").strip()
        thumb = _image_url(entry.get("profile_path") or "", size="w185")
        if not name:
            continue
        member = TmdbCastMember(name=name, role=role, thumb=thumb)
        if thumb:
            with_images.append(member)
        else:
            without_images.append(member)
    if with_images:
        return with_images[:30]
    return without_images[:30]
def fetch_tv_episode_credits(
    *,
    tmdb_id: int,
    season_number: int,
    episode_number: int,
    api_key: str,
    language: str = "de-DE",
    timeout: int = 15,
    log: Callable[[str], None] | None = None,
    log_responses: bool = False,
) -> List[TmdbCastMember]:
    """Load the cast of one episode (/tv/{id}/season/{n}/episode/{e}/credits).

    Returns [] on any error (missing requests, empty api key, request
    failure, non-200 status, unparseable body). Cast filtering — prefer
    members with profile images, cap at 30 — is delegated to
    _parse_cast_payload so it stays consistent with the other credit
    endpoints (previously this duplicated that logic inline).
    """
    if requests is None:
        return []
    api_key = (api_key or "").strip()
    if not api_key or not tmdb_id:
        return []
    params = {"api_key": api_key, "language": (language or "de-DE").strip()}
    url = f"{TMDB_API_BASE}/tv/{tmdb_id}/season/{season_number}/episode/{episode_number}/credits?{urlencode(params)}"
    if callable(log):
        log(f"TMDB GET {url}")
    try:
        response = requests.get(url, timeout=timeout)
    except Exception as exc:  # pragma: no cover
        if callable(log):
            log(f"TMDB ERROR /tv/{{id}}/season/{{n}}/episode/{{e}}/credits request_failed error={exc!r}")
        return []
    status = getattr(response, "status_code", None)
    if callable(log):
        log(f"TMDB RESPONSE /tv/{{id}}/season/{{n}}/episode/{{e}}/credits status={status}")
    if status != 200:
        return []
    try:
        payload = response.json() or {}
    except Exception:
        return []
    if callable(log) and log_responses:
        try:
            dumped = json.dumps(payload, ensure_ascii=False)
        except Exception:
            dumped = str(payload)
        log(f"TMDB RESPONSE_BODY /tv/{{id}}/season/{{n}}/episode/{{e}}/credits body={dumped[:2000]}")

    cast_payload = payload.get("cast") or []
    if callable(log):
        log(f"TMDB CREDITS /tv/{{id}}/season/{{n}}/episode/{{e}}/credits cast={len(cast_payload)}")
    # Shared filtering/ordering with the other credit endpoints.
    return _parse_cast_payload(cast_payload)
TmdbShowMeta( + tmdb_id=tmdb_id, + plot=plot, + poster=poster, + fanart=fanart, + rating=rating, + votes=votes, + cast=cast, + ) + + +@dataclass(frozen=True) +class TmdbMovieMeta: + tmdb_id: int + plot: str + poster: str + fanart: str + runtime_minutes: int + rating: float + votes: int + cast: List[TmdbCastMember] + + +def _fetch_movie_details( + *, + tmdb_id: int, + api_key: str, + language: str, + timeout: int, + log: Callable[[str], None] | None, + log_responses: bool, + include_cast: bool, +) -> Tuple[int, List[TmdbCastMember]]: + """Fetches /movie/{id} and (optionally) bundles credits via append_to_response=credits.""" + if requests is None or not tmdb_id: + return 0, [] + api_key = (api_key or "").strip() + if not api_key: + return 0, [] + params: Dict[str, str] = { + "api_key": api_key, + "language": (language or "de-DE").strip(), + } + if include_cast: + params["append_to_response"] = "credits" + url = f"{TMDB_API_BASE}/movie/{tmdb_id}?{urlencode(params)}" + status, payload, body_text = _tmdb_get_json(url=url, timeout=timeout, log=log, log_responses=log_responses) + if callable(log): + log(f"TMDB RESPONSE /movie/{{id}} status={status}") + if log_responses and payload is None and body_text: + log(f"TMDB RESPONSE_BODY /movie/{{id}} body={body_text[:2000]}") + if status != 200 or not isinstance(payload, dict): + return 0, [] + try: + runtime = int(payload.get("runtime") or 0) + except Exception: + runtime = 0 + cast: List[TmdbCastMember] = [] + if include_cast: + credits = payload.get("credits") or {} + cast = _parse_cast_payload((credits or {}).get("cast")) + return runtime, cast + + +def lookup_movie( + *, + title: str, + api_key: str, + language: str = "de-DE", + timeout: int = 15, + log: Callable[[str], None] | None = None, + log_responses: bool = False, + include_cast: bool = False, +) -> Optional[TmdbMovieMeta]: + """Sucht einen Film bei TMDB und liefert Plot + Poster-URL (wenn vorhanden).""" + if requests is None: + return None + api_key = (api_key or 
"").strip() + if not api_key: + return None + query = (title or "").strip() + if not query: + return None + + params = { + "api_key": api_key, + "language": (language or "de-DE").strip(), + "query": query, + "include_adult": "false", + "page": "1", + } + url = f"{TMDB_API_BASE}/search/movie?{urlencode(params)}" + status, payload, body_text = _tmdb_get_json( + url=url, + timeout=timeout, + log=log, + log_responses=log_responses, + ) + results = (payload or {}).get("results") if isinstance(payload, dict) else [] + results = results or [] + if callable(log): + log(f"TMDB RESPONSE /search/movie status={status} results={len(results)}") + if log_responses and payload is None and body_text: + log(f"TMDB RESPONSE_BODY /search/movie body={body_text[:2000]}") + + if status != 200: + return None + if not results: + return None + + normalized_query = query.casefold() + best = None + for candidate in results: + name = (candidate.get("title") or "").casefold() + original_name = (candidate.get("original_title") or "").casefold() + if name == normalized_query or original_name == normalized_query: + best = candidate + break + if best is None: + best = results[0] + + tmdb_id = int(best.get("id") or 0) + plot = (best.get("overview") or "").strip() + poster = _image_url(best.get("poster_path") or "", size="w342") + fanart = _image_url(best.get("backdrop_path") or "", size="w780") + runtime_minutes = 0 + try: + rating = float(best.get("vote_average") or 0.0) + except Exception: + rating = 0.0 + try: + votes = int(best.get("vote_count") or 0) + except Exception: + votes = 0 + if not tmdb_id: + return None + cast: List[TmdbCastMember] = [] + runtime_minutes, cast = _fetch_movie_details( + tmdb_id=tmdb_id, + api_key=api_key, + language=language, + timeout=timeout, + log=log, + log_responses=log_responses, + include_cast=include_cast, + ) + if not plot and not poster and not fanart and not rating and not votes and not cast: + return None + return TmdbMovieMeta( + tmdb_id=tmdb_id, + 
plot=plot, + poster=poster, + fanart=fanart, + runtime_minutes=runtime_minutes, + rating=rating, + votes=votes, + cast=cast, + ) + + +@dataclass(frozen=True) +class TmdbEpisodeMeta: + plot: str + thumb: str + runtime_minutes: int + + +@dataclass(frozen=True) +class TmdbSeasonMeta: + plot: str + poster: str + + +def lookup_tv_season_summary( + *, + tmdb_id: int, + season_number: int, + api_key: str, + language: str = "de-DE", + timeout: int = 15, + log: Callable[[str], None] | None = None, + log_responses: bool = False, +) -> Optional[TmdbSeasonMeta]: + """Lädt Staffel-Meta (Plot + Poster).""" + if requests is None: + return None + + api_key = (api_key or "").strip() + if not api_key or not tmdb_id: + return None + + params = {"api_key": api_key, "language": (language or "de-DE").strip()} + url = f"{TMDB_API_BASE}/tv/{tmdb_id}/season/{season_number}?{urlencode(params)}" + if callable(log): + log(f"TMDB GET {url}") + try: + response = requests.get(url, timeout=timeout) + except Exception: + return None + status = getattr(response, "status_code", None) + if callable(log): + log(f"TMDB RESPONSE /tv/{{id}}/season/{{n}} status={status}") + if status != 200: + return None + try: + payload = response.json() or {} + except Exception: + return None + if callable(log) and log_responses: + try: + dumped = json.dumps(payload, ensure_ascii=False) + except Exception: + dumped = str(payload) + log(f"TMDB RESPONSE_BODY /tv/{{id}}/season/{{n}} body={dumped[:2000]}") + + plot = (payload.get("overview") or "").strip() + poster_path = (payload.get("poster_path") or "").strip() + poster = f"{TMDB_IMAGE_BASE}/w342{poster_path}" if poster_path else "" + if not plot and not poster: + return None + return TmdbSeasonMeta(plot=plot, poster=poster) + + +def lookup_tv_season( + *, + tmdb_id: int, + season_number: int, + api_key: str, + language: str = "de-DE", + timeout: int = 15, + log: Callable[[str], None] | None = None, + log_responses: bool = False, +) -> Optional[Dict[int, 
TmdbEpisodeMeta]]: + """Lädt Episoden-Meta für eine Staffel: episode_number -> (plot, thumb).""" + if requests is None: + return None + api_key = (api_key or "").strip() + if not api_key or not tmdb_id or season_number is None: + return None + params = {"api_key": api_key, "language": (language or "de-DE").strip()} + url = f"{TMDB_API_BASE}/tv/{tmdb_id}/season/{season_number}?{urlencode(params)}" + if callable(log): + log(f"TMDB GET {url}") + try: + response = requests.get(url, timeout=timeout) + except Exception as exc: # pragma: no cover + if callable(log): + log(f"TMDB ERROR /tv/{{id}}/season/{{n}} request_failed error={exc!r}") + return None + + status = getattr(response, "status_code", None) + payload = None + body_text = "" + try: + payload = response.json() or {} + except Exception: + try: + body_text = (response.text or "").strip() + except Exception: + body_text = "" + + episodes = (payload or {}).get("episodes") or [] + if callable(log): + log(f"TMDB RESPONSE /tv/{{id}}/season/{{n}} status={status} episodes={len(episodes)}") + if log_responses: + if payload is not None: + try: + dumped = json.dumps(payload, ensure_ascii=False) + except Exception: + dumped = str(payload) + log(f"TMDB RESPONSE_BODY /tv/{{id}}/season/{{n}} body={dumped[:2000]}") + elif body_text: + log(f"TMDB RESPONSE_BODY /tv/{{id}}/season/{{n}} body={body_text[:2000]}") + + if status != 200 or not episodes: + return None + + result: Dict[int, TmdbEpisodeMeta] = {} + for entry in episodes: + try: + ep_number = int(entry.get("episode_number") or 0) + except Exception: + continue + if not ep_number: + continue + plot = (entry.get("overview") or "").strip() + runtime_minutes = 0 + try: + runtime_minutes = int(entry.get("runtime") or 0) + except Exception: + runtime_minutes = 0 + still_path = (entry.get("still_path") or "").strip() + thumb = f"{TMDB_IMAGE_BASE}/w300{still_path}" if still_path else "" + if not plot and not thumb and not runtime_minutes: + continue + result[ep_number] = 
TmdbEpisodeMeta(plot=plot, thumb=thumb, runtime_minutes=runtime_minutes) + return result or None diff --git a/dist/plugin.video.viewit-0.1.46.zip b/dist/plugin.video.viewit-0.1.46.zip new file mode 100644 index 0000000..8314172 Binary files /dev/null and b/dist/plugin.video.viewit-0.1.46.zip differ diff --git a/dist/plugin.video.viewit/LICENSE.txt b/dist/plugin.video.viewit/LICENSE.txt new file mode 100644 index 0000000..678a7ba --- /dev/null +++ b/dist/plugin.video.viewit/LICENSE.txt @@ -0,0 +1,598 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. 
Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. + States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. 
To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for the +work, and the source code for shared libraries and dynamically linked +subprograms that the work is specifically designed to require, such as +by intimate data communication or control flow between those subprograms +and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified it, +and giving a relevant date. + + b) The work must carry prominent notices stating that it is released +under this License and any conditions added under section 7. 
This +requirement modifies the requirement in section 4 to "keep intact all +notices". + + c) You must license the entire work, as a whole, under this License +to anyone who comes into possession of a copy. This License will +therefore apply, along with any applicable section 7 additional terms, +to the whole of the work, and all its parts, regardless of how they are +packaged. This License gives no permission to license the work in any +other way, but it does not invalidate such permission if you have +separately received it. + + d) If the work has interactive user interfaces, each must display +Appropriate Legal Notices; however, if the Program has interactive +interfaces that do not display Appropriate Legal Notices, your work +need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product +(including a physical distribution medium), accompanied by the +Corresponding Source fixed on a durable physical medium customarily +used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product +(including a physical distribution medium), accompanied by a written +offer, valid for at least three years and valid for as long as you +offer spare parts or customer support for that product model, to give +anyone who possesses the object code either (1) a copy of the +Corresponding Source for all the software in the product that is +covered by this License, on a durable physical medium customarily used +for software interchange, for a price no more than your reasonable cost +of physically performing this conveying of source, or (2) access to +copy the Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the +written offer to provide the Corresponding Source. This alternative +is allowed only occasionally and noncommercially, and only if you +received the object code with such an offer, in accord with subsection +6b. + + d) Convey the object code by offering access from a designated place +(gratis or for a charge), and offer equivalent access to the +Corresponding Source in the same way through the same place at no +further charge. You need not require recipients to copy the +Corresponding Source along with the object code. If the place to copy +the object code is a network server, the Corresponding Source may be on +a different server (operated by you or a third party) that supports +equivalent copying facilities, provided you maintain clear directions +next to the object code saying where to find the Corresponding Source. +Regardless of what server hosts the Corresponding Source, you remain +obligated to ensure that it is available for as long as needed to +satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided +you inform other peers where the object code and Corresponding Source +of the work are being offered to the general public at no charge under +subsection 6d. + + 7. 
Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the +terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or +author attributions in that material or in the Appropriate Legal +Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or +requiring that modified versions of such material be marked in +reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or +authors of the material; or + + e) Declining to grant rights under trademark law for use of some +trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that +material by anyone who 
conveys the material (or modified versions of +it) with contractual assumptions of liability to the recipient, for +any liability that these contractual assumptions directly impose on +those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that transaction +who receives a copy of the work also receives whatever licenses to the +work the party's predecessor in interest had or could give under the +previous paragraph, plus a right to possession of the Corresponding +Source of the work from the predecessor in interest, if the +predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims owned +or controlled by the contributor, whether already acquired or hereafter +acquired, that would be infringed by some manner, permitted by this +License, of making, using, or selling its contributor version, but do +not include claims that would be infringed only as a consequence of +further modification of the contributor version. For purposes of this +definition, "control" includes the right to grant patent sublicenses in +a manner consistent with the requirements of this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is conditioned +on the non-exercise of one or more of the rights that are specifically +granted under this License. 
You may not convey a covered work if you +are a party to an arrangement with a third party that is in the business +of distributing software, under which you make payment to the third +party based on the extent of your activity of conveying the work, and +under which the third party grants, to any of the parties who would +receive the covered work from you, a discriminatory patent license (a) +in connection with copies of the covered work conveyed by you (or +copies made from those copies), or (b) primarily for and in connection +with specific products or compilations that contain the covered work, +unless you entered into that arrangement, or that patent license was +granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not convey it at all. For example, if you agree to terms that +obligate you to collect a royalty for further conveying from those to +whom you convey the Program, the only way you could satisfy both those +terms and this License would be to refrain entirely from conveying the +Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
diff --git a/dist/plugin.video.viewit/NOTICE.txt b/dist/plugin.video.viewit/NOTICE.txt
new file mode 100644
index 0000000..b7d030d
--- /dev/null
+++ b/dist/plugin.video.viewit/NOTICE.txt
+Copyright (C) 2026 ViewIt contributors
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+This Kodi addon depends on `script.module.resolveurl`.
diff --git a/dist/plugin.video.viewit/README_DEPENDENCIES.txt b/dist/plugin.video.viewit/README_DEPENDENCIES.txt
new file mode 100644
index 0000000..f9c1c29
--- /dev/null
+++ b/dist/plugin.video.viewit/README_DEPENDENCIES.txt
+Abhaengigkeiten fuer Serienstream-Plugin:
+- Python-Paket: requests
+- Python-Paket: beautifulsoup4
+- Kodi-Addon: script.module.resolveurl
+
+Hinweis:
+Kodi nutzt sein eigenes Python. Installiere Pakete in die Kodi-Python-Umgebung
+oder nutze ein Kodi-Addon, das Python-Pakete mitliefert.
+ +Lizenz: +Dieses Kodi-Addon ist GPL-3.0-or-later (siehe `LICENSE.txt`). diff --git a/dist/plugin.video.viewit/addon.xml b/dist/plugin.video.viewit/addon.xml new file mode 100644 index 0000000..92f80c7 --- /dev/null +++ b/dist/plugin.video.viewit/addon.xml @@ -0,0 +1,21 @@ + + + + + + + + + + video + + + ViewIt Kodi Plugin + Streaming-Addon für Streamingseiten: Suche, Staffeln/Episoden und Wiedergabe. + + icon.png + + GPL-3.0-or-later + all + + diff --git a/dist/plugin.video.viewit/default.py b/dist/plugin.video.viewit/default.py new file mode 100644 index 0000000..191f345 --- /dev/null +++ b/dist/plugin.video.viewit/default.py @@ -0,0 +1,2417 @@ +#!/usr/bin/env python3 +"""ViewIt Kodi-Addon Einstiegspunkt. + +Dieses Modul ist der Router fuer die Kodi-Navigation: es rendert Menues, +ruft Plugin-Implementierungen auf und startet die Wiedergabe. +""" + +from __future__ import annotations + +import asyncio +from contextlib import contextmanager +from datetime import datetime +import importlib.util +import inspect +import os +import re +import sys +from pathlib import Path +from types import ModuleType +from urllib.parse import parse_qs, urlencode + +try: # pragma: no cover - Kodi runtime + import xbmc # type: ignore[import-not-found] + import xbmcaddon # type: ignore[import-not-found] + import xbmcgui # type: ignore[import-not-found] + import xbmcplugin # type: ignore[import-not-found] + import xbmcvfs # type: ignore[import-not-found] +except ImportError: # pragma: no cover - allow importing outside Kodi (e.g. 
linting) + xbmc = None + xbmcaddon = None + xbmcgui = None + xbmcplugin = None + xbmcvfs = None + + class _XbmcStub: + LOGDEBUG = 0 + LOGINFO = 1 + LOGWARNING = 2 + + @staticmethod + def log(message: str, level: int = 1) -> None: + print(f"[KodiStub:{level}] {message}") + + class Player: + def play(self, item: str, listitem: object | None = None) -> None: + print(f"[KodiStub] play: {item}") + + class _XbmcGuiStub: + INPUT_ALPHANUM = 0 + NOTIFICATION_INFO = 0 + + class Dialog: + def input(self, heading: str, type: int = 0) -> str: + raise RuntimeError("xbmcgui ist nicht verfuegbar (KodiStub).") + + def select(self, heading: str, options: list[str]) -> int: + raise RuntimeError("xbmcgui ist nicht verfuegbar (KodiStub).") + + def notification(self, heading: str, message: str, icon: int = 0, time: int = 0) -> None: + print(f"[KodiStub] notification: {heading}: {message}") + + class ListItem: + def __init__(self, label: str = "", path: str = "") -> None: + self._label = label + self._path = path + + def setInfo(self, type: str, infoLabels: dict[str, str]) -> None: + return + + class _XbmcPluginStub: + @staticmethod + def addDirectoryItem(*, handle: int, url: str, listitem: object, isFolder: bool) -> None: + print(f"[KodiStub] addDirectoryItem: {url}") + + @staticmethod + def endOfDirectory(handle: int) -> None: + print(f"[KodiStub] endOfDirectory: {handle}") + + @staticmethod + def setPluginCategory(handle: int, category: str) -> None: + print(f"[KodiStub] category: {category}") + + xbmc = _XbmcStub() + xbmcgui = _XbmcGuiStub() + xbmcplugin = _XbmcPluginStub() + +from plugin_interface import BasisPlugin +from tmdb import TmdbCastMember, fetch_tv_episode_credits, lookup_movie, lookup_tv_season, lookup_tv_season_summary, lookup_tv_show + +PLUGIN_DIR = Path(__file__).with_name("plugins") +_PLUGIN_CACHE: dict[str, BasisPlugin] | None = None +_TMDB_CACHE: dict[str, tuple[dict[str, str], dict[str, str]]] = {} +_TMDB_CAST_CACHE: dict[str, list[TmdbCastMember]] = {} 
# Process-wide caches shared by all handler invocations.
_TMDB_ID_CACHE: dict[str, int] = {}  # casefolded show title -> TMDB id (TV only)
_TMDB_SEASON_CACHE: dict[tuple[int, int, str, str], dict[int, tuple[dict[str, str], dict[str, str]]]] = {}
_TMDB_SEASON_SUMMARY_CACHE: dict[tuple[int, int, str, str], tuple[dict[str, str], dict[str, str]]] = {}
_TMDB_EPISODE_CAST_CACHE: dict[tuple[int, int, int, str], list[TmdbCastMember]] = {}
_TMDB_LOG_PATH: str | None = None  # lazily resolved path of tmdb.log
_GENRE_TITLES_CACHE: dict[tuple[str, str], list[str]] = {}
_ADDON_INSTANCE = None  # xbmcaddon.Addon singleton, created on first use
_PLAYSTATE_CACHE: dict[str, dict[str, object]] | None = None
WATCHED_THRESHOLD = 0.9


def _tmdb_prefetch_concurrency() -> int:
    """Max number of concurrent TMDB lookups when prefetching metadata for lists."""
    value = 6  # default when the setting is empty or unreadable
    try:
        raw = _get_setting_string("tmdb_prefetch_concurrency").strip()
        if raw:
            value = int(raw)
    except Exception:
        value = 6
    # Clamp to a sane window regardless of what the user configured.
    return min(20, max(1, value))


def _log(message: str, level: int = xbmc.LOGINFO) -> None:
    """Write *message* to the Kodi log, tagged with the addon prefix."""
    xbmc.log(f"[ViewIt] {message}", level)


def _busy_open() -> None:
    """Open Kodi's non-cancelable busy spinner (best effort, never raises)."""
    try:  # pragma: no cover - Kodi runtime
        if xbmc is not None and hasattr(xbmc, "executebuiltin"):
            xbmc.executebuiltin("ActivateWindow(busydialognocancel)")
    except Exception:
        pass


def _busy_close() -> None:
    """Close both busy-dialog variants (best effort, never raises)."""
    try:  # pragma: no cover - Kodi runtime
        if xbmc is not None and hasattr(xbmc, "executebuiltin"):
            xbmc.executebuiltin("Dialog.Close(busydialognocancel)")
            xbmc.executebuiltin("Dialog.Close(busydialog)")
    except Exception:
        pass


@contextmanager
def _busy_dialog():
    """Context manager that shows the busy spinner for the enclosed work."""
    _busy_open()
    try:
        yield
    finally:
        _busy_close()


def _get_handle() -> int:
    """Return the plugin handle Kodi passed on argv, or -1 outside Kodi."""
    if len(sys.argv) > 1:
        return int(sys.argv[1])
    return -1


def _set_content(handle: int, content: str) -> None:
    """Hint Kodi about the content type so skins can show watched/resume overlays."""
    normalized = (content or "").strip()
    if not normalized:
        return
    try:  # pragma: no cover - Kodi runtime
        setter = getattr(xbmcplugin, "setContent", None)
        if callable(setter):
            setter(handle, normalized)
    except Exception:
        pass


def _get_addon():
    """Return the lazily created ``xbmcaddon.Addon`` singleton (None outside Kodi)."""
    global _ADDON_INSTANCE
    if xbmcaddon is None:
        return None
    if _ADDON_INSTANCE is None:
        _ADDON_INSTANCE = xbmcaddon.Addon()
    return _ADDON_INSTANCE
_get_addon(): + global _ADDON_INSTANCE + if xbmcaddon is None: + return None + if _ADDON_INSTANCE is None: + _ADDON_INSTANCE = xbmcaddon.Addon() + return _ADDON_INSTANCE + + +def _playstate_key(*, plugin_name: str, title: str, season: str, episode: str) -> str: + plugin_name = (plugin_name or "").strip() + title = (title or "").strip() + season = (season or "").strip() + episode = (episode or "").strip() + return f"{plugin_name}\t{title}\t{season}\t{episode}" + + +def _playstate_path() -> str: + return _get_log_path("playstate.json") + + +def _load_playstate() -> dict[str, dict[str, object]]: + global _PLAYSTATE_CACHE + if _PLAYSTATE_CACHE is not None: + return _PLAYSTATE_CACHE + path = _playstate_path() + try: + if xbmcvfs and xbmcvfs.exists(path): + handle = xbmcvfs.File(path) + raw = handle.read() + handle.close() + else: + with open(path, "r", encoding="utf-8") as handle: + raw = handle.read() + data = json.loads(raw or "{}") + if isinstance(data, dict): + normalized: dict[str, dict[str, object]] = {} + for key, value in data.items(): + if isinstance(key, str) and isinstance(value, dict): + normalized[key] = dict(value) + _PLAYSTATE_CACHE = normalized + return normalized + except Exception: + pass + _PLAYSTATE_CACHE = {} + return {} + + +def _save_playstate(state: dict[str, dict[str, object]]) -> None: + global _PLAYSTATE_CACHE + _PLAYSTATE_CACHE = state + path = _playstate_path() + try: + payload = json.dumps(state, ensure_ascii=False, sort_keys=True) + except Exception: + return + try: + if xbmcvfs: + directory = os.path.dirname(path) + if directory and not xbmcvfs.exists(directory): + xbmcvfs.mkdirs(directory) + handle = xbmcvfs.File(path, "w") + handle.write(payload) + handle.close() + else: + with open(path, "w", encoding="utf-8") as handle: + handle.write(payload) + except Exception: + return + + +def _get_playstate(key: str) -> dict[str, object]: + return dict(_load_playstate().get(key, {}) or {}) + + +def _set_playstate(key: str, value: dict[str, 
object]) -> None: + state = _load_playstate() + if value: + state[key] = dict(value) + else: + state.pop(key, None) + _save_playstate(state) + + +def _apply_playstate_to_info(info_labels: dict[str, object], playstate: dict[str, object]) -> dict[str, object]: + info_labels = dict(info_labels or {}) + watched = bool(playstate.get("watched") or False) + resume_position = playstate.get("resume_position") + resume_total = playstate.get("resume_total") + if watched: + info_labels["playcount"] = 1 + info_labels.pop("resume_position", None) + info_labels.pop("resume_total", None) + else: + try: + pos = int(resume_position) if resume_position is not None else 0 + tot = int(resume_total) if resume_total is not None else 0 + except Exception: + pos, tot = 0, 0 + if pos > 0 and tot > 0: + info_labels["resume_position"] = pos + info_labels["resume_total"] = tot + return info_labels + + +def _time_label(seconds: int) -> str: + try: + seconds = int(seconds or 0) + except Exception: + seconds = 0 + if seconds <= 0: + return "" + hours = seconds // 3600 + minutes = (seconds % 3600) // 60 + secs = seconds % 60 + if hours > 0: + return f"{hours:02d}:{minutes:02d}:{secs:02d}" + return f"{minutes:02d}:{secs:02d}" + + +def _label_with_playstate(label: str, playstate: dict[str, object]) -> str: + watched = bool(playstate.get("watched") or False) + if watched: + return f"✓ {label}" + resume_pos = playstate.get("resume_position") + try: + pos = int(resume_pos) if resume_pos is not None else 0 + except Exception: + pos = 0 + if pos > 0: + return f"↩ {_time_label(pos)} {label}" + return label + + +def _title_playstate(plugin_name: str, title: str) -> dict[str, object]: + return _get_playstate(_playstate_key(plugin_name=plugin_name, title=title, season="", episode="")) + + +def _season_playstate(plugin_name: str, title: str, season: str) -> dict[str, object]: + return _get_playstate(_playstate_key(plugin_name=plugin_name, title=title, season=season, episode="")) + + +def 
_get_setting_string(setting_id: str) -> str: + if xbmcaddon is None: + return "" + addon = _get_addon() + if addon is None: + return "" + getter = getattr(addon, "getSettingString", None) + if callable(getter): + try: + return str(getter(setting_id) or "") + except TypeError: + return "" + getter = getattr(addon, "getSetting", None) + if callable(getter): + try: + return str(getter(setting_id) or "") + except TypeError: + return "" + return "" + + +def _get_setting_bool(setting_id: str, *, default: bool = False) -> bool: + if xbmcaddon is None: + return default + addon = _get_addon() + if addon is None: + return default + getter = getattr(addon, "getSettingBool", None) + if callable(getter): + # Kodi kann für unbekannte Settings stillschweigend `False` liefern. + # Damit neue Settings mit `default=True` korrekt funktionieren, prüfen wir auf leeren Raw-Value. + raw_getter = getattr(addon, "getSetting", None) + if callable(raw_getter): + try: + raw = str(raw_getter(setting_id) or "").strip() + except TypeError: + raw = "" + if raw == "": + return default + try: + return bool(getter(setting_id)) + except TypeError: + return default + getter = getattr(addon, "getSetting", None) + if callable(getter): + try: + raw = str(getter(setting_id) or "").strip().lower() + except TypeError: + return default + if raw in {"true", "1", "yes", "on"}: + return True + if raw in {"false", "0", "no", "off"}: + return False + return default + + +def _apply_video_info(item, info_labels: dict[str, object] | None, cast: list[TmdbCastMember] | None) -> None: + """Setzt Metadaten bevorzugt via InfoTagVideo (Kodi v20+), mit Fallback auf deprecated APIs.""" + + if not info_labels and not cast: + return + + info_labels = dict(info_labels or {}) + + get_tag = getattr(item, "getVideoInfoTag", None) + tag = None + if callable(get_tag): + try: + tag = get_tag() + except Exception: + tag = None + + if tag is not None: + try: + title = info_labels.get("title") or "" + plot = info_labels.get("plot") or 
"" + mediatype = info_labels.get("mediatype") or "" + tvshowtitle = info_labels.get("tvshowtitle") or "" + season = info_labels.get("season") + episode = info_labels.get("episode") + rating = info_labels.get("rating") + votes = info_labels.get("votes") + duration = info_labels.get("duration") + playcount = info_labels.get("playcount") + resume_position = info_labels.get("resume_position") + resume_total = info_labels.get("resume_total") + + setter = getattr(tag, "setTitle", None) + if callable(setter) and title: + setter(str(title)) + setter = getattr(tag, "setPlot", None) + if callable(setter) and plot: + setter(str(plot)) + setter = getattr(tag, "setMediaType", None) + if callable(setter) and mediatype: + setter(str(mediatype)) + setter = getattr(tag, "setTvShowTitle", None) + if callable(setter) and tvshowtitle: + setter(str(tvshowtitle)) + setter = getattr(tag, "setSeason", None) + if callable(setter) and season not in (None, "", 0, "0"): + setter(int(season)) # type: ignore[arg-type] + setter = getattr(tag, "setEpisode", None) + if callable(setter) and episode not in (None, "", 0, "0"): + setter(int(episode)) # type: ignore[arg-type] + + if rating not in (None, "", 0, "0"): + try: + rating_f = float(rating) # type: ignore[arg-type] + except Exception: + rating_f = 0.0 + if rating_f: + set_rating = getattr(tag, "setRating", None) + if callable(set_rating): + try: + if votes not in (None, "", 0, "0"): + set_rating(rating_f, int(votes), "tmdb") # type: ignore[misc] + else: + set_rating(rating_f) # type: ignore[misc] + except Exception: + try: + set_rating(rating_f, int(votes or 0), "tmdb", True) # type: ignore[misc] + except Exception: + pass + + if duration not in (None, "", 0, "0"): + try: + duration_i = int(duration) # type: ignore[arg-type] + except Exception: + duration_i = 0 + if duration_i: + set_duration = getattr(tag, "setDuration", None) + if callable(set_duration): + try: + set_duration(duration_i) + except Exception: + pass + + if playcount not in 
(None, "", 0, "0"): + try: + playcount_i = int(playcount) # type: ignore[arg-type] + except Exception: + playcount_i = 0 + if playcount_i: + set_playcount = getattr(tag, "setPlaycount", None) + if callable(set_playcount): + try: + set_playcount(playcount_i) + except Exception: + pass + + try: + pos = int(resume_position) if resume_position is not None else 0 + tot = int(resume_total) if resume_total is not None else 0 + except Exception: + pos, tot = 0, 0 + if pos > 0 and tot > 0: + set_resume = getattr(tag, "setResumePoint", None) + if callable(set_resume): + try: + set_resume(pos, tot) + except Exception: + try: + set_resume(pos) # type: ignore[misc] + except Exception: + pass + + if cast: + set_cast = getattr(tag, "setCast", None) + actor_cls = getattr(xbmc, "Actor", None) + if callable(set_cast) and actor_cls is not None: + actors = [] + for index, member in enumerate(cast[:30]): + try: + actors.append(actor_cls(member.name, member.role, index, member.thumb)) + except Exception: + try: + actors.append(actor_cls(member.name, member.role)) + except Exception: + continue + try: + set_cast(actors) + except Exception: + pass + elif callable(set_cast): + cast_dicts = [ + {"name": m.name, "role": m.role, "thumbnail": m.thumb} + for m in cast[:30] + if m.name + ] + try: + set_cast(cast_dicts) + except Exception: + pass + + return + except Exception: + # Fallback below + pass + + # Deprecated fallback for older Kodi. 
def _get_log_path(filename: str) -> str:
    """Return an absolute path for *filename* in the addon's log directory.

    Inside Kodi the file lives under ``<profile>/logs`` (created on demand);
    outside Kodi it falls back to the module directory.
    """
    if xbmcaddon and xbmcvfs:
        profile = xbmcvfs.translatePath(xbmcaddon.Addon().getAddonInfo("profile"))
        log_dir = os.path.join(profile, "logs")
        if not xbmcvfs.exists(log_dir):
            xbmcvfs.mkdirs(log_dir)
        return os.path.join(log_dir, filename)
    return os.path.join(os.path.dirname(__file__), filename)


def _tmdb_file_log(message: str) -> None:
    """Append a timestamped line to tmdb.log; best effort, never raises."""
    global _TMDB_LOG_PATH
    if _TMDB_LOG_PATH is None:
        _TMDB_LOG_PATH = _get_log_path("tmdb.log")
    stamp = datetime.utcnow().isoformat(timespec="seconds") + "Z"
    line = f"{stamp}\t{message}\n"
    try:
        with open(_TMDB_LOG_PATH, "a", encoding="utf-8") as handle:
            handle.write(line)
    except Exception:
        # NOTE(review): presumably plain open() can fail for Kodi VFS paths;
        # retry through xbmcvfs when it is available.
        if xbmcvfs is None:
            return
        try:
            vfs_handle = xbmcvfs.File(_TMDB_LOG_PATH, "a")
            vfs_handle.write(line)
            vfs_handle.close()
        except Exception:
            return


def _tmdb_labels_and_art(title: str) -> tuple[dict[str, str], dict[str, str], list[TmdbCastMember]]:
    """Resolve TMDB info labels, artwork and cast for a show/movie *title*.

    Results are memoized per (language, settings-flags, title). TV lookups are
    tried before movie lookups; a matched TV show also primes _TMDB_ID_CACHE
    for later season/episode lookups.
    """
    title_key = (title or "").strip().casefold()
    language = _get_setting_string("tmdb_language").strip() or "de-DE"
    show_plot = _get_setting_bool("tmdb_show_plot", default=True)
    show_art = _get_setting_bool("tmdb_show_art", default=True)
    show_fanart = _get_setting_bool("tmdb_show_fanart", default=True)
    show_rating = _get_setting_bool("tmdb_show_rating", default=True)
    show_votes = _get_setting_bool("tmdb_show_votes", default=False)
    show_cast = _get_setting_bool("tmdb_show_cast", default=False)
    flags = f"p{int(show_plot)}a{int(show_art)}f{int(show_fanart)}r{int(show_rating)}v{int(show_votes)}c{int(show_cast)}"
    cache_key = f"{language}|{flags}|{title_key}"

    cached = _TMDB_CACHE.get(cache_key)
    if cached is not None:
        info, art = cached
        # Cast is not stored in _TMDB_CACHE (it feeds ListItem.setCast), so it
        # is cached separately under the same key.
        return info, art, list(_TMDB_CAST_CACHE.get(cache_key, []))

    info_labels: dict[str, str] = {"title": title}
    art: dict[str, str] = {}
    cast: list[TmdbCastMember] = []
    query = (title or "").strip()
    api_key = _get_setting_string("tmdb_api_key").strip()
    log_requests = _get_setting_bool("tmdb_log_requests", default=False)
    log_responses = _get_setting_bool("tmdb_log_responses", default=False)
    if api_key:
        try:
            log_fn = _tmdb_file_log if (log_requests or log_responses) else None
            # Some plugins deliver titles like "… – Der Film"; for TMDB the
            # base title often matches better, so try both.
            candidates: list[str] = []
            if query:
                candidates.append(query)
                simplified = re.sub(r"\s*[-–]\s*der\s+film\s*$", "", query, flags=re.IGNORECASE).strip()
                if simplified and simplified not in candidates:
                    candidates.append(simplified)

            meta = None
            is_tv = False
            for candidate in candidates:
                meta = lookup_tv_show(
                    title=candidate,
                    api_key=api_key,
                    language=language,
                    log=log_fn,
                    log_responses=log_responses,
                    include_cast=show_cast,
                )
                if meta:
                    is_tv = True
                    break
            if not meta:
                for candidate in candidates:
                    movie = lookup_movie(
                        title=candidate,
                        api_key=api_key,
                        language=language,
                        log=log_fn,
                        log_responses=log_responses,
                        include_cast=show_cast,
                    )
                    if movie:
                        meta = movie
                        break
        except Exception as exc:
            try:
                _tmdb_file_log(f"TMDB ERROR lookup_failed title={title!r} error={exc!r}")
            except Exception:
                pass
            _log(f"TMDB Meta fehlgeschlagen: {exc}", xbmc.LOGDEBUG)
            meta = None
        if meta:
            if is_tv:
                # Only TV ids are cached (they feed season/episode lookups);
                # movie ids would fail there.
                _TMDB_ID_CACHE[title_key] = int(getattr(meta, "tmdb_id", 0) or 0)
                info_labels.setdefault("mediatype", "tvshow")
            else:
                info_labels.setdefault("mediatype", "movie")
            plot = getattr(meta, "plot", "")
            if show_plot and plot:
                info_labels["plot"] = plot
            runtime_minutes = int(getattr(meta, "runtime_minutes", 0) or 0)
            if runtime_minutes > 0 and not is_tv:
                info_labels["duration"] = str(runtime_minutes * 60)
            rating = getattr(meta, "rating", 0.0) or 0.0
            votes = getattr(meta, "votes", 0) or 0
            if show_rating and rating:
                # Kodi accepts float or string depending on version; strings are
                # used consistently elsewhere in this module.
                info_labels["rating"] = str(rating)
            if show_votes and votes:
                info_labels["votes"] = str(votes)
            poster = getattr(meta, "poster", "")
            if show_art and poster:
                art.update({"thumb": poster, "poster": poster, "icon": poster})
            if show_fanart and getattr(meta, "fanart", ""):
                fanart = getattr(meta, "fanart", "")
                if fanart:
                    art.update({"fanart": fanart, "landscape": fanart})
            if show_cast:
                cast = list(getattr(meta, "cast", []) or [])
        elif log_requests or log_responses:
            _tmdb_file_log(f"TMDB MISS title={title!r}")

    _TMDB_CACHE[cache_key] = (info_labels, art)
    _TMDB_CAST_CACHE[cache_key] = list(cast)
    return info_labels, art, list(cast)
+ if is_tv: + _TMDB_ID_CACHE[title_key] = int(getattr(meta, "tmdb_id", 0) or 0) + info_labels.setdefault("mediatype", "tvshow") + else: + info_labels.setdefault("mediatype", "movie") + if show_plot and getattr(meta, "plot", ""): + info_labels["plot"] = getattr(meta, "plot", "") + runtime_minutes = int(getattr(meta, "runtime_minutes", 0) or 0) + if runtime_minutes > 0 and not is_tv: + info_labels["duration"] = str(runtime_minutes * 60) + rating = getattr(meta, "rating", 0.0) or 0.0 + votes = getattr(meta, "votes", 0) or 0 + if show_rating and rating: + # Kodi akzeptiert je nach Version float oder string; wir bleiben bei strings wie im restlichen Code. + info_labels["rating"] = str(rating) + if show_votes and votes: + info_labels["votes"] = str(votes) + if show_art and getattr(meta, "poster", ""): + poster = getattr(meta, "poster", "") + art.update({"thumb": poster, "poster": poster, "icon": poster}) + if show_fanart and getattr(meta, "fanart", ""): + fanart = getattr(meta, "fanart", "") + if fanart: + art.update({"fanart": fanart, "landscape": fanart}) + if show_cast: + cast = list(getattr(meta, "cast", []) or []) + elif log_requests or log_responses: + _tmdb_file_log(f"TMDB MISS title={title!r}") + + _TMDB_CACHE[cache_key] = (info_labels, art) + _TMDB_CAST_CACHE[cache_key] = list(cast) + return info_labels, art, list(cast) + + +async def _tmdb_labels_and_art_bulk_async( + titles: list[str], +) -> dict[str, tuple[dict[str, str], dict[str, str], list[TmdbCastMember]]]: + titles = [str(t).strip() for t in (titles or []) if t and str(t).strip()] + if not titles: + return {} + + unique_titles: list[str] = list(dict.fromkeys(titles)) + limit = _tmdb_prefetch_concurrency() + semaphore = asyncio.Semaphore(limit) + + async def fetch_one(title: str): + async with semaphore: + return title, await asyncio.to_thread(_tmdb_labels_and_art, title) + + tasks = [fetch_one(title) for title in unique_titles] + results = await asyncio.gather(*tasks, return_exceptions=True) + mapped: 
def _tmdb_labels_and_art_bulk(
    titles: list[str],
) -> dict[str, tuple[dict[str, str], dict[str, str], list[TmdbCastMember]]]:
    """Synchronous wrapper around the async bulk TMDB prefetch."""
    return _run_async(_tmdb_labels_and_art_bulk_async(titles))


def _tmdb_episode_labels_and_art(*, title: str, season_label: str, episode_label: str) -> tuple[dict[str, str], dict[str, str]]:
    """Resolve per-episode TMDB info labels and artwork.

    Falls back to ``({"title": episode_label}, {})`` whenever the show id,
    season/episode numbers or the API key cannot be resolved. Season payloads
    are cached per (show id, season, language, settings-flags).
    """
    title_key = (title or "").strip().casefold()
    tmdb_id = _TMDB_ID_CACHE.get(title_key)
    if not tmdb_id:
        _tmdb_labels_and_art(title)  # primes _TMDB_ID_CACHE for TV shows
        tmdb_id = _TMDB_ID_CACHE.get(title_key)
    if not tmdb_id:
        return {"title": episode_label}, {}

    season_number = _extract_first_int(season_label)
    episode_number = _extract_first_int(episode_label)
    if season_number is None or episode_number is None:
        return {"title": episode_label}, {}

    language = _get_setting_string("tmdb_language").strip() or "de-DE"
    show_plot = _get_setting_bool("tmdb_show_plot", default=True)
    show_art = _get_setting_bool("tmdb_show_art", default=True)
    flags = f"p{int(show_plot)}a{int(show_art)}"
    season_key = (tmdb_id, season_number, language, flags)

    cached_season = _TMDB_SEASON_CACHE.get(season_key)
    if cached_season is None:
        api_key = _get_setting_string("tmdb_api_key").strip()
        if not api_key:
            return {"title": episode_label}, {}
        log_requests = _get_setting_bool("tmdb_log_requests", default=False)
        log_responses = _get_setting_bool("tmdb_log_responses", default=False)
        log_fn = _tmdb_file_log if (log_requests or log_responses) else None
        try:
            season_meta = lookup_tv_season(
                tmdb_id=tmdb_id,
                season_number=season_number,
                api_key=api_key,
                language=language,
                log=log_fn,
                log_responses=log_responses,
            )
        except Exception as exc:
            if log_fn:
                log_fn(f"TMDB ERROR season_lookup_failed tmdb_id={tmdb_id} season={season_number} error={exc!r}")
            season_meta = None
        # Map episode number -> (info labels, art); an empty mapping is cached
        # too so a failed lookup is not retried for every episode.
        mapped: dict[int, tuple[dict[str, str], dict[str, str]]] = {}
        if season_meta:
            for ep_no, ep in season_meta.items():
                info: dict[str, str] = {"title": f"Episode {ep_no}"}
                if show_plot and ep.plot:
                    info["plot"] = ep.plot
                if getattr(ep, "runtime_minutes", 0):
                    info["duration"] = str(int(getattr(ep, "runtime_minutes", 0)) * 60)
                art: dict[str, str] = {}
                if show_art and ep.thumb:
                    art = {"thumb": ep.thumb}
                mapped[ep_no] = (info, art)
        _TMDB_SEASON_CACHE[season_key] = mapped
        cached_season = mapped

    return cached_season.get(episode_number, ({"title": episode_label}, {}))
log_responses=log_responses, + ) + except Exception as exc: + if log_fn: + log_fn(f"TMDB ERROR season_lookup_failed tmdb_id={tmdb_id} season={season_number} error={exc!r}") + season_meta = None + mapped: dict[int, tuple[dict[str, str], dict[str, str]]] = {} + if season_meta: + for ep_no, ep in season_meta.items(): + info: dict[str, str] = {"title": f"Episode {ep_no}"} + if show_plot and ep.plot: + info["plot"] = ep.plot + if getattr(ep, "runtime_minutes", 0): + info["duration"] = str(int(getattr(ep, "runtime_minutes", 0)) * 60) + art: dict[str, str] = {} + if show_art and ep.thumb: + art = {"thumb": ep.thumb} + mapped[ep_no] = (info, art) + _TMDB_SEASON_CACHE[season_key] = mapped + cached_season = mapped + + return cached_season.get(episode_number, ({"title": episode_label}, {})) + + +def _tmdb_episode_cast(*, title: str, season_label: str, episode_label: str) -> list[TmdbCastMember]: + show_episode_cast = _get_setting_bool("tmdb_show_episode_cast", default=False) + if not show_episode_cast: + return [] + + title_key = (title or "").strip().casefold() + tmdb_id = _TMDB_ID_CACHE.get(title_key) + if not tmdb_id: + _tmdb_labels_and_art(title) + tmdb_id = _TMDB_ID_CACHE.get(title_key) + if not tmdb_id: + return [] + + season_number = _extract_first_int(season_label) + episode_number = _extract_first_int(episode_label) + if season_number is None or episode_number is None: + return [] + + language = _get_setting_string("tmdb_language").strip() or "de-DE" + cache_key = (tmdb_id, season_number, episode_number, language) + cached = _TMDB_EPISODE_CAST_CACHE.get(cache_key) + if cached is not None: + return list(cached) + + api_key = _get_setting_string("tmdb_api_key").strip() + if not api_key: + _TMDB_EPISODE_CAST_CACHE[cache_key] = [] + return [] + + log_requests = _get_setting_bool("tmdb_log_requests", default=False) + log_responses = _get_setting_bool("tmdb_log_responses", default=False) + log_fn = _tmdb_file_log if (log_requests or log_responses) else None + try: + cast = 
fetch_tv_episode_credits( + tmdb_id=tmdb_id, + season_number=season_number, + episode_number=episode_number, + api_key=api_key, + language=language, + log=log_fn, + log_responses=log_responses, + ) + except Exception as exc: + if log_fn: + log_fn( + f"TMDB ERROR episode_credits_failed tmdb_id={tmdb_id} season={season_number} episode={episode_number} error={exc!r}" + ) + cast = [] + _TMDB_EPISODE_CAST_CACHE[cache_key] = list(cast) + return list(cast) + + +def _add_directory_item( + handle: int, + label: str, + action: str, + params: dict[str, str] | None = None, + *, + is_folder: bool = True, + info_labels: dict[str, str] | None = None, + art: dict[str, str] | None = None, + cast: list[TmdbCastMember] | None = None, +) -> None: + """Fuegt einen Eintrag (Folder oder Playable) in die Kodi-Liste ein.""" + query: dict[str, str] = {"action": action} + if params: + query.update(params) + url = f"{sys.argv[0]}?{urlencode(query)}" + item = xbmcgui.ListItem(label=label) + if not is_folder: + try: + item.setProperty("IsPlayable", "true") + except Exception: + pass + _apply_video_info(item, info_labels, cast) + if art: + setter = getattr(item, "setArt", None) + if callable(setter): + try: + setter(art) + except Exception: + pass + xbmcplugin.addDirectoryItem(handle=handle, url=url, listitem=item, isFolder=is_folder) + + +def _show_root_menu() -> None: + handle = _get_handle() + _log("Root-Menue wird angezeigt.") + _add_directory_item(handle, "Globale Suche", "search") + + plugins = _discover_plugins() + for plugin_name in sorted(plugins.keys(), key=lambda value: value.casefold()): + display = f"{plugin_name}" + _add_directory_item(handle, display, "plugin_menu", {"plugin": plugin_name}, is_folder=True) + + _add_directory_item(handle, "Einstellungen", "settings") + xbmcplugin.endOfDirectory(handle) + + +def _show_plugin_menu(plugin_name: str) -> None: + handle = _get_handle() + plugin_name = (plugin_name or "").strip() + plugin = _discover_plugins().get(plugin_name) + if not 
plugin: + xbmcgui.Dialog().notification("Plugin", "Plugin nicht gefunden.", xbmcgui.NOTIFICATION_INFO, 3000) + xbmcplugin.endOfDirectory(handle) + return + + xbmcplugin.setPluginCategory(handle, plugin_name) + + _add_directory_item(handle, "Suche", "plugin_search", {"plugin": plugin_name}, is_folder=True) + + if _plugin_has_capability(plugin, "new_titles"): + _add_directory_item(handle, "Neue Titel", "new_titles", {"plugin": plugin_name, "page": "1"}, is_folder=True) + + if _plugin_has_capability(plugin, "latest_episodes"): + _add_directory_item(handle, "Neueste Folgen", "latest_episodes", {"plugin": plugin_name, "page": "1"}, is_folder=True) + + if _plugin_has_capability(plugin, "genres"): + _add_directory_item(handle, "Genres", "genres", {"plugin": plugin_name}, is_folder=True) + + if _plugin_has_capability(plugin, "popular_series"): + _add_directory_item(handle, "Meist gesehen", "popular", {"plugin": plugin_name, "page": "1"}, is_folder=True) + + xbmcplugin.endOfDirectory(handle) + + +def _show_plugin_search(plugin_name: str) -> None: + plugin_name = (plugin_name or "").strip() + plugin = _discover_plugins().get(plugin_name) + if not plugin: + xbmcgui.Dialog().notification("Suche", "Plugin nicht gefunden.", xbmcgui.NOTIFICATION_INFO, 3000) + _show_root_menu() + return + + _log(f"Plugin-Suche gestartet: {plugin_name}") + dialog = xbmcgui.Dialog() + query = dialog.input(f"{plugin_name}: Titel eingeben", type=xbmcgui.INPUT_ALPHANUM).strip() + if not query: + _log("Plugin-Suche abgebrochen (leere Eingabe).", xbmc.LOGDEBUG) + _show_plugin_menu(plugin_name) + return + _log(f"Plugin-Suchbegriff ({plugin_name}): {query}", xbmc.LOGDEBUG) + _show_plugin_search_results(plugin_name, query) + + +def _show_plugin_search_results(plugin_name: str, query: str) -> None: + handle = _get_handle() + plugin_name = (plugin_name or "").strip() + query = (query or "").strip() + plugin = _discover_plugins().get(plugin_name) + if not plugin: + xbmcgui.Dialog().notification("Suche", 
"Plugin nicht gefunden.", xbmcgui.NOTIFICATION_INFO, 3000) + xbmcplugin.endOfDirectory(handle) + return + + xbmcplugin.setPluginCategory(handle, f"{plugin_name}: {query}") + _set_content(handle, "movies" if plugin_name.casefold() == "einschalten" else "tvshows") + _log(f"Suche nach Titeln (Plugin={plugin_name}): {query}") + + try: + results = _run_async(plugin.search_titles(query)) + except Exception as exc: + _log(f"Suche fehlgeschlagen ({plugin_name}): {exc}", xbmc.LOGWARNING) + xbmcgui.Dialog().notification("Suche", "Suche fehlgeschlagen.", xbmcgui.NOTIFICATION_INFO, 3000) + xbmcplugin.endOfDirectory(handle) + return + + results = [str(t).strip() for t in (results or []) if t and str(t).strip()] + results.sort(key=lambda value: value.casefold()) + tmdb_prefetched: dict[str, tuple[dict[str, str], dict[str, str], list[TmdbCastMember]]] = {} + if results: + with _busy_dialog(): + tmdb_prefetched = _tmdb_labels_and_art_bulk(list(results)) + for title in results: + info_labels, art, cast = tmdb_prefetched.get(title, _tmdb_labels_and_art(title)) + info_labels = dict(info_labels or {}) + info_labels.setdefault("mediatype", "tvshow") + if (info_labels.get("mediatype") or "").strip().casefold() == "tvshow": + info_labels.setdefault("tvshowtitle", title) + playstate = _title_playstate(plugin_name, title) + merged_info = _apply_playstate_to_info(dict(info_labels), playstate) + display_label = _label_with_duration(title, info_labels) + display_label = _label_with_playstate(display_label, playstate) + direct_play = bool(plugin_name.casefold() == "einschalten" and _get_setting_bool("einschalten_enable_playback", default=False)) + _add_directory_item( + handle, + display_label, + "play_movie" if direct_play else "seasons", + {"plugin": plugin_name, "title": title}, + is_folder=not direct_play, + info_labels=merged_info, + art=art, + cast=cast, + ) + xbmcplugin.endOfDirectory(handle) + + +def _import_plugin_module(path: Path) -> ModuleType: + spec = 
importlib.util.spec_from_file_location(path.stem, path) + if spec is None or spec.loader is None: + raise ImportError(f"Modul-Spezifikation fuer {path.name} fehlt.") + module = importlib.util.module_from_spec(spec) + sys.modules[spec.name] = module + try: + spec.loader.exec_module(module) + except Exception: + sys.modules.pop(spec.name, None) + raise + return module + + +def _discover_plugins() -> dict[str, BasisPlugin]: + """Laedt alle Plugins aus `plugins/*.py` und cached Instanzen im RAM.""" + global _PLUGIN_CACHE + if _PLUGIN_CACHE is not None: + return _PLUGIN_CACHE + # Plugins werden dynamisch aus `plugins/*.py` geladen, damit Integrationen getrennt + # entwickelt und bei Fehlern isoliert deaktiviert werden koennen. + plugins: dict[str, BasisPlugin] = {} + if not PLUGIN_DIR.exists(): + _PLUGIN_CACHE = plugins + return plugins + for file_path in sorted(PLUGIN_DIR.glob("*.py")): + if file_path.name.startswith("_"): + continue + try: + module = _import_plugin_module(file_path) + except Exception as exc: + xbmc.log(f"Plugin-Datei {file_path.name} konnte nicht geladen werden: {exc}", xbmc.LOGWARNING) + continue + plugin_classes = [ + obj + for obj in module.__dict__.values() + if inspect.isclass(obj) and issubclass(obj, BasisPlugin) and obj is not BasisPlugin + ] + for cls in plugin_classes: + try: + instance = cls() + except Exception as exc: + xbmc.log(f"Plugin {cls.__name__} konnte nicht geladen werden: {exc}", xbmc.LOGWARNING) + continue + if getattr(instance, "is_available", True) is False: + reason = getattr(instance, "unavailable_reason", "Nicht verfuegbar.") + xbmc.log(f"Plugin {cls.__name__} deaktiviert: {reason}", xbmc.LOGWARNING) + continue + plugins[instance.name] = instance + _PLUGIN_CACHE = plugins + return plugins + + +def _run_async(coro): + """Fuehrt eine Coroutine aus, auch wenn Kodi bereits einen Event-Loop hat.""" + try: + loop = asyncio.get_event_loop() + except RuntimeError: + loop = None + if loop and loop.is_running(): + temp_loop = 
asyncio.new_event_loop() + try: + return temp_loop.run_until_complete(coro) + finally: + temp_loop.close() + return asyncio.run(coro) + + +def _show_search() -> None: + _log("Suche gestartet.") + dialog = xbmcgui.Dialog() + query = dialog.input("Serientitel eingeben", type=xbmcgui.INPUT_ALPHANUM).strip() + if not query: + _log("Suche abgebrochen (leere Eingabe).", xbmc.LOGDEBUG) + _show_root_menu() + return + _log(f"Suchbegriff: {query}", xbmc.LOGDEBUG) + _show_search_results(query) + + +def _show_search_results(query: str) -> None: + handle = _get_handle() + _log(f"Suche nach Titeln: {query}") + _set_content(handle, "tvshows") + plugins = _discover_plugins() + if not plugins: + xbmcgui.Dialog().notification("Suche", "Keine Plugins gefunden.", xbmcgui.NOTIFICATION_INFO, 3000) + xbmcplugin.endOfDirectory(handle) + return + for plugin_name, plugin in plugins.items(): + try: + results = _run_async(plugin.search_titles(query)) + except Exception as exc: + _log(f"Suche fehlgeschlagen ({plugin_name}): {exc}", xbmc.LOGWARNING) + continue + _log(f"Treffer ({plugin_name}): {len(results)}", xbmc.LOGDEBUG) + tmdb_prefetched: dict[str, tuple[dict[str, str], dict[str, str], list[TmdbCastMember]]] = {} + if results: + with _busy_dialog(): + tmdb_prefetched = _tmdb_labels_and_art_bulk(list(results)) + for title in results: + info_labels, art, cast = tmdb_prefetched.get(title, _tmdb_labels_and_art(title)) + info_labels = dict(info_labels or {}) + info_labels.setdefault("mediatype", "tvshow") + if (info_labels.get("mediatype") or "").strip().casefold() == "tvshow": + info_labels.setdefault("tvshowtitle", title) + playstate = _title_playstate(plugin_name, title) + merged_info = _apply_playstate_to_info(dict(info_labels), playstate) + label = _label_with_duration(title, info_labels) + label = _label_with_playstate(label, playstate) + label = f"{label} [{plugin_name}]" + direct_play = bool( + plugin_name.casefold() == "einschalten" and _get_setting_bool("einschalten_enable_playback", 
default=False) + ) + _add_directory_item( + handle, + label, + "play_movie" if direct_play else "seasons", + {"plugin": plugin_name, "title": title}, + is_folder=not direct_play, + info_labels=merged_info, + art=art, + cast=cast, + ) + xbmcplugin.endOfDirectory(handle) + + +def _show_seasons(plugin_name: str, title: str) -> None: + handle = _get_handle() + _log(f"Staffeln laden: {plugin_name} / {title}") + plugin = _discover_plugins().get(plugin_name) + if plugin is None: + xbmcgui.Dialog().notification("Staffeln", "Plugin nicht gefunden.", xbmcgui.NOTIFICATION_INFO, 3000) + xbmcplugin.endOfDirectory(handle) + return + + # Einschalten liefert Filme. Für Playback soll nach dem Öffnen des Titels direkt ein + # einzelnes abspielbares Item angezeigt werden: -> ( abspielbar). + # Wichtig: ohne zusätzliche Netzwerkanfragen (sonst bleibt Kodi ggf. im Busy-Spinner hängen). + if (plugin_name or "").casefold() == "einschalten" and _get_setting_bool("einschalten_enable_playback", default=False): + xbmcplugin.setPluginCategory(handle, title) + _set_content(handle, "movies") + playstate = _title_playstate(plugin_name, title) + info_labels: dict[str, object] = {"title": title, "mediatype": "movie"} + info_labels = _apply_playstate_to_info(info_labels, playstate) + display_label = _label_with_playstate(title, playstate) + _add_directory_item( + handle, + display_label, + "play_movie", + {"plugin": plugin_name, "title": title}, + is_folder=False, + info_labels=info_labels, + ) + xbmcplugin.endOfDirectory(handle) + return + + # Optional: Plugins können schnell (ohne Detail-Request) sagen, ob ein Titel ein Film ist. + # Dann zeigen wir direkt ein einzelnes abspielbares Item: -> (). 
+ is_movie = getattr(plugin, "is_movie", None) + if callable(is_movie): + try: + if bool(is_movie(title)): + xbmcplugin.setPluginCategory(handle, title) + _set_content(handle, "movies") + playstate = _title_playstate(plugin_name, title) + info_labels: dict[str, object] = {"title": title, "mediatype": "movie"} + info_labels = _apply_playstate_to_info(info_labels, playstate) + display_label = _label_with_playstate(title, playstate) + _add_directory_item( + handle, + display_label, + "play_movie", + {"plugin": plugin_name, "title": title}, + is_folder=False, + info_labels=info_labels, + ) + xbmcplugin.endOfDirectory(handle) + return + except Exception: + pass + + title_info_labels: dict[str, str] | None = None + title_art: dict[str, str] | None = None + title_cast: list[TmdbCastMember] | None = None + meta_getter = getattr(plugin, "metadata_for", None) + if callable(meta_getter): + try: + with _busy_dialog(): + meta_labels, meta_art, meta_cast = meta_getter(title) + if isinstance(meta_labels, dict): + title_info_labels = {str(k): str(v) for k, v in meta_labels.items() if v} + if isinstance(meta_art, dict): + title_art = {str(k): str(v) for k, v in meta_art.items() if v} + if isinstance(meta_cast, list): + # type: ignore[assignment] - plugins may return cast in their own shape; best-effort only + title_cast = meta_cast # noqa: PGH003 + except Exception: + pass + + try: + seasons = plugin.seasons_for(title) + except Exception as exc: + _log(f"Staffeln laden fehlgeschlagen ({plugin_name}): {exc}", xbmc.LOGWARNING) + xbmcgui.Dialog().notification("Staffeln", "Konnte Staffeln nicht laden.", xbmcgui.NOTIFICATION_INFO, 3000) + xbmcplugin.endOfDirectory(handle) + return + + count = len(seasons) + suffix = "Staffel" if count == 1 else "Staffeln" + xbmcplugin.setPluginCategory(handle, f"{title} ({count} {suffix})") + _set_content(handle, "seasons") + # Staffel-Metadaten (Plot/Poster) optional via TMDB. 
+ _tmdb_labels_and_art(title) + api_key = _get_setting_string("tmdb_api_key").strip() + language = _get_setting_string("tmdb_language").strip() or "de-DE" + show_plot = _get_setting_bool("tmdb_show_plot", default=True) + show_art = _get_setting_bool("tmdb_show_art", default=True) + flags = f"p{int(show_plot)}a{int(show_art)}" + log_requests = _get_setting_bool("tmdb_log_requests", default=False) + log_responses = _get_setting_bool("tmdb_log_responses", default=False) + log_fn = _tmdb_file_log if (log_requests or log_responses) else None + for season in seasons: + info_labels: dict[str, str] | None = None + art: dict[str, str] | None = None + season_number = _extract_first_int(season) + if api_key and season_number is not None: + cache_key = (_TMDB_ID_CACHE.get((title or "").strip().casefold(), 0), season_number, language, flags) + cached = _TMDB_SEASON_SUMMARY_CACHE.get(cache_key) + if cached is None and cache_key[0]: + try: + meta = lookup_tv_season_summary( + tmdb_id=cache_key[0], + season_number=season_number, + api_key=api_key, + language=language, + log=log_fn, + log_responses=log_responses, + ) + except Exception as exc: + if log_fn: + log_fn(f"TMDB ERROR season_summary_failed tmdb_id={cache_key[0]} season={season_number} error={exc!r}") + meta = None + labels = {"title": season} + art_map: dict[str, str] = {} + if meta: + if show_plot and meta.plot: + labels["plot"] = meta.plot + if show_art and meta.poster: + art_map = {"thumb": meta.poster, "poster": meta.poster} + cached = (labels, art_map) + _TMDB_SEASON_SUMMARY_CACHE[cache_key] = cached + if cached is not None: + info_labels, art = cached + merged_labels = dict(info_labels or {}) + if title_info_labels: + merged_labels = dict(title_info_labels) + merged_labels.update(dict(info_labels or {})) + season_state = _season_playstate(plugin_name, title, season) + merged_labels = _apply_playstate_to_info(dict(merged_labels), season_state) + merged_art: dict[str, str] | None = art + if title_art: + merged_art = 
dict(title_art) + if isinstance(art, dict): + merged_art.update({k: str(v) for k, v in art.items() if v}) + + _add_directory_item( + handle, + _label_with_playstate(season, season_state), + "episodes", + {"plugin": plugin_name, "title": title, "season": season}, + is_folder=True, + info_labels=merged_labels or None, + art=merged_art, + cast=title_cast, + ) + xbmcplugin.endOfDirectory(handle) + + +def _show_episodes(plugin_name: str, title: str, season: str) -> None: + handle = _get_handle() + _log(f"Episoden laden: {plugin_name} / {title} / {season}") + plugin = _discover_plugins().get(plugin_name) + if plugin is None: + xbmcgui.Dialog().notification("Episoden", "Plugin nicht gefunden.", xbmcgui.NOTIFICATION_INFO, 3000) + xbmcplugin.endOfDirectory(handle) + return + season_number = _extract_first_int(season) + if season_number is not None: + xbmcplugin.setPluginCategory(handle, f"{title} - Staffel {season_number}") + else: + xbmcplugin.setPluginCategory(handle, f"{title} - {season}") + _set_content(handle, "episodes") + + episodes = list(plugin.episodes_for(title, season)) + if episodes: + show_info, show_art, show_cast = _tmdb_labels_and_art(title) + show_fanart = (show_art or {}).get("fanart") if isinstance(show_art, dict) else "" + show_poster = (show_art or {}).get("poster") if isinstance(show_art, dict) else "" + with _busy_dialog(): + for episode in episodes: + info_labels, art = _tmdb_episode_labels_and_art(title=title, season_label=season, episode_label=episode) + episode_cast = _tmdb_episode_cast(title=title, season_label=season, episode_label=episode) + merged_info = dict(show_info or {}) + merged_info.update(dict(info_labels or {})) + merged_art: dict[str, str] = {} + if isinstance(show_art, dict): + merged_art.update({k: str(v) for k, v in show_art.items() if v}) + if isinstance(art, dict): + merged_art.update({k: str(v) for k, v in art.items() if v}) + + # Kodi Info-Dialog für Episoden hängt oft an diesen Feldern. 
+ season_number = _extract_first_int(season) or 0 + episode_number = _extract_first_int(episode) or 0 + merged_info.setdefault("mediatype", "episode") + merged_info.setdefault("tvshowtitle", title) + if season_number: + merged_info.setdefault("season", str(season_number)) + if episode_number: + merged_info.setdefault("episode", str(episode_number)) + + # Episode-Items ohne eigenes Artwork: Fanart/Poster vom Titel durchreichen. + if show_fanart: + merged_art.setdefault("fanart", show_fanart) + merged_art.setdefault("landscape", show_fanart) + if show_poster: + merged_art.setdefault("poster", show_poster) + + key = _playstate_key(plugin_name=plugin_name, title=title, season=season, episode=episode) + merged_info = _apply_playstate_to_info(merged_info, _get_playstate(key)) + + display_label = episode + _add_directory_item( + handle, + display_label, + "play_episode", + {"plugin": plugin_name, "title": title, "season": season, "episode": episode}, + is_folder=False, + info_labels=merged_info, + art=merged_art, + cast=episode_cast or show_cast, + ) + xbmcplugin.endOfDirectory(handle) + + +def _show_genre_sources() -> None: + handle = _get_handle() + _log("Genre-Quellen laden.") + plugins = _discover_plugins() + sources: list[tuple[str, BasisPlugin]] = [] + for plugin_name, plugin in plugins.items(): + if plugin.__class__.genres is BasisPlugin.genres: + continue + if plugin.__class__.titles_for_genre is BasisPlugin.titles_for_genre: + continue + sources.append((plugin_name, plugin)) + + if not sources: + xbmcgui.Dialog().notification("Genres", "Keine Genre-Quellen gefunden.", xbmcgui.NOTIFICATION_INFO, 3000) + xbmcplugin.endOfDirectory(handle) + return + + for plugin_name, plugin in sources: + _add_directory_item( + handle, + f"Genres [{plugin_name}]", + "genres", + {"plugin": plugin_name}, + is_folder=True, + ) + xbmcplugin.endOfDirectory(handle) + + +def _show_genres(plugin_name: str) -> None: + handle = _get_handle() + _log(f"Genres laden: {plugin_name}") + plugin = 
_discover_plugins().get(plugin_name) + if plugin is None: + xbmcgui.Dialog().notification("Genres", "Plugin nicht gefunden.", xbmcgui.NOTIFICATION_INFO, 3000) + xbmcplugin.endOfDirectory(handle) + return + try: + genres = plugin.genres() + except Exception as exc: + _log(f"Genres konnten nicht geladen werden ({plugin_name}): {exc}", xbmc.LOGWARNING) + xbmcgui.Dialog().notification("Genres", "Genres konnten nicht geladen werden.", xbmcgui.NOTIFICATION_INFO, 3000) + xbmcplugin.endOfDirectory(handle) + return + for genre in genres: + # Wenn Plugin Paging unterstützt, direkt paginierte Titelliste öffnen. + paging_getter = getattr(plugin, "titles_for_genre_page", None) + if callable(paging_getter): + _add_directory_item( + handle, + genre, + "genre_titles_page", + {"plugin": plugin_name, "genre": genre, "page": "1"}, + is_folder=True, + ) + continue + _add_directory_item( + handle, + genre, + "genre_series", + {"plugin": plugin_name, "genre": genre}, + is_folder=True, + ) + xbmcplugin.endOfDirectory(handle) + + +def _show_genre_titles_page(plugin_name: str, genre: str, page: int = 1) -> None: + handle = _get_handle() + plugin = _discover_plugins().get(plugin_name) + if plugin is None: + xbmcgui.Dialog().notification("Genres", "Plugin nicht gefunden.", xbmcgui.NOTIFICATION_INFO, 3000) + xbmcplugin.endOfDirectory(handle) + return + + page = max(1, int(page or 1)) + paging_getter = getattr(plugin, "titles_for_genre_page", None) + if not callable(paging_getter): + xbmcgui.Dialog().notification("Genres", "Paging nicht verfügbar.", xbmcgui.NOTIFICATION_INFO, 3000) + xbmcplugin.endOfDirectory(handle) + return + + total_pages = None + count_getter = getattr(plugin, "genre_page_count", None) + if callable(count_getter): + try: + total_pages = int(count_getter(genre) or 1) + except Exception: + total_pages = None + if total_pages is not None: + page = min(page, max(1, total_pages)) + xbmcplugin.setPluginCategory(handle, f"{genre} ({page}/{total_pages})") + else: + 
xbmcplugin.setPluginCategory(handle, f"{genre} ({page})") + _set_content(handle, "movies" if (plugin_name or "").casefold() == "einschalten" else "tvshows") + + if page > 1: + _add_directory_item( + handle, + "Vorherige Seite", + "genre_titles_page", + {"plugin": plugin_name, "genre": genre, "page": str(page - 1)}, + is_folder=True, + ) + + try: + titles = list(paging_getter(genre, page) or []) + except Exception as exc: + _log(f"Genre-Seite konnte nicht geladen werden ({plugin_name}/{genre} p{page}): {exc}", xbmc.LOGWARNING) + xbmcgui.Dialog().notification("Genres", "Seite konnte nicht geladen werden.", xbmcgui.NOTIFICATION_INFO, 3000) + xbmcplugin.endOfDirectory(handle) + return + + titles = [str(t).strip() for t in titles if t and str(t).strip()] + titles.sort(key=lambda value: value.casefold()) + + show_tmdb = _get_setting_bool("tmdb_genre_metadata", default=False) + if titles: + if show_tmdb: + with _busy_dialog(): + tmdb_prefetched = _tmdb_labels_and_art_bulk(titles) + for title in titles: + info_labels, art, cast = tmdb_prefetched.get(title, _tmdb_labels_and_art(title)) + info_labels = dict(info_labels or {}) + info_labels.setdefault("mediatype", "tvshow") + if (info_labels.get("mediatype") or "").strip().casefold() == "tvshow": + info_labels.setdefault("tvshowtitle", title) + playstate = _title_playstate(plugin_name, title) + info_labels = _apply_playstate_to_info(dict(info_labels), playstate) + display_label = _label_with_duration(title, info_labels) + display_label = _label_with_playstate(display_label, playstate) + direct_play = bool( + plugin_name.casefold() == "einschalten" + and _get_setting_bool("einschalten_enable_playback", default=False) + ) + _add_directory_item( + handle, + display_label, + "play_movie" if direct_play else "seasons", + {"plugin": plugin_name, "title": title}, + is_folder=not direct_play, + info_labels=info_labels, + art=art, + cast=cast, + ) + else: + for title in titles: + playstate = _title_playstate(plugin_name, title) + 
direct_play = bool( + plugin_name.casefold() == "einschalten" + and _get_setting_bool("einschalten_enable_playback", default=False) + ) + _add_directory_item( + handle, + _label_with_playstate(title, playstate), + "play_movie" if direct_play else "seasons", + {"plugin": plugin_name, "title": title}, + is_folder=not direct_play, + info_labels=_apply_playstate_to_info({"title": title}, playstate), + ) + + show_next = False + if total_pages is not None: + show_next = page < total_pages + else: + has_more_getter = getattr(plugin, "genre_has_more", None) + if callable(has_more_getter): + try: + show_next = bool(has_more_getter(genre, page)) + except Exception: + show_next = False + + if show_next: + _add_directory_item( + handle, + "Nächste Seite", + "genre_titles_page", + {"plugin": plugin_name, "genre": genre, "page": str(page + 1)}, + is_folder=True, + ) + xbmcplugin.endOfDirectory(handle) + + +def _title_group_key(title: str) -> str: + raw = (title or "").strip() + if not raw: + return "#" + for char in raw: + if char.isdigit(): + return "0-9" + if char.isalpha(): + normalized = char.casefold() + if normalized == "ä": + normalized = "a" + elif normalized == "ö": + normalized = "o" + elif normalized == "ü": + normalized = "u" + elif normalized == "ß": + normalized = "s" + return normalized.upper() + return "#" + + +def _genre_title_groups() -> list[tuple[str, str]]: + return [ + ("A-E", "A-E"), + ("F-J", "F-J"), + ("K-O", "K-O"), + ("P-T", "P-T"), + ("U-Z", "U-Z"), + ("0-9", "0-9"), + ] + + +def _group_matches(group_code: str, title: str) -> bool: + key = _title_group_key(title) + if group_code == "0-9": + return key == "0-9" + if key == "0-9" or key == "#": + return False + if group_code == "A-E": + return "A" <= key <= "E" + if group_code == "F-J": + return "F" <= key <= "J" + if group_code == "K-O": + return "K" <= key <= "O" + if group_code == "P-T": + return "P" <= key <= "T" + if group_code == "U-Z": + return "U" <= key <= "Z" + return False + + +def 
_get_genre_titles(plugin_name: str, genre: str) -> list[str]: + cache_key = (plugin_name, genre) + cached = _GENRE_TITLES_CACHE.get(cache_key) + if cached is not None: + return list(cached) + plugin = _discover_plugins().get(plugin_name) + if plugin is None: + return [] + titles = plugin.titles_for_genre(genre) + titles = [str(t).strip() for t in titles if t and str(t).strip()] + titles.sort(key=lambda value: value.casefold()) + _GENRE_TITLES_CACHE[cache_key] = list(titles) + return list(titles) + + +def _show_genre_series(plugin_name: str, genre: str) -> None: + handle = _get_handle() + xbmcplugin.setPluginCategory(handle, genre) + for label, group_code in _genre_title_groups(): + _add_directory_item( + handle, + label, + "genre_series_group", + {"plugin": plugin_name, "genre": genre, "group": group_code}, + is_folder=True, + ) + xbmcplugin.endOfDirectory(handle) + + +def _parse_positive_int(value: str, *, default: int = 1) -> int: + try: + parsed = int(str(value or "").strip()) + except Exception: + return default + return parsed if parsed > 0 else default + + +def _popular_genre_label(plugin: BasisPlugin) -> str | None: + label = getattr(plugin, "POPULAR_GENRE_LABEL", None) + if isinstance(label, str) and label.strip(): + return label.strip() + return None + + +def _plugin_has_capability(plugin: BasisPlugin, capability: str) -> bool: + getter = getattr(plugin, "capabilities", None) + if callable(getter): + try: + capabilities = getter() + except Exception: + capabilities = set() + try: + return capability in set(capabilities or []) + except Exception: + return False + # Backwards compatibility: Popular via POPULAR_GENRE_LABEL constant. 
+ if capability == "popular_series": + return _popular_genre_label(plugin) is not None + return False + + +def _plugins_with_popular() -> list[tuple[str, BasisPlugin, str]]: + results: list[tuple[str, BasisPlugin, str]] = [] + for plugin_name, plugin in _discover_plugins().items(): + if not _plugin_has_capability(plugin, "popular_series"): + continue + label = _popular_genre_label(plugin) or "" + results.append((plugin_name, plugin, label)) + return results + + +def _show_popular(plugin_name: str | None = None, page: int = 1) -> None: + handle = _get_handle() + page_size = 10 + page = max(1, int(page or 1)) + + if plugin_name: + plugin = _discover_plugins().get(plugin_name) + if plugin is None or not _plugin_has_capability(plugin, "popular_series"): + xbmcgui.Dialog().notification("Beliebte Serien", "Plugin nicht gefunden.", xbmcgui.NOTIFICATION_INFO, 3000) + xbmcplugin.endOfDirectory(handle) + return + try: + popular_getter = getattr(plugin, "popular_series", None) + if callable(popular_getter): + titles = list(popular_getter() or []) + else: + label = _popular_genre_label(plugin) + if not label: + titles = [] + else: + titles = list(plugin.titles_for_genre(label) or []) + except Exception as exc: + _log(f"Beliebte Serien konnten nicht geladen werden ({plugin_name}): {exc}", xbmc.LOGWARNING) + xbmcgui.Dialog().notification("Beliebte Serien", "Serien konnten nicht geladen werden.", xbmcgui.NOTIFICATION_INFO, 3000) + xbmcplugin.endOfDirectory(handle) + return + + titles = [str(t).strip() for t in titles if t and str(t).strip()] + titles.sort(key=lambda value: value.casefold()) + total = len(titles) + total_pages = max(1, (total + page_size - 1) // page_size) + page = min(page, total_pages) + xbmcplugin.setPluginCategory(handle, f"Beliebte Serien [{plugin_name}] ({page}/{total_pages})") + _set_content(handle, "tvshows") + + if total_pages > 1 and page > 1: + _add_directory_item( + handle, + "Vorherige Seite", + "popular", + {"plugin": plugin_name, "page": str(page - 
1)}, + is_folder=True, + ) + + start = (page - 1) * page_size + end = start + page_size + page_items = titles[start:end] + + show_tmdb = _get_setting_bool("tmdb_genre_metadata", default=False) + if page_items: + if show_tmdb: + with _busy_dialog(): + tmdb_prefetched = _tmdb_labels_and_art_bulk(page_items) + for title in page_items: + info_labels, art, cast = tmdb_prefetched.get(title, _tmdb_labels_and_art(title)) + info_labels = dict(info_labels or {}) + info_labels.setdefault("mediatype", "tvshow") + if (info_labels.get("mediatype") or "").strip().casefold() == "tvshow": + info_labels.setdefault("tvshowtitle", title) + playstate = _title_playstate(plugin_name, title) + info_labels = _apply_playstate_to_info(dict(info_labels), playstate) + display_label = _label_with_duration(title, info_labels) + display_label = _label_with_playstate(display_label, playstate) + _add_directory_item( + handle, + display_label, + "seasons", + {"plugin": plugin_name, "title": title}, + is_folder=True, + info_labels=info_labels, + art=art, + cast=cast, + ) + else: + for title in page_items: + playstate = _title_playstate(plugin_name, title) + _add_directory_item( + handle, + _label_with_playstate(title, playstate), + "seasons", + {"plugin": plugin_name, "title": title}, + is_folder=True, + info_labels=_apply_playstate_to_info({"title": title}, playstate), + ) + + if total_pages > 1 and page < total_pages: + _add_directory_item( + handle, + "Nächste Seite", + "popular", + {"plugin": plugin_name, "page": str(page + 1)}, + is_folder=True, + ) + xbmcplugin.endOfDirectory(handle) + return + + sources = _plugins_with_popular() + if not sources: + xbmcgui.Dialog().notification("Beliebte Serien", "Keine Quellen gefunden.", xbmcgui.NOTIFICATION_INFO, 3000) + xbmcplugin.endOfDirectory(handle) + return + + xbmcplugin.setPluginCategory(handle, "Beliebte Serien") + for name, plugin, _label in sources: + _add_directory_item( + handle, + f"Beliebte Serien [{plugin.name}]", + "popular", + {"plugin": 
name, "page": "1"}, + is_folder=True, + ) + xbmcplugin.endOfDirectory(handle) + + +def _show_new_titles(plugin_name: str, page: int = 1) -> None: + handle = _get_handle() + page_size = 10 + page = max(1, int(page or 1)) + + plugin_name = (plugin_name or "").strip() + plugin = _discover_plugins().get(plugin_name) + if plugin is None or not _plugin_has_capability(plugin, "new_titles"): + xbmcgui.Dialog().notification("Neue Titel", "Plugin nicht gefunden.", xbmcgui.NOTIFICATION_INFO, 3000) + xbmcplugin.endOfDirectory(handle) + return + + getter = getattr(plugin, "new_titles", None) + if not callable(getter): + xbmcgui.Dialog().notification("Neue Titel", "Nicht verfügbar.", xbmcgui.NOTIFICATION_INFO, 3000) + xbmcplugin.endOfDirectory(handle) + return + + paging_getter = getattr(plugin, "new_titles_page", None) + has_more_getter = getattr(plugin, "new_titles_has_more", None) + + if callable(paging_getter): + xbmcplugin.setPluginCategory(handle, f"Neue Titel [{plugin_name}] ({page})") + _set_content(handle, "movies" if plugin_name.casefold() == "einschalten" else "tvshows") + if page > 1: + _add_directory_item( + handle, + "Vorherige Seite", + "new_titles", + {"plugin": plugin_name, "page": str(page - 1)}, + is_folder=True, + ) + try: + page_items = list(paging_getter(page) or []) + except Exception as exc: + _log(f"Neue Titel konnten nicht geladen werden ({plugin_name} p{page}): {exc}", xbmc.LOGWARNING) + xbmcgui.Dialog().notification("Neue Titel", "Titel konnten nicht geladen werden.", xbmcgui.NOTIFICATION_INFO, 3000) + xbmcplugin.endOfDirectory(handle) + return + page_items = [str(t).strip() for t in page_items if t and str(t).strip()] + page_items.sort(key=lambda value: value.casefold()) + else: + try: + titles = list(getter() or []) + except Exception as exc: + _log(f"Neue Titel konnten nicht geladen werden ({plugin_name}): {exc}", xbmc.LOGWARNING) + xbmcgui.Dialog().notification("Neue Titel", "Titel konnten nicht geladen werden.", xbmcgui.NOTIFICATION_INFO, 3000) + 
xbmcplugin.endOfDirectory(handle) + return + + titles = [str(t).strip() for t in titles if t and str(t).strip()] + titles.sort(key=lambda value: value.casefold()) + total = len(titles) + if total == 0: + xbmcgui.Dialog().notification( + "Neue Titel", + "Keine Titel gefunden (Basis-URL/Index prüfen).", + xbmcgui.NOTIFICATION_INFO, + 4000, + ) + total_pages = max(1, (total + page_size - 1) // page_size) + page = min(page, total_pages) + xbmcplugin.setPluginCategory(handle, f"Neue Titel [{plugin_name}] ({page}/{total_pages})") + _set_content(handle, "movies" if plugin_name.casefold() == "einschalten" else "tvshows") + + if total_pages > 1 and page > 1: + _add_directory_item( + handle, + "Vorherige Seite", + "new_titles", + {"plugin": plugin_name, "page": str(page - 1)}, + is_folder=True, + ) + + start = (page - 1) * page_size + end = start + page_size + page_items = titles[start:end] + show_tmdb = _get_setting_bool("tmdb_genre_metadata", default=False) + if page_items: + if show_tmdb: + with _busy_dialog(): + tmdb_prefetched = _tmdb_labels_and_art_bulk(page_items) + for title in page_items: + info_labels, art, cast = tmdb_prefetched.get(title, _tmdb_labels_and_art(title)) + info_labels = dict(info_labels or {}) + info_labels.setdefault("mediatype", "movie") + playstate = _title_playstate(plugin_name, title) + info_labels = _apply_playstate_to_info(dict(info_labels), playstate) + display_label = _label_with_duration(title, info_labels) + display_label = _label_with_playstate(display_label, playstate) + direct_play = bool( + plugin_name.casefold() == "einschalten" + and _get_setting_bool("einschalten_enable_playback", default=False) + ) + _add_directory_item( + handle, + display_label, + "play_movie" if direct_play else "seasons", + {"plugin": plugin_name, "title": title}, + is_folder=not direct_play, + info_labels=info_labels, + art=art, + cast=cast, + ) + else: + for title in page_items: + playstate = _title_playstate(plugin_name, title) + direct_play = bool( + 
plugin_name.casefold() == "einschalten" + and _get_setting_bool("einschalten_enable_playback", default=False) + ) + _add_directory_item( + handle, + _label_with_playstate(title, playstate), + "play_movie" if direct_play else "seasons", + {"plugin": plugin_name, "title": title}, + is_folder=not direct_play, + info_labels=_apply_playstate_to_info({"title": title}, playstate), + ) + + show_next = False + if callable(paging_getter) and callable(has_more_getter): + try: + show_next = bool(has_more_getter(page)) + except Exception: + show_next = False + elif "total_pages" in locals(): + show_next = bool(total_pages > 1 and page < total_pages) # type: ignore[name-defined] + + if show_next: + _add_directory_item( + handle, + "Nächste Seite", + "new_titles", + {"plugin": plugin_name, "page": str(page + 1)}, + is_folder=True, + ) + xbmcplugin.endOfDirectory(handle) + + +def _show_latest_episodes(plugin_name: str, page: int = 1) -> None: + handle = _get_handle() + plugin_name = (plugin_name or "").strip() + plugin = _discover_plugins().get(plugin_name) + if not plugin: + xbmcgui.Dialog().notification("Neueste Folgen", "Plugin nicht gefunden.", xbmcgui.NOTIFICATION_INFO, 3000) + xbmcplugin.endOfDirectory(handle) + return + + getter = getattr(plugin, "latest_episodes", None) + if not callable(getter): + xbmcgui.Dialog().notification("Neueste Folgen", "Nicht unterstützt.", xbmcgui.NOTIFICATION_INFO, 3000) + xbmcplugin.endOfDirectory(handle) + return + + xbmcplugin.setPluginCategory(handle, f"{plugin_name}: Neueste Folgen") + _set_content(handle, "episodes") + + try: + with _busy_dialog(): + entries = list(getter(page) or []) + except Exception as exc: + _log(f"Neueste Folgen fehlgeschlagen ({plugin_name}): {exc}", xbmc.LOGWARNING) + xbmcgui.Dialog().notification("Neueste Folgen", "Abruf fehlgeschlagen.", xbmcgui.NOTIFICATION_INFO, 3000) + xbmcplugin.endOfDirectory(handle) + return + + for entry in entries: + try: + title = str(getattr(entry, "series_title", "") or "").strip() + 
season_number = int(getattr(entry, "season", 0) or 0) + episode_number = int(getattr(entry, "episode", 0) or 0) + url = str(getattr(entry, "url", "") or "").strip() + airdate = str(getattr(entry, "airdate", "") or "").strip() + except Exception: + continue + if not title or not url or season_number < 0 or episode_number <= 0: + continue + + season_label = f"Staffel {season_number}" + episode_label = f"Episode {episode_number}" + key = _playstate_key(plugin_name=plugin_name, title=title, season=season_label, episode=episode_label) + playstate = _get_playstate(key) + + label = f"{title} - S{season_number:02d}E{episode_number:02d}" + if airdate: + label = f"{label} ({airdate})" + label = _label_with_playstate(label, playstate) + + info_labels: dict[str, object] = { + "title": f"{title} - S{season_number:02d}E{episode_number:02d}", + "tvshowtitle": title, + "season": season_number, + "episode": episode_number, + "mediatype": "episode", + } + info_labels = _apply_playstate_to_info(info_labels, playstate) + + _add_directory_item( + handle, + label, + "play_episode_url", + { + "plugin": plugin_name, + "title": title, + "season": str(season_number), + "episode": str(episode_number), + "url": url, + }, + is_folder=False, + info_labels=info_labels, + ) + + xbmcplugin.endOfDirectory(handle) + + +def _show_genre_series_group(plugin_name: str, genre: str, group_code: str, page: int = 1) -> None: + handle = _get_handle() + page_size = 10 + page = max(1, int(page or 1)) + + try: + titles = _get_genre_titles(plugin_name, genre) + except Exception as exc: + _log(f"Genre-Serien konnten nicht geladen werden ({plugin_name}): {exc}", xbmc.LOGWARNING) + xbmcgui.Dialog().notification("Genres", "Serien konnten nicht geladen werden.", xbmcgui.NOTIFICATION_INFO, 3000) + xbmcplugin.endOfDirectory(handle) + return + + filtered = [title for title in titles if _group_matches(group_code, title)] + total = len(filtered) + total_pages = max(1, (total + page_size - 1) // page_size) + page = 
min(page, total_pages) + xbmcplugin.setPluginCategory(handle, f"{genre} [{group_code}] ({page}/{total_pages})") + + if total_pages > 1 and page > 1: + _add_directory_item( + handle, + "Vorherige Seite", + "genre_series_group", + {"plugin": plugin_name, "genre": genre, "group": group_code, "page": str(page - 1)}, + is_folder=True, + ) + + start = (page - 1) * page_size + end = start + page_size + page_items = filtered[start:end] + show_tmdb = _get_setting_bool("tmdb_genre_metadata", default=False) + + if page_items: + if show_tmdb: + with _busy_dialog(): + tmdb_prefetched = _tmdb_labels_and_art_bulk(page_items) + for title in page_items: + info_labels, art, cast = tmdb_prefetched.get(title, _tmdb_labels_and_art(title)) + info_labels = dict(info_labels or {}) + info_labels.setdefault("mediatype", "tvshow") + if (info_labels.get("mediatype") or "").strip().casefold() == "tvshow": + info_labels.setdefault("tvshowtitle", title) + playstate = _title_playstate(plugin_name, title) + info_labels = _apply_playstate_to_info(dict(info_labels), playstate) + display_label = _label_with_duration(title, info_labels) + display_label = _label_with_playstate(display_label, playstate) + _add_directory_item( + handle, + display_label, + "seasons", + {"plugin": plugin_name, "title": title}, + is_folder=True, + info_labels=info_labels, + art=art, + cast=cast, + ) + else: + for title in page_items: + playstate = _title_playstate(plugin_name, title) + _add_directory_item( + handle, + _label_with_playstate(title, playstate), + "seasons", + {"plugin": plugin_name, "title": title}, + is_folder=True, + info_labels=_apply_playstate_to_info({"title": title}, playstate), + ) + + if total_pages > 1 and page < total_pages: + _add_directory_item( + handle, + "Nächste Seite", + "genre_series_group", + {"plugin": plugin_name, "genre": genre, "group": group_code, "page": str(page + 1)}, + is_folder=True, + ) + xbmcplugin.endOfDirectory(handle) + +def _open_settings() -> None: + """Oeffnet das 
Kodi-Addon-Settings-Dialog."""
    # Tail of _open_settings(): outside Kodi the stub module is missing, so fail loudly.
    if xbmcaddon is None:  # pragma: no cover - outside Kodi
        raise RuntimeError("xbmcaddon ist nicht verfuegbar (KodiStub).")
    addon = xbmcaddon.Addon()
    addon.openSettings()


def _extract_first_int(value: str) -> int | None:
    """Return the first run of digits in *value* as an int, or None if there is none.

    Used to pull season/episode numbers out of labels like "Staffel 2".
    """
    match = re.search(r"(\d+)", value or "")
    if not match:
        return None
    try:
        # int() on a digits-only match should never fail; guarded defensively anyway.
        return int(match.group(1))
    except Exception:
        return None


def _duration_label(duration_seconds: int) -> str:
    """Format *duration_seconds* as an "HH:MM Laufzeit" label.

    Returns an empty string for non-positive or unparseable input.
    """
    try:
        duration_seconds = int(duration_seconds or 0)
    except Exception:
        duration_seconds = 0
    if duration_seconds <= 0:
        return ""
    # Truncate to whole minutes, then split into hours:minutes.
    total_minutes = max(0, duration_seconds // 60)
    hours = max(0, total_minutes // 60)
    minutes = max(0, total_minutes % 60)
    return f"{hours:02d}:{minutes:02d} Laufzeit"


def _label_with_duration(label: str, info_labels: dict[str, str] | None) -> str:
    """Return *label* unchanged.

    NOTE(review): currently a no-op even though callers pass *info_labels* —
    presumably a disabled duration-suffix hook (see _duration_label above);
    confirm before removing the parameter.
    """
    return label


def _play_final_link(
    link: str,
    *,
    display_title: str | None = None,
    info_labels: dict[str, str] | None = None,
    art: dict[str, str] | None = None,
    cast: list[TmdbCastMember] | None = None,
    resolve_handle: int | None = None,
) -> None:
    """Hand the final stream *link* to Kodi as a playable ListItem.

    Optional metadata (*display_title*, *info_labels*, *art*, *cast*) is applied
    best-effort; every Kodi call is wrapped so stub environments do not crash.
    """
    list_item = xbmcgui.ListItem(label=display_title or "", path=link)
    try:
        list_item.setProperty("IsPlayable", "true")
    except Exception:
        pass
    merged_info: dict[str, object] = dict(info_labels or {})
    if display_title:
        merged_info["title"] = display_title
    _apply_video_info(list_item, merged_info, cast)
    if art:
        # setArt() may be missing on Kodi stubs; look it up defensively.
        setter = getattr(list_item, "setArt", None)
        if callable(setter):
            try:
                setter(art)
            except Exception:
                pass

    # For plugin play items Kodi should start playback via setResolvedUrl().
    # player.play() can make Kodi trigger the item callback again (duplicate hoster selection).
+ resolved = False + if resolve_handle is not None: + resolver = getattr(xbmcplugin, "setResolvedUrl", None) + if callable(resolver): + try: + resolver(resolve_handle, True, list_item) + resolved = True + except Exception: + pass + + if not resolved: + player = xbmc.Player() + player.play(item=link, listitem=list_item) + + +def _track_playback_and_update_state(key: str) -> None: + if not key: + return + monitor = xbmc.Monitor() if xbmc is not None and hasattr(xbmc, "Monitor") else None + player = xbmc.Player() + + # Wait for playback start. + started = False + for _ in range(30): + try: + if player.isPlayingVideo(): + started = True + break + except Exception: + pass + if monitor and monitor.waitForAbort(0.5): + return + if not started: + return + + last_pos = 0.0 + total = 0.0 + while True: + try: + if not player.isPlayingVideo(): + break + last_pos = float(player.getTime() or 0.0) + total = float(player.getTotalTime() or 0.0) + except Exception: + pass + if monitor and monitor.waitForAbort(1.0): + return + + if total <= 0.0: + return + percent = max(0.0, min(1.0, last_pos / total)) + state: dict[str, object] = {"last_position": int(last_pos), "resume_total": int(total), "percent": percent} + if percent >= WATCHED_THRESHOLD: + state["watched"] = True + state["resume_position"] = 0 + elif last_pos > 0: + state["watched"] = False + state["resume_position"] = int(last_pos) + _set_playstate(key, state) + + # Zusätzlich aggregiert speichern, damit Titel-/Staffel-Listen "gesehen/fortsetzen" + # anzeigen können (für Filme/Serien gleichermaßen). 
+ try: + parts = str(key).split("\t") + if len(parts) == 4: + plugin_name, title, season, _episode = parts + plugin_name = (plugin_name or "").strip() + title = (title or "").strip() + season = (season or "").strip() + if plugin_name and title: + _set_playstate(_playstate_key(plugin_name=plugin_name, title=title, season="", episode=""), state) + if season: + _set_playstate(_playstate_key(plugin_name=plugin_name, title=title, season=season, episode=""), state) + except Exception: + pass + + +def _play_episode( + plugin_name: str, + title: str, + season: str, + episode: str, + *, + resolve_handle: int | None = None, +) -> None: + _log(f"Play anfordern: {plugin_name} / {title} / {season} / {episode}") + plugin = _discover_plugins().get(plugin_name) + if plugin is None: + xbmcgui.Dialog().notification("Play", "Plugin nicht gefunden.", xbmcgui.NOTIFICATION_INFO, 3000) + return + + available_hosters: list[str] = [] + hoster_getter = getattr(plugin, "available_hosters_for", None) + if callable(hoster_getter): + try: + with _busy_dialog(): + available_hosters = list(hoster_getter(title, season, episode) or []) + except Exception as exc: + _log(f"Hoster laden fehlgeschlagen ({plugin_name}): {exc}", xbmc.LOGWARNING) + + selected_hoster: str | None = None + if available_hosters: + if len(available_hosters) == 1: + selected_hoster = available_hosters[0] + else: + selected_index = xbmcgui.Dialog().select("Hoster wählen", available_hosters) + if selected_index is None or selected_index < 0: + _log("Play abgebrochen (kein Hoster gewählt).", xbmc.LOGDEBUG) + return + selected_hoster = available_hosters[selected_index] + + # Manche Plugins erlauben (optional) eine temporaere Einschränkung auf einen Hoster. 
+ preferred_setter = getattr(plugin, "set_preferred_hosters", None) + restore_hosters: list[str] | None = None + if selected_hoster and callable(preferred_setter): + current = getattr(plugin, "_preferred_hosters", None) + if isinstance(current, list): + restore_hosters = list(current) + preferred_setter([selected_hoster]) + + try: + link = plugin.stream_link_for(title, season, episode) + if not link: + _log("Kein Stream-Link gefunden.", xbmc.LOGWARNING) + xbmcgui.Dialog().notification("Play", "Kein Stream-Link gefunden.", xbmcgui.NOTIFICATION_INFO, 3000) + return + _log(f"Stream-Link: {link}", xbmc.LOGDEBUG) + final_link = plugin.resolve_stream_link(link) or link + finally: + if restore_hosters is not None and callable(preferred_setter): + preferred_setter(restore_hosters) + + _log(f"Finaler Link: {final_link}", xbmc.LOGDEBUG) + season_number = _extract_first_int(season) + episode_number = _extract_first_int(episode) + if season_number is not None and episode_number is not None: + display_title = f"{title} - S{season_number:02d}E{episode_number:02d}" + else: + display_title = title + info_labels, art, cast = _tmdb_labels_and_art(title) + display_title = _label_with_duration(display_title, info_labels) + _play_final_link( + final_link, + display_title=display_title, + info_labels=info_labels, + art=art, + cast=cast, + resolve_handle=resolve_handle, + ) + _track_playback_and_update_state( + _playstate_key(plugin_name=plugin_name, title=title, season=season, episode=episode) + ) + + +def _play_episode_url( + plugin_name: str, + *, + title: str, + season_number: int, + episode_number: int, + episode_url: str, + resolve_handle: int | None = None, +) -> None: + season_label = f"Staffel {season_number}" if season_number > 0 else "" + episode_label = f"Episode {episode_number}" if episode_number > 0 else "" + _log(f"Play (URL) anfordern: {plugin_name} / {title} / {season_label} / {episode_label} / {episode_url}") + plugin = _discover_plugins().get(plugin_name) + if plugin 
is None: + xbmcgui.Dialog().notification("Play", "Plugin nicht gefunden.", xbmcgui.NOTIFICATION_INFO, 3000) + return + + available_hosters: list[str] = [] + hoster_getter = getattr(plugin, "available_hosters_for_url", None) + if callable(hoster_getter): + try: + with _busy_dialog(): + available_hosters = list(hoster_getter(episode_url) or []) + except Exception as exc: + _log(f"Hoster laden fehlgeschlagen ({plugin_name}): {exc}", xbmc.LOGWARNING) + + selected_hoster: str | None = None + if available_hosters: + if len(available_hosters) == 1: + selected_hoster = available_hosters[0] + else: + selected_index = xbmcgui.Dialog().select("Hoster wählen", available_hosters) + if selected_index is None or selected_index < 0: + _log("Play abgebrochen (kein Hoster gewählt).", xbmc.LOGDEBUG) + return + selected_hoster = available_hosters[selected_index] + + preferred_setter = getattr(plugin, "set_preferred_hosters", None) + restore_hosters: list[str] | None = None + if selected_hoster and callable(preferred_setter): + current = getattr(plugin, "_preferred_hosters", None) + if isinstance(current, list): + restore_hosters = list(current) + preferred_setter([selected_hoster]) + + try: + link_getter = getattr(plugin, "stream_link_for_url", None) + if not callable(link_getter): + xbmcgui.Dialog().notification("Play", "Nicht unterstützt.", xbmcgui.NOTIFICATION_INFO, 3000) + return + link = link_getter(episode_url) + if not link: + _log("Kein Stream-Link gefunden.", xbmc.LOGWARNING) + xbmcgui.Dialog().notification("Play", "Kein Stream-Link gefunden.", xbmcgui.NOTIFICATION_INFO, 3000) + return + _log(f"Stream-Link: {link}", xbmc.LOGDEBUG) + final_link = plugin.resolve_stream_link(link) or link + finally: + if restore_hosters is not None and callable(preferred_setter): + preferred_setter(restore_hosters) + + display_title = f"{title} - S{season_number:02d}E{episode_number:02d}" if season_number and episode_number else title + info_labels, art, cast = _tmdb_labels_and_art(title) + 
info_labels = dict(info_labels or {}) + info_labels.setdefault("mediatype", "episode") + info_labels.setdefault("tvshowtitle", title) + if season_number > 0: + info_labels["season"] = str(season_number) + if episode_number > 0: + info_labels["episode"] = str(episode_number) + display_title = _label_with_duration(display_title, info_labels) + _play_final_link( + final_link, + display_title=display_title, + info_labels=info_labels, + art=art, + cast=cast, + resolve_handle=resolve_handle, + ) + _track_playback_and_update_state( + _playstate_key(plugin_name=plugin_name, title=title, season=season_label, episode=episode_label) + ) + + +def _parse_params() -> dict[str, str]: + """Parst Kodi-Plugin-Parameter aus `sys.argv[2]`.""" + if len(sys.argv) <= 2 or not sys.argv[2]: + return {} + raw_params = parse_qs(sys.argv[2].lstrip("?"), keep_blank_values=True) + return {key: values[0] for key, values in raw_params.items()} + + +def run() -> None: + params = _parse_params() + action = params.get("action") + _log(f"Action: {action}", xbmc.LOGDEBUG) + if action == "search": + _show_search() + elif action == "plugin_menu": + _show_plugin_menu(params.get("plugin", "")) + elif action == "plugin_search": + _show_plugin_search(params.get("plugin", "")) + elif action == "genre_sources": + _show_genre_sources() + elif action == "genres": + _show_genres(params.get("plugin", "")) + elif action == "new_titles": + _show_new_titles( + params.get("plugin", ""), + _parse_positive_int(params.get("page", "1"), default=1), + ) + elif action == "latest_episodes": + _show_latest_episodes( + params.get("plugin", ""), + _parse_positive_int(params.get("page", "1"), default=1), + ) + elif action == "genre_series": + _show_genre_series( + params.get("plugin", ""), + params.get("genre", ""), + ) + elif action == "genre_titles_page": + _show_genre_titles_page( + params.get("plugin", ""), + params.get("genre", ""), + _parse_positive_int(params.get("page", "1"), default=1), + ) + elif action == 
"genre_series_group": + _show_genre_series_group( + params.get("plugin", ""), + params.get("genre", ""), + params.get("group", ""), + _parse_positive_int(params.get("page", "1"), default=1), + ) + elif action == "popular": + _show_popular( + params.get("plugin") or None, + _parse_positive_int(params.get("page", "1"), default=1), + ) + elif action == "settings": + _open_settings() + elif action == "seasons": + _show_seasons(params.get("plugin", ""), params.get("title", "")) + elif action == "episodes": + _show_episodes( + params.get("plugin", ""), + params.get("title", ""), + params.get("season", ""), + ) + elif action == "play_episode": + _play_episode( + params.get("plugin", ""), + params.get("title", ""), + params.get("season", ""), + params.get("episode", ""), + resolve_handle=_get_handle(), + ) + elif action == "play_movie": + plugin_name = params.get("plugin", "") + title = params.get("title", "") + # Einschalten liefert Filme (keine Staffeln/Episoden). Für Playback nutzen wir: + # -> Stream -> . 
+ if (plugin_name or "").casefold() == "einschalten": + _play_episode( + plugin_name, + title, + "Stream", + title, + resolve_handle=_get_handle(), + ) + else: + _play_episode( + plugin_name, + title, + "Film", + "Stream", + resolve_handle=_get_handle(), + ) + elif action == "play_episode_url": + _play_episode_url( + params.get("plugin", ""), + title=params.get("title", ""), + season_number=_parse_positive_int(params.get("season", "0"), default=0), + episode_number=_parse_positive_int(params.get("episode", "0"), default=0), + episode_url=params.get("url", ""), + resolve_handle=_get_handle(), + ) + elif action == "play": + link = params.get("url", "") + if link: + _play_final_link(link, resolve_handle=_get_handle()) + else: + _show_root_menu() + + +if __name__ == "__main__": + run() diff --git a/dist/plugin.video.viewit/http_session_pool.py b/dist/plugin.video.viewit/http_session_pool.py new file mode 100644 index 0000000..725fa43 --- /dev/null +++ b/dist/plugin.video.viewit/http_session_pool.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python3 +"""Shared requests.Session pooling for plugins. + +Goal: reuse TCP connections/cookies across multiple HTTP calls within a Kodi session. 
+""" + +from __future__ import annotations + +from typing import Any, Dict, Optional + +try: # pragma: no cover - optional dependency + import requests +except Exception: # pragma: no cover + requests = None + +_SESSIONS: Dict[str, Any] = {} + + +def get_requests_session(key: str, *, headers: Optional[dict[str, str]] = None): + """Return a cached `requests.Session()` for the given key.""" + if requests is None: + raise RuntimeError("requests ist nicht verfuegbar.") + key = (key or "").strip() or "default" + session = _SESSIONS.get(key) + if session is None: + session = requests.Session() + _SESSIONS[key] = session + if headers: + try: + session.headers.update({str(k): str(v) for k, v in headers.items() if k and v}) + except Exception: + pass + return session + diff --git a/dist/plugin.video.viewit/icon.png b/dist/plugin.video.viewit/icon.png new file mode 100644 index 0000000..9e65f73 Binary files /dev/null and b/dist/plugin.video.viewit/icon.png differ diff --git a/dist/plugin.video.viewit/plugin_helpers.py b/dist/plugin.video.viewit/plugin_helpers.py new file mode 100644 index 0000000..ef634c0 --- /dev/null +++ b/dist/plugin.video.viewit/plugin_helpers.py @@ -0,0 +1,128 @@ +#!/usr/bin/env python3 +"""Shared helpers for ViewIt plugins. + +Focus: +- Kodi addon settings access (string/bool) +- Optional URL notifications +- Optional URL logging +- Optional HTML response dumps + +Designed to work both in Kodi and outside Kodi (for linting/tests). 
"""

from __future__ import annotations

from datetime import datetime
import hashlib
import os
from typing import Optional

try:  # pragma: no cover - Kodi runtime
    import xbmcaddon  # type: ignore[import-not-found]
    import xbmcvfs  # type: ignore[import-not-found]
    import xbmcgui  # type: ignore[import-not-found]
except ImportError:  # pragma: no cover - allow importing outside Kodi
    # Outside Kodi the modules are None; every helper below degrades gracefully.
    xbmcaddon = None
    xbmcvfs = None
    xbmcgui = None


def get_setting_string(addon_id: str, setting_id: str, *, default: str = "") -> str:
    """Read a string setting of add-on *addon_id*; *default* outside Kodi or on error.

    The value is whitespace-stripped. Prefers getSettingString() when the running
    Kodi exposes it, otherwise falls back to the legacy getSetting().
    """
    if xbmcaddon is None:
        return default
    try:
        addon = xbmcaddon.Addon(addon_id)
        getter = getattr(addon, "getSettingString", None)
        if getter is not None:
            return str(getter(setting_id) or "").strip()
        return str(addon.getSetting(setting_id) or "").strip()
    except Exception:
        return default


def get_setting_bool(addon_id: str, setting_id: str, *, default: bool = False) -> bool:
    """Read a boolean setting; *default* outside Kodi or on error.

    When getSettingBool() is unavailable, the raw string is parsed:
    "1"/"true"/"yes"/"on" (case-insensitive) count as True.
    """
    if xbmcaddon is None:
        return default
    try:
        addon = xbmcaddon.Addon(addon_id)
        getter = getattr(addon, "getSettingBool", None)
        if getter is not None:
            return bool(getter(setting_id))
        raw = addon.getSetting(setting_id)
        return str(raw).strip().lower() in {"1", "true", "yes", "on"}
    except Exception:
        return default


def notify_url(addon_id: str, *, heading: str, url: str, enabled_setting_id: str) -> None:
    """Show a 3-second Kodi notification with *url*, gated by the bool setting *enabled_setting_id*.

    Silently does nothing outside Kodi, when the setting is off, or on any GUI error.
    """
    if xbmcgui is None:
        return
    if not get_setting_bool(addon_id, enabled_setting_id, default=False):
        return
    try:
        xbmcgui.Dialog().notification(heading, url, xbmcgui.NOTIFICATION_INFO, 3000)
    except Exception:
        return


def _profile_logs_dir(addon_id: str) -> Optional[str]:
    """Return (creating it if needed) the add-on profile's "logs" directory.

    Returns None outside Kodi or when the directory cannot be resolved/created.
    """
    if xbmcaddon is None or xbmcvfs is None:
        return None
    try:
        addon = xbmcaddon.Addon(addon_id)
        profile = xbmcvfs.translatePath(addon.getAddonInfo("profile"))
        log_dir = os.path.join(profile, "logs")
        if not xbmcvfs.exists(log_dir):
            xbmcvfs.mkdirs(log_dir)
        return log_dir
    except Exception:
        return None


def _append_text_file(path:
str, content: str) -> None:
    # Best-effort append: try a plain OS-level open() first, then fall back to
    # xbmcvfs.File (presumably needed for Kodi VFS paths like special:// — confirm).
    try:
        with open(path, "a", encoding="utf-8") as handle:
            handle.write(content)
        return
    except Exception:
        pass
    if xbmcvfs is None:
        return
    try:
        handle = xbmcvfs.File(path, "a")
        handle.write(content)
        handle.close()
    except Exception:
        return


def log_url(addon_id: str, *, enabled_setting_id: str, log_filename: str, url: str, kind: str = "VISIT") -> None:
    """Append a tab-separated "<timestamp>\\t<kind>\\t<url>" line to *log_filename*.

    Disabled unless the bool setting *enabled_setting_id* is on. Writes into the
    profile logs directory when available, otherwise next to this module.
    """
    if not get_setting_bool(addon_id, enabled_setting_id, default=False):
        return
    # NOTE(review): datetime.utcnow() is deprecated since Python 3.12; consider
    # datetime.now(timezone.utc) once the Kodi Python baseline allows it.
    timestamp = datetime.utcnow().isoformat(timespec="seconds") + "Z"
    line = f"{timestamp}\t{kind}\t{url}\n"
    log_dir = _profile_logs_dir(addon_id)
    if log_dir:
        _append_text_file(os.path.join(log_dir, log_filename), line)
        return
    _append_text_file(os.path.join(os.path.dirname(__file__), log_filename), line)


def dump_response_html(
    addon_id: str,
    *,
    enabled_setting_id: str,
    url: str,
    body: str,
    filename_prefix: str,
) -> None:
    """Dump an HTTP response *body* to a timestamped .html file for debugging.

    Gated by the bool setting *enabled_setting_id*. The filename embeds an MD5
    digest of *url* plus a microsecond timestamp, so repeated dumps of the same
    URL stay distinguishable. Files land in the profile logs directory, or next
    to this module when no profile directory is available.
    """
    if not get_setting_bool(addon_id, enabled_setting_id, default=False):
        return
    timestamp = datetime.utcnow().strftime("%Y%m%d_%H%M%S_%f")
    digest = hashlib.md5(url.encode("utf-8")).hexdigest()  # nosec - filename only
    filename = f"{filename_prefix}_{timestamp}_{digest}.html"
    log_dir = _profile_logs_dir(addon_id)
    path = os.path.join(log_dir, filename) if log_dir else os.path.join(os.path.dirname(__file__), filename)
    # NOTE(review): only the body is written; this f-string looks like it once
    # carried a URL marker (e.g. an HTML comment "<!-- url -->") that was lost
    # to markup stripping — confirm against the upstream source.
    content = f"\n{body or ''}"
    _append_text_file(path, content)

diff --git a/dist/plugin.video.viewit/plugin_interface.py b/dist/plugin.video.viewit/plugin_interface.py
new file mode 100644
index 0000000..a8b5b37
--- /dev/null
+++ b/dist/plugin.video.viewit/plugin_interface.py
@@ -0,0 +1,55 @@
#!/usr/bin/env python3
"""Shared interface for Kodi plugins."""

from __future__ import annotations

from abc import ABC, abstractmethod
from typing import List, Optional, Set


class BasisPlugin(ABC):
    """Abstract base class for all site integrations."""

    # Human-readable plugin name, set by each concrete subclass.
    name: str

    @abstractmethod
    async def
search_titles(self, query: str) -> List[str]:
        """Return the list of all titles matching the search *query*.

        NOTE(review): declared ``async`` (as is the template plugin's version);
        callers must await it — confirm the router actually does.
        """

    @abstractmethod
    def seasons_for(self, title: str) -> List[str]:
        """Return all seasons for a title."""

    @abstractmethod
    def episodes_for(self, title: str, season: str) -> List[str]:
        """Return all episodes for a season."""

    def stream_link_for(self, title: str, season: str, episode: str) -> Optional[str]:
        """Optional: return the stream link for a specific episode."""
        return None

    def resolve_stream_link(self, link: str) -> Optional[str]:
        """Optional: follow a stream link and return the final URL."""
        return None

    def genres(self) -> List[str]:
        """Optional: return a list of genres (if available)."""
        return []

    def titles_for_genre(self, genre: str) -> List[str]:
        """Optional: return all series titles for a genre."""
        return []

    def capabilities(self) -> Set[str]:
        """Optional: return the set of features/capabilities of this plugin.

        Examples:
        - `popular_series`: plugin can deliver a list of popular series.
        """

        return set()

    def popular_series(self) -> List[str]:
        """Optional: return a list of popular series (as title strings)."""

        return []
diff --git a/dist/plugin.video.viewit/plugins/__init__.py b/dist/plugin.video.viewit/plugins/__init__.py
new file mode 100644
index 0000000..9929cfa
--- /dev/null
+++ b/dist/plugin.video.viewit/plugins/__init__.py
@@ -0,0 +1 @@
"""Kodi addon plugins."""
diff --git a/dist/plugin.video.viewit/plugins/_template_plugin.py b/dist/plugin.video.viewit/plugins/_template_plugin.py
new file mode 100644
index 0000000..a5244e2
--- /dev/null
+++ b/dist/plugin.video.viewit/plugins/_template_plugin.py
@@ -0,0 +1,127 @@
"""Template fuer ein neues ViewIt-Plugin (Basis: serienstream_plugin).

Diese Datei wird NICHT automatisch geladen (Dateiname beginnt mit `_`).
+Zum Verwenden: +1) Kopiere/benenne die Datei um (ohne fuehrenden Unterstrich), z.B. `my_site_plugin.py` +2) Passe `name`, `BASE_URL` und die Implementierungen an. +""" + +from __future__ import annotations + +from dataclasses import dataclass +from typing import TYPE_CHECKING, Any, List, Optional, TypeAlias + +try: # pragma: no cover - optional dependency + import requests + from bs4 import BeautifulSoup # type: ignore[import-not-found] +except ImportError as exc: # pragma: no cover - optional dependency + requests = None + BeautifulSoup = None + REQUESTS_AVAILABLE = False + REQUESTS_IMPORT_ERROR = exc +else: + REQUESTS_AVAILABLE = True + REQUESTS_IMPORT_ERROR = None + +try: # pragma: no cover - optional Kodi helpers + import xbmcaddon # type: ignore[import-not-found] +except ImportError: # pragma: no cover - allow running outside Kodi + xbmcaddon = None + +from plugin_interface import BasisPlugin + +if TYPE_CHECKING: # pragma: no cover + from requests import Session as RequestsSession + from bs4 import BeautifulSoup as BeautifulSoupT # type: ignore[import-not-found] +else: # pragma: no cover + RequestsSession: TypeAlias = Any + BeautifulSoupT: TypeAlias = Any + + +ADDON_ID = "plugin.video.viewit" +BASE_URL = "https://example.com" +DEFAULT_TIMEOUT = 20 +HEADERS = { + "User-Agent": "Mozilla/5.0 (Kodi; ViewIt) AppleWebKit/537.36 (KHTML, like Gecko)", + "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", + "Accept-Language": "de-DE,de;q=0.9,en;q=0.8", + "Connection": "keep-alive", +} + + +@dataclass(frozen=True) +class TitleHit: + """Ein Suchtreffer mit Titel und Detail-URL.""" + + title: str + url: str + + +class TemplatePlugin(BasisPlugin): + """Vorlage fuer eine Streamingseiten-Integration. + + Optional kann ein Plugin Capabilities deklarieren (z.B. `popular_series`), + damit der Router passende Menüpunkte anbieten kann. 
+ """ + + name = "Template" + + def __init__(self) -> None: + self._session: RequestsSession | None = None + + @property + def is_available(self) -> bool: + return REQUESTS_AVAILABLE + + @property + def unavailable_reason(self) -> str: + if REQUESTS_AVAILABLE: + return "" + return f"requests/bs4 nicht verfuegbar: {REQUESTS_IMPORT_ERROR}" + + def _get_session(self) -> RequestsSession: + if requests is None: + raise RuntimeError(self.unavailable_reason) + if self._session is None: + session = requests.Session() + session.headers.update(HEADERS) + self._session = session + return self._session + + async def search_titles(self, query: str) -> List[str]: + """TODO: Suche auf der Zielseite implementieren.""" + _ = query + return [] + + def seasons_for(self, title: str) -> List[str]: + """TODO: Staffeln fuer einen Titel liefern.""" + _ = title + return [] + + def episodes_for(self, title: str, season: str) -> List[str]: + """TODO: Episoden fuer Titel+Staffel liefern.""" + _ = (title, season) + return [] + + def capabilities(self) -> set[str]: + """Optional: Deklariert Fähigkeiten dieses Plugins. 
+ + Beispiele: + - `popular_series`: Plugin kann beliebte Serien liefern + - `genres`: Plugin unterstützt Genre-Browser + """ + + return set() + + def popular_series(self) -> List[str]: + """Optional: Liste beliebter Serien (nur wenn `popular_series` gesetzt ist).""" + return [] + + def stream_link_for(self, title: str, season: str, episode: str) -> Optional[str]: + """Optional: Embed-/Hoster-Link fuer eine Episode.""" + _ = (title, season, episode) + return None + + def resolve_stream_link(self, link: str) -> Optional[str]: + """Optional: Redirect-/Mirror-Aufloesung.""" + return link diff --git a/dist/plugin.video.viewit/plugins/aniworld_plugin.py b/dist/plugin.video.viewit/plugins/aniworld_plugin.py new file mode 100644 index 0000000..99d7a65 --- /dev/null +++ b/dist/plugin.video.viewit/plugins/aniworld_plugin.py @@ -0,0 +1,877 @@ +"""AniWorld (aniworld.to) Integration als Downloader-Plugin. + +Dieses Plugin ist weitgehend kompatibel zur Serienstream-Integration: +- gleiche Staffel-/Episoden-URL-Struktur (/staffel-x/episode-y) +- gleiche Hoster-/Watch-Layouts (best-effort) +""" + +from __future__ import annotations + +from dataclasses import dataclass +import re +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, TypeAlias + +try: # pragma: no cover - optional dependency + import requests + from bs4 import BeautifulSoup # type: ignore[import-not-found] +except ImportError as exc: # pragma: no cover - optional dependency + requests = None + BeautifulSoup = None + REQUESTS_AVAILABLE = False + REQUESTS_IMPORT_ERROR = exc +else: + REQUESTS_AVAILABLE = True + REQUESTS_IMPORT_ERROR = None + +try: # pragma: no cover - optional Kodi helpers + import xbmcaddon # type: ignore[import-not-found] +except ImportError: # pragma: no cover - allow running outside Kodi + xbmcaddon = None + +from plugin_interface import BasisPlugin +from plugin_helpers import dump_response_html, get_setting_bool, log_url, notify_url +from http_session_pool import 
get_requests_session +from regex_patterns import DIGITS, SEASON_EPISODE_TAG, SEASON_EPISODE_URL, STAFFEL_NUM_IN_URL + +if TYPE_CHECKING: # pragma: no cover + from requests import Session as RequestsSession + from bs4 import BeautifulSoup as BeautifulSoupT # type: ignore[import-not-found] +else: # pragma: no cover + RequestsSession: TypeAlias = Any + BeautifulSoupT: TypeAlias = Any + + +BASE_URL = "https://aniworld.to" +ANIME_BASE_URL = f"{BASE_URL}/anime/stream" +POPULAR_ANIMES_URL = f"{BASE_URL}/beliebte-animes" +GENRES_URL = f"{BASE_URL}/animes" +LATEST_EPISODES_URL = f"{BASE_URL}/neue-episoden" +SEARCH_URL = f"{BASE_URL}/search?q={{query}}" +SEARCH_API_URL = f"{BASE_URL}/ajax/search" +DEFAULT_PREFERRED_HOSTERS = ["voe"] +DEFAULT_TIMEOUT = 20 +ADDON_ID = "plugin.video.viewit" +GLOBAL_SETTING_LOG_URLS = "debug_log_urls" +GLOBAL_SETTING_DUMP_HTML = "debug_dump_html" +GLOBAL_SETTING_SHOW_URL_INFO = "debug_show_url_info" +HEADERS = { + "User-Agent": "Mozilla/5.0 (Kodi; ViewIt) AppleWebKit/537.36 (KHTML, like Gecko)", + "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", + "Accept-Language": "de-DE,de;q=0.9,en;q=0.8", + "Connection": "keep-alive", +} + + +@dataclass +class SeriesResult: + title: str + description: str + url: str + + +@dataclass +class EpisodeInfo: + number: int + title: str + original_title: str + url: str + + +@dataclass +class LatestEpisode: + series_title: str + season: int + episode: int + url: str + airdate: str + + +@dataclass +class SeasonInfo: + number: int + url: str + episodes: List[EpisodeInfo] + + +def _absolute_url(href: str) -> str: + return f"{BASE_URL}{href}" if href.startswith("/") else href + + +def _log_url(url: str, *, kind: str = "VISIT") -> None: + log_url(ADDON_ID, enabled_setting_id=GLOBAL_SETTING_LOG_URLS, log_filename="aniworld_urls.log", url=url, kind=kind) + + +def _log_visit(url: str) -> None: + _log_url(url, kind="VISIT") + notify_url(ADDON_ID, heading="AniWorld", url=url, 
enabled_setting_id=GLOBAL_SETTING_SHOW_URL_INFO) + + +def _log_parsed_url(url: str) -> None: + _log_url(url, kind="PARSE") + + +def _log_response_html(url: str, body: str) -> None: + dump_response_html( + ADDON_ID, + enabled_setting_id=GLOBAL_SETTING_DUMP_HTML, + url=url, + body=body, + filename_prefix="aniworld_response", + ) + + +def _normalize_search_text(value: str) -> str: + value = (value or "").casefold() + value = re.sub(r"[^a-z0-9]+", " ", value) + value = re.sub(r"\s+", " ", value).strip() + return value + + +def _strip_html(text: str) -> str: + if not text: + return "" + return re.sub(r"<[^>]+>", "", text) + + +def _matches_query(query: str, *, title: str) -> bool: + normalized_query = _normalize_search_text(query) + if not normalized_query: + return False + haystack = _normalize_search_text(title) + if not haystack: + return False + return normalized_query in haystack + + +def _ensure_requests() -> None: + if requests is None or BeautifulSoup is None: + raise RuntimeError("requests/bs4 sind nicht verfuegbar.") + + +def _looks_like_cloudflare_challenge(body: str) -> bool: + lower = body.lower() + markers = ( + "cf-browser-verification", + "cf-challenge", + "cf_chl", + "challenge-platform", + "attention required! | cloudflare", + "just a moment...", + "cloudflare ray id", + ) + return any(marker in lower for marker in markers) + + +def _get_soup(url: str, *, session: Optional[RequestsSession] = None) -> BeautifulSoupT: + _ensure_requests() + _log_visit(url) + sess = session or get_requests_session("aniworld", headers=HEADERS) + response = sess.get(url, headers=HEADERS, timeout=DEFAULT_TIMEOUT) + response.raise_for_status() + if response.url and response.url != url: + _log_url(response.url, kind="REDIRECT") + _log_response_html(url, response.text) + if _looks_like_cloudflare_challenge(response.text): + raise RuntimeError("Cloudflare-Schutz erkannt. requests reicht ggf. 
nicht aus.") + return BeautifulSoup(response.text, "html.parser") + + +def _get_soup_simple(url: str) -> BeautifulSoupT: + _ensure_requests() + _log_visit(url) + sess = get_requests_session("aniworld", headers=HEADERS) + response = sess.get(url, headers=HEADERS, timeout=DEFAULT_TIMEOUT) + response.raise_for_status() + if response.url and response.url != url: + _log_url(response.url, kind="REDIRECT") + _log_response_html(url, response.text) + if _looks_like_cloudflare_challenge(response.text): + raise RuntimeError("Cloudflare-Schutz erkannt. requests reicht ggf. nicht aus.") + return BeautifulSoup(response.text, "html.parser") + + +def _post_json(url: str, *, payload: Dict[str, str], session: Optional[RequestsSession] = None) -> Any: + _ensure_requests() + _log_visit(url) + sess = session or get_requests_session("aniworld", headers=HEADERS) + response = sess.post(url, data=payload, headers=HEADERS, timeout=DEFAULT_TIMEOUT) + response.raise_for_status() + if response.url and response.url != url: + _log_url(response.url, kind="REDIRECT") + _log_response_html(url, response.text) + if _looks_like_cloudflare_challenge(response.text): + raise RuntimeError("Cloudflare-Schutz erkannt. requests reicht ggf. 
nicht aus.") + try: + return response.json() + except Exception: + return None + + +def _extract_canonical_url(soup: BeautifulSoupT, fallback: str) -> str: + canonical = soup.select_one('link[rel="canonical"][href]') + href = (canonical.get("href") if canonical else "") or "" + href = href.strip() + if href.startswith("http://") or href.startswith("https://"): + return href.rstrip("/") + return fallback.rstrip("/") + + +def _series_root_url(url: str) -> str: + normalized = (url or "").strip().rstrip("/") + normalized = re.sub(r"/staffel-\d+(?:/.*)?$", "", normalized) + normalized = re.sub(r"/episode-\d+(?:/.*)?$", "", normalized) + return normalized.rstrip("/") + + +def _extract_season_links(soup: BeautifulSoupT) -> List[Tuple[int, str]]: + season_links: List[Tuple[int, str]] = [] + seen_numbers: set[int] = set() + for anchor in soup.select('.hosterSiteDirectNav a[href*="/staffel-"]'): + href = anchor.get("href") or "" + if "/episode-" in href: + continue + match = re.search(STAFFEL_NUM_IN_URL, href) + if match: + number = int(match.group(1)) + else: + label = anchor.get_text(strip=True) + if not label.isdigit(): + continue + number = int(label) + if number in seen_numbers: + continue + seen_numbers.add(number) + season_url = _absolute_url(href) + if season_url: + _log_parsed_url(season_url) + season_links.append((number, season_url)) + season_links.sort(key=lambda item: item[0]) + return season_links + + +def _extract_number_of_seasons(soup: BeautifulSoupT) -> Optional[int]: + tag = soup.select_one('meta[itemprop="numberOfSeasons"]') + if not tag: + return None + content = (tag.get("content") or "").strip() + if not content.isdigit(): + return None + count = int(content) + return count if count > 0 else None + + +def _extract_episodes(soup: BeautifulSoupT) -> List[EpisodeInfo]: + episodes: List[EpisodeInfo] = [] + rows = soup.select("table.seasonEpisodesList tbody tr") + for index, row in enumerate(rows): + cells = row.find_all("td") + if not cells: + continue + 
episode_cell = cells[0] + number_text = episode_cell.get_text(strip=True) + digits = "".join(ch for ch in number_text if ch.isdigit()) + number = int(digits) if digits else index + 1 + link = episode_cell.find("a") + href = link.get("href") if link else "" + url = _absolute_url(href or "") + if url: + _log_parsed_url(url) + + title_tag = row.select_one(".seasonEpisodeTitle strong") + original_tag = row.select_one(".seasonEpisodeTitle span") + title = title_tag.get_text(strip=True) if title_tag else "" + original_title = original_tag.get_text(strip=True) if original_tag else "" + + if url: + episodes.append(EpisodeInfo(number=number, title=title, original_title=original_title, url=url)) + return episodes + + +_LATEST_EPISODE_TAG_RE = re.compile(SEASON_EPISODE_TAG, re.IGNORECASE) +_LATEST_EPISODE_URL_RE = re.compile(SEASON_EPISODE_URL, re.IGNORECASE) + + +def _extract_latest_episodes(soup: BeautifulSoupT) -> List[LatestEpisode]: + episodes: List[LatestEpisode] = [] + seen: set[str] = set() + + for anchor in soup.select(".newEpisodeList a[href]"): + href = (anchor.get("href") or "").strip() + if not href or "/anime/stream/" not in href: + continue + url = _absolute_url(href) + if not url: + continue + + title_tag = anchor.select_one("strong") + series_title = (title_tag.get_text(strip=True) if title_tag else "").strip() + if not series_title: + continue + + season_number: Optional[int] = None + episode_number: Optional[int] = None + + match = _LATEST_EPISODE_URL_RE.search(href) + if match: + season_number = int(match.group(1)) + episode_number = int(match.group(2)) + + if season_number is None or episode_number is None: + tag_node = ( + anchor.select_one("span.listTag.bigListTag.blue2") + or anchor.select_one("span.listTag.blue2") + or anchor.select_one("span.blue2") + ) + tag_text = (tag_node.get_text(" ", strip=True) if tag_node else "").strip() + match = _LATEST_EPISODE_TAG_RE.search(tag_text) + if not match: + continue + season_number = int(match.group(1)) + 
episode_number = int(match.group(2)) + + if season_number is None or episode_number is None: + continue + + airdate_node = anchor.select_one("span.elementFloatRight") + airdate = (airdate_node.get_text(" ", strip=True) if airdate_node else "").strip() + + key = f"{url}\t{season_number}\t{episode_number}" + if key in seen: + continue + seen.add(key) + + _log_parsed_url(url) + episodes.append( + LatestEpisode( + series_title=series_title, + season=season_number, + episode=episode_number, + url=url, + airdate=airdate, + ) + ) + + return episodes + + +def scrape_anime_detail(anime_identifier: str, max_seasons: Optional[int] = None) -> List[SeasonInfo]: + _ensure_requests() + anime_url = _series_root_url(_absolute_url(anime_identifier)) + _log_url(anime_url, kind="ANIME") + session = get_requests_session("aniworld", headers=HEADERS) + try: + _get_soup(BASE_URL, session=session) + except Exception: + pass + soup = _get_soup(anime_url, session=session) + + base_anime_url = _series_root_url(_extract_canonical_url(soup, anime_url)) + season_links = _extract_season_links(soup) + season_count = _extract_number_of_seasons(soup) + if season_count and (not season_links or len(season_links) < season_count): + existing = {number for number, _ in season_links} + for number in range(1, season_count + 1): + if number in existing: + continue + season_url = f"{base_anime_url}/staffel-{number}" + _log_parsed_url(season_url) + season_links.append((number, season_url)) + season_links.sort(key=lambda item: item[0]) + if max_seasons is not None: + season_links = season_links[:max_seasons] + + seasons: List[SeasonInfo] = [] + for number, url in season_links: + season_soup = _get_soup(url, session=session) + episodes = _extract_episodes(season_soup) + seasons.append(SeasonInfo(number=number, url=url, episodes=episodes)) + seasons.sort(key=lambda s: s.number) + return seasons + + +def resolve_redirect(target_url: str) -> Optional[str]: + _ensure_requests() + normalized_url = 
_absolute_url(target_url) + _log_visit(normalized_url) + session = get_requests_session("aniworld", headers=HEADERS) + _get_soup(BASE_URL, session=session) + response = session.get(normalized_url, headers=HEADERS, timeout=DEFAULT_TIMEOUT, allow_redirects=True) + if response.url: + _log_url(response.url, kind="RESOLVED") + return response.url if response.url else None + + +def fetch_episode_hoster_names(episode_url: str) -> List[str]: + _ensure_requests() + normalized_url = _absolute_url(episode_url) + session = get_requests_session("aniworld", headers=HEADERS) + _get_soup(BASE_URL, session=session) + soup = _get_soup(normalized_url, session=session) + names: List[str] = [] + seen: set[str] = set() + for anchor in soup.select(".hosterSiteVideo a.watchEpisode"): + title = anchor.select_one("h4") + name = title.get_text(strip=True) if title else "" + if not name: + name = anchor.get_text(" ", strip=True) + name = (name or "").strip() + if name.lower().startswith("hoster "): + name = name[7:].strip() + href = anchor.get("href") or "" + url = _absolute_url(href) + if url: + _log_parsed_url(url) + key = name.casefold().strip() + if not key or key in seen: + continue + seen.add(key) + names.append(name) + if names: + _log_url(f"{normalized_url}#hosters={','.join(names)}", kind="HOSTERS") + return names + + +def fetch_episode_stream_link( + episode_url: str, + *, + preferred_hosters: Optional[List[str]] = None, +) -> Optional[str]: + _ensure_requests() + normalized_url = _absolute_url(episode_url) + preferred = [hoster.lower() for hoster in (preferred_hosters or DEFAULT_PREFERRED_HOSTERS)] + session = get_requests_session("aniworld", headers=HEADERS) + _get_soup(BASE_URL, session=session) + soup = _get_soup(normalized_url, session=session) + candidates: List[Tuple[str, str]] = [] + for anchor in soup.select(".hosterSiteVideo a.watchEpisode"): + name_tag = anchor.select_one("h4") + name = name_tag.get_text(strip=True) if name_tag else "" + href = anchor.get("href") or "" + 
url = _absolute_url(href) + if url: + _log_parsed_url(url) + if name and url: + candidates.append((name, url)) + if not candidates: + return None + candidates.sort(key=lambda item: item[0].casefold()) + selected_url = None + for wanted in preferred: + for name, url in candidates: + if wanted in name.casefold(): + selected_url = url + break + if selected_url: + break + if not selected_url: + selected_url = candidates[0][1] + resolved = resolve_redirect(selected_url) or selected_url + return resolved + + +def search_animes(query: str) -> List[SeriesResult]: + _ensure_requests() + query = (query or "").strip() + if not query: + return [] + session = get_requests_session("aniworld", headers=HEADERS) + try: + session.get(BASE_URL, headers=HEADERS, timeout=DEFAULT_TIMEOUT) + except Exception: + pass + data = _post_json(SEARCH_API_URL, payload={"keyword": query}, session=session) + results: List[SeriesResult] = [] + seen: set[str] = set() + if isinstance(data, list): + for entry in data: + if not isinstance(entry, dict): + continue + title = _strip_html((entry.get("title") or "").strip()) + if not title or not _matches_query(query, title=title): + continue + link = (entry.get("link") or "").strip() + if not link.startswith("/anime/stream/"): + continue + if "/staffel-" in link or "/episode-" in link: + continue + if link.rstrip("/") == "/anime/stream": + continue + url = _absolute_url(link) if link else "" + if url: + _log_parsed_url(url) + key = title.casefold().strip() + if key in seen: + continue + seen.add(key) + description = (entry.get("description") or "").strip() + results.append(SeriesResult(title=title, description=description, url=url)) + return results + + soup = _get_soup_simple(SEARCH_URL.format(query=requests.utils.quote(query))) + for anchor in soup.select("a[href^='/anime/stream/'][href]"): + href = (anchor.get("href") or "").strip() + if not href or "/staffel-" in href or "/episode-" in href: + continue + url = _absolute_url(href) + if url: + 
_log_parsed_url(url) + title_node = anchor.select_one("h3") or anchor.select_one("strong") + title = (title_node.get_text(" ", strip=True) if title_node else anchor.get_text(" ", strip=True)).strip() + if not title: + continue + if not _matches_query(query, title=title): + continue + key = title.casefold().strip() + if key in seen: + continue + seen.add(key) + results.append(SeriesResult(title=title, description="", url=url)) + return results + + +class AniworldPlugin(BasisPlugin): + name = "AniWorld (aniworld.to)" + + def __init__(self) -> None: + self._anime_results: Dict[str, SeriesResult] = {} + self._season_cache: Dict[str, List[SeasonInfo]] = {} + self._episode_label_cache: Dict[Tuple[str, str], Dict[str, EpisodeInfo]] = {} + self._popular_cache: Optional[List[SeriesResult]] = None + self._genre_cache: Optional[Dict[str, List[SeriesResult]]] = None + self._latest_cache: Dict[int, List[LatestEpisode]] = {} + self._latest_hoster_cache: Dict[str, List[str]] = {} + self._requests_available = REQUESTS_AVAILABLE + self._default_preferred_hosters: List[str] = list(DEFAULT_PREFERRED_HOSTERS) + self._preferred_hosters: List[str] = list(self._default_preferred_hosters) + self._hoster_cache: Dict[Tuple[str, str, str], List[str]] = {} + self.is_available = True + self.unavailable_reason: Optional[str] = None + if not self._requests_available: # pragma: no cover - optional dependency + self.is_available = False + self.unavailable_reason = "requests/bs4 fehlen. Installiere 'requests' und 'beautifulsoup4'." 
+ if REQUESTS_IMPORT_ERROR: + print(f"AniworldPlugin Importfehler: {REQUESTS_IMPORT_ERROR}") + + def capabilities(self) -> set[str]: + return {"popular_series", "genres", "latest_episodes"} + + def _find_series_by_title(self, title: str) -> Optional[SeriesResult]: + title = (title or "").strip() + if not title: + return None + + direct = self._anime_results.get(title) + if direct: + return direct + + wanted = title.casefold().strip() + + for candidate in self._anime_results.values(): + if candidate.title and candidate.title.casefold().strip() == wanted: + return candidate + + try: + for entry in self._ensure_popular(): + if entry.title and entry.title.casefold().strip() == wanted: + self._anime_results[entry.title] = entry + return entry + except Exception: + pass + + try: + for entries in self._ensure_genres().values(): + for entry in entries: + if entry.title and entry.title.casefold().strip() == wanted: + self._anime_results[entry.title] = entry + return entry + except Exception: + pass + + try: + for entry in search_animes(title): + if entry.title and entry.title.casefold().strip() == wanted: + self._anime_results[entry.title] = entry + return entry + except Exception: + pass + + return None + + def _ensure_popular(self) -> List[SeriesResult]: + if self._popular_cache is not None: + return list(self._popular_cache) + soup = _get_soup_simple(POPULAR_ANIMES_URL) + results: List[SeriesResult] = [] + seen: set[str] = set() + for anchor in soup.select("div.seriesListContainer a[href^='/anime/stream/']"): + href = (anchor.get("href") or "").strip() + if not href or "/staffel-" in href or "/episode-" in href: + continue + url = _absolute_url(href) + if url: + _log_parsed_url(url) + title_node = anchor.select_one("h3") + title = (title_node.get_text(" ", strip=True) if title_node else "").strip() + if not title: + continue + description = "" + desc_node = anchor.select_one("small") + if desc_node: + description = desc_node.get_text(" ", strip=True).strip() + key = 
title.casefold().strip() + if key in seen: + continue + seen.add(key) + results.append(SeriesResult(title=title, description=description, url=url)) + self._popular_cache = list(results) + return list(results) + + def popular_series(self) -> List[str]: + if not self._requests_available: + return [] + entries = self._ensure_popular() + self._anime_results.update({entry.title: entry for entry in entries if entry.title}) + return [entry.title for entry in entries if entry.title] + + def latest_episodes(self, page: int = 1) -> List[LatestEpisode]: + if not self._requests_available: + return [] + try: + page = int(page or 1) + except Exception: + page = 1 + page = max(1, page) + + cached = self._latest_cache.get(page) + if cached is not None: + return list(cached) + + url = LATEST_EPISODES_URL + if page > 1: + url = f"{url}?page={page}" + + soup = _get_soup_simple(url) + episodes = _extract_latest_episodes(soup) + self._latest_cache[page] = list(episodes) + return list(episodes) + + def _ensure_genres(self) -> Dict[str, List[SeriesResult]]: + if self._genre_cache is not None: + return {key: list(value) for key, value in self._genre_cache.items()} + soup = _get_soup_simple(GENRES_URL) + results: Dict[str, List[SeriesResult]] = {} + genre_blocks = soup.select("#seriesContainer div.genre") + if not genre_blocks: + genre_blocks = soup.select("div.genre") + for genre_block in genre_blocks: + name_node = genre_block.select_one(".seriesGenreList h3") + genre_name = (name_node.get_text(" ", strip=True) if name_node else "").strip() + if not genre_name: + continue + entries: List[SeriesResult] = [] + seen: set[str] = set() + for anchor in genre_block.select("ul li a[href]"): + href = (anchor.get("href") or "").strip() + if not href or "/staffel-" in href or "/episode-" in href: + continue + url = _absolute_url(href) + if url: + _log_parsed_url(url) + title = (anchor.get_text(" ", strip=True) or "").strip() + if not title: + continue + key = title.casefold().strip() + if key in 
seen: + continue + seen.add(key) + entries.append(SeriesResult(title=title, description="", url=url)) + if entries: + results[genre_name] = entries + self._genre_cache = {key: list(value) for key, value in results.items()} + # Für spätere Auflösung (Seasons/Episoden) die Titel->URL Zuordnung auffüllen. + for entries in results.values(): + for entry in entries: + if not entry.title: + continue + if entry.title not in self._anime_results: + self._anime_results[entry.title] = entry + return {key: list(value) for key, value in results.items()} + + def genres(self) -> List[str]: + if not self._requests_available: + return [] + genres = list(self._ensure_genres().keys()) + return [g for g in genres if g] + + def titles_for_genre(self, genre: str) -> List[str]: + genre = (genre or "").strip() + if not genre or not self._requests_available: + return [] + mapping = self._ensure_genres() + entries = mapping.get(genre) + if entries is None: + wanted = genre.casefold() + for key, value in mapping.items(): + if key.casefold() == wanted: + entries = value + break + if not entries: + return [] + # Zusätzlich sicherstellen, dass die Titel im Cache sind. 
+ self._anime_results.update({entry.title: entry for entry in entries if entry.title and entry.title not in self._anime_results}) + return [entry.title for entry in entries if entry.title] + + def _season_label(self, number: int) -> str: + return f"Staffel {number}" + + def _parse_season_number(self, season_label: str) -> Optional[int]: + match = re.search(DIGITS, season_label or "") + return int(match.group(1)) if match else None + + def _episode_label(self, info: EpisodeInfo) -> str: + title = (info.title or "").strip() + if title: + return f"Episode {info.number} - {title}" + return f"Episode {info.number}" + + def _cache_episode_labels(self, title: str, season_label: str, season_info: SeasonInfo) -> None: + cache_key = (title, season_label) + self._episode_label_cache[cache_key] = {self._episode_label(info): info for info in season_info.episodes} + + def _lookup_episode(self, title: str, season_label: str, episode_label: str) -> Optional[EpisodeInfo]: + cache_key = (title, season_label) + cached = self._episode_label_cache.get(cache_key) + if cached: + return cached.get(episode_label) + seasons = self._ensure_seasons(title) + number = self._parse_season_number(season_label) + if number is None: + return None + for season_info in seasons: + if season_info.number == number: + self._cache_episode_labels(title, season_label, season_info) + return self._episode_label_cache.get(cache_key, {}).get(episode_label) + return None + + async def search_titles(self, query: str) -> List[str]: + query = (query or "").strip() + if not query: + self._anime_results.clear() + self._season_cache.clear() + self._episode_label_cache.clear() + self._popular_cache = None + return [] + if not self._requests_available: + raise RuntimeError("AniworldPlugin kann ohne requests/bs4 nicht suchen.") + try: + results = search_animes(query) + except Exception as exc: # pragma: no cover + self._anime_results.clear() + self._season_cache.clear() + self._episode_label_cache.clear() + raise 
RuntimeError(f"AniWorld-Suche fehlgeschlagen: {exc}") from exc + self._anime_results = {result.title: result for result in results} + self._season_cache.clear() + self._episode_label_cache.clear() + return [result.title for result in results] + + def _ensure_seasons(self, title: str) -> List[SeasonInfo]: + if title in self._season_cache: + return self._season_cache[title] + anime = self._find_series_by_title(title) + if not anime: + return [] + seasons = scrape_anime_detail(anime.url) + self._season_cache[title] = list(seasons) + return list(seasons) + + def seasons_for(self, title: str) -> List[str]: + seasons = self._ensure_seasons(title) + return [self._season_label(season.number) for season in seasons if season.episodes] + + def episodes_for(self, title: str, season: str) -> List[str]: + seasons = self._ensure_seasons(title) + number = self._parse_season_number(season) + if number is None: + return [] + for season_info in seasons: + if season_info.number == number: + labels = [self._episode_label(info) for info in season_info.episodes] + self._cache_episode_labels(title, season, season_info) + return labels + return [] + + def stream_link_for(self, title: str, season: str, episode: str) -> Optional[str]: + if not self._requests_available: + raise RuntimeError("AniworldPlugin kann ohne requests/bs4 keine Stream-Links liefern.") + episode_info = self._lookup_episode(title, season, episode) + if not episode_info: + return None + link = fetch_episode_stream_link(episode_info.url, preferred_hosters=self._preferred_hosters) + if link: + _log_url(link, kind="FOUND") + return link + + def available_hosters_for(self, title: str, season: str, episode: str) -> List[str]: + if not self._requests_available: + raise RuntimeError("AniworldPlugin kann ohne requests/bs4 keine Hoster laden.") + cache_key = (title, season, episode) + cached = self._hoster_cache.get(cache_key) + if cached is not None: + return list(cached) + episode_info = self._lookup_episode(title, season, 
episode) + if not episode_info: + return [] + names = fetch_episode_hoster_names(episode_info.url) + self._hoster_cache[cache_key] = list(names) + return list(names) + + def available_hosters_for_url(self, episode_url: str) -> List[str]: + if not self._requests_available: + raise RuntimeError("AniworldPlugin kann ohne requests/bs4 keine Hoster laden.") + normalized = _absolute_url(episode_url) + cached = self._latest_hoster_cache.get(normalized) + if cached is not None: + return list(cached) + names = fetch_episode_hoster_names(normalized) + self._latest_hoster_cache[normalized] = list(names) + return list(names) + + def stream_link_for_url(self, episode_url: str) -> Optional[str]: + if not self._requests_available: + raise RuntimeError("AniworldPlugin kann ohne requests/bs4 keine Stream-Links liefern.") + normalized = _absolute_url(episode_url) + link = fetch_episode_stream_link(normalized, preferred_hosters=self._preferred_hosters) + if link: + _log_url(link, kind="FOUND") + return link + + def resolve_stream_link(self, link: str) -> Optional[str]: + if not self._requests_available: + raise RuntimeError("AniworldPlugin kann ohne requests/bs4 keine Stream-Links aufloesen.") + resolved = resolve_redirect(link) + if not resolved: + return None + try: + from resolveurl_backend import resolve as resolve_with_resolveurl + except Exception: + resolve_with_resolveurl = None + if callable(resolve_with_resolveurl): + resolved_by_resolveurl = resolve_with_resolveurl(resolved) + if resolved_by_resolveurl: + _log_url("ResolveURL", kind="HOSTER_RESOLVER") + _log_url(resolved_by_resolveurl, kind="MEDIA") + return resolved_by_resolveurl + _log_url(resolved, kind="FINAL") + return resolved + + def set_preferred_hosters(self, hosters: List[str]) -> None: + normalized = [hoster.strip().lower() for hoster in hosters if hoster.strip()] + if normalized: + self._preferred_hosters = normalized + + def reset_preferred_hosters(self) -> None: + self._preferred_hosters = 
list(self._default_preferred_hosters) + + +Plugin = AniworldPlugin diff --git a/dist/plugin.video.viewit/plugins/einschalten_plugin.py b/dist/plugin.video.viewit/plugins/einschalten_plugin.py new file mode 100644 index 0000000..7b4795a --- /dev/null +++ b/dist/plugin.video.viewit/plugins/einschalten_plugin.py @@ -0,0 +1,1052 @@ +"""Einschalten Plugin. + +Optionales Debugging wie bei Serienstream: +- URL-Logging +- HTML-Dumps +- On-Screen URL-Info +""" + +from __future__ import annotations + +import json +import re +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Set +from urllib.parse import urlencode, urljoin, urlsplit + +try: # pragma: no cover - optional dependency (Kodi dependency) + import requests +except ImportError as exc: # pragma: no cover + requests = None + REQUESTS_AVAILABLE = False + REQUESTS_IMPORT_ERROR = exc +else: + REQUESTS_AVAILABLE = True + REQUESTS_IMPORT_ERROR = None + +try: # pragma: no cover - optional Kodi helpers + import xbmcaddon # type: ignore[import-not-found] +except ImportError: # pragma: no cover - allow running outside Kodi + xbmcaddon = None + +from plugin_interface import BasisPlugin +from plugin_helpers import dump_response_html, get_setting_bool, log_url, notify_url + +ADDON_ID = "plugin.video.viewit" +SETTING_BASE_URL = "einschalten_base_url" +SETTING_INDEX_PATH = "einschalten_index_path" +SETTING_NEW_TITLES_PATH = "einschalten_new_titles_path" +SETTING_SEARCH_PATH = "einschalten_search_path" +SETTING_GENRES_PATH = "einschalten_genres_path" +SETTING_ENABLE_PLAYBACK = "einschalten_enable_playback" +SETTING_WATCH_PATH_TEMPLATE = "einschalten_watch_path_template" +GLOBAL_SETTING_LOG_URLS = "debug_log_urls" +GLOBAL_SETTING_DUMP_HTML = "debug_dump_html" +GLOBAL_SETTING_SHOW_URL_INFO = "debug_show_url_info" + +DEFAULT_BASE_URL = "" +DEFAULT_INDEX_PATH = "/" +DEFAULT_NEW_TITLES_PATH = "/movies/new" +DEFAULT_SEARCH_PATH = "/search" +DEFAULT_GENRES_PATH = "/genres" +DEFAULT_WATCH_PATH_TEMPLATE = 
"/api/movies/{id}/watch" + +HEADERS = { + "User-Agent": "Mozilla/5.0 (Kodi; ViewIt) AppleWebKit/537.36 (KHTML, like Gecko)", + "Accept": "text/html,application/xhtml+xml,application/json;q=0.9,*/*;q=0.8", + "Accept-Language": "de-DE,de;q=0.9,en;q=0.8", + "Connection": "keep-alive", +} + + +@dataclass(frozen=True) +class MovieItem: + id: int + title: str + release_date: str = "" + poster_path: str = "" + vote_average: float | None = None + collection_id: int | None = None + + +@dataclass(frozen=True) +class MovieDetail: + id: int + title: str + tagline: str = "" + overview: str = "" + release_date: str = "" + runtime_minutes: int | None = None + poster_path: str = "" + backdrop_path: str = "" + vote_average: float | None = None + vote_count: int | None = None + homepage: str = "" + imdb_id: str = "" + wikidata_id: str = "" + genres: List[str] | None = None + + +def _normalize_search_text(value: str) -> str: + value = (value or "").casefold() + value = re.sub(r"[^a-z0-9]+", " ", value) + value = re.sub(r"\s+", " ", value).strip() + return value + + +def _matches_query(query: str, *, title: str) -> bool: + normalized_query = _normalize_search_text(query) + if not normalized_query: + return False + haystack = f" {_normalize_search_text(title)} " + return f" {normalized_query} " in haystack + + +def _filter_movies_by_title(query: str, movies: List[MovieItem]) -> List[MovieItem]: + query = (query or "").strip() + if not query: + return [] + return [movie for movie in movies if _matches_query(query, title=movie.title)] + + +def _get_setting_text(setting_id: str, *, default: str = "") -> str: + if xbmcaddon is None: + return default + try: + addon = xbmcaddon.Addon(ADDON_ID) + getter = getattr(addon, "getSettingString", None) + if getter is not None: + return str(getter(setting_id) or "").strip() + return str(addon.getSetting(setting_id) or "").strip() + except Exception: + return default + + +def _get_setting_bool(setting_id: str, *, default: bool = False) -> bool: + 
return get_setting_bool(ADDON_ID, setting_id, default=default) + + +def _ensure_requests() -> None: + if requests is None: + raise RuntimeError(f"requests ist nicht verfuegbar: {REQUESTS_IMPORT_ERROR}") + + +def _extract_ng_state_payload(html: str) -> Dict[str, Any]: + """Extrahiert JSON aus ``.""" + html = html or "" + # Regex ist hier ausreichend und vermeidet bs4-Abhängigkeit. + match = re.search( + r']*id=["\\\']ng-state["\\\'][^>]*>(.*?)', + html, + flags=re.IGNORECASE | re.DOTALL, + ) + if not match: + return {} + raw = (match.group(1) or "").strip() + if not raw: + return {} + try: + data = json.loads(raw) + except Exception: + return {} + return data if isinstance(data, dict) else {} + + +def _notify_url(url: str) -> None: + notify_url(ADDON_ID, heading="einschalten", url=url, enabled_setting_id=GLOBAL_SETTING_SHOW_URL_INFO) + + +def _log_url(url: str, *, kind: str = "VISIT") -> None: + log_url(ADDON_ID, enabled_setting_id=GLOBAL_SETTING_LOG_URLS, log_filename="einschalten_urls.log", url=url, kind=kind) + + +def _log_debug_line(message: str) -> None: + try: + log_url(ADDON_ID, enabled_setting_id=GLOBAL_SETTING_LOG_URLS, log_filename="einschalten_debug.log", url=message, kind="DEBUG") + except Exception: + pass + + +def _log_titles(items: list[MovieItem], *, context: str) -> None: + if not items: + return + try: + log_url( + ADDON_ID, + enabled_setting_id=GLOBAL_SETTING_LOG_URLS, + log_filename="einschalten_titles.log", + url=f"{context}:count={len(items)}", + kind="TITLE", + ) + for item in items: + log_url( + ADDON_ID, + enabled_setting_id=GLOBAL_SETTING_LOG_URLS, + log_filename="einschalten_titles.log", + url=f"{context}:id={item.id} title={item.title}", + kind="TITLE", + ) + except Exception: + pass + + +def _log_response_html(url: str, body: str) -> None: + dump_response_html( + ADDON_ID, + enabled_setting_id=GLOBAL_SETTING_DUMP_HTML, + url=url, + body=body, + filename_prefix="einschalten_response", + ) + +def _u_matches(value: Any, expected_path: str) 
-> bool: + raw = (value or "").strip() + if not raw: + return False + if raw == expected_path: + return True + try: + if "://" in raw: + path = urlsplit(raw).path or "" + else: + path = raw.split("?", 1)[0].split("#", 1)[0] + if path == expected_path: + return True + except Exception: + pass + return raw.endswith(expected_path) + + +def _parse_ng_state_movies(payload: Dict[str, Any]) -> List[MovieItem]: + movies: List[MovieItem] = [] + for value in (payload or {}).values(): + if not isinstance(value, dict): + continue + # In ng-state payload, `u` (URL) is a sibling of `b` (body), not nested inside `b`. + if not _u_matches(value.get("u"), "/api/movies"): + continue + block = value.get("b") + if not isinstance(block, dict): + continue + data = block.get("data") + if not isinstance(data, list): + continue + for item in data: + if not isinstance(item, dict): + continue + try: + movie_id = int(item.get("id")) + except Exception: + continue + title = str(item.get("title") or "").strip() + if not title: + continue + vote_average = item.get("voteAverage") + try: + vote_average_f = float(vote_average) if vote_average is not None else None + except Exception: + vote_average_f = None + collection_id = item.get("collectionId") + try: + collection_id_i = int(collection_id) if collection_id is not None else None + except Exception: + collection_id_i = None + movies.append( + MovieItem( + id=movie_id, + title=title, + release_date=str(item.get("releaseDate") or ""), + poster_path=str(item.get("posterPath") or ""), + vote_average=vote_average_f, + collection_id=collection_id_i, + ) + ) + return movies + + +def _parse_ng_state_movies_with_pagination(payload: Dict[str, Any]) -> tuple[List[MovieItem], bool | None, int | None]: + """Parses ng-state for `u: "/api/movies"` where `b` contains `{data:[...], pagination:{...}}`. 
+ + Returns: (movies, has_more, current_page) + """ + + movies: List[MovieItem] = [] + has_more: bool | None = None + current_page: int | None = None + + for value in (payload or {}).values(): + if not isinstance(value, dict): + continue + if not _u_matches(value.get("u"), "/api/movies"): + continue + block = value.get("b") + if not isinstance(block, dict): + continue + + pagination = block.get("pagination") + if isinstance(pagination, dict): + if "hasMore" in pagination: + has_more = bool(pagination.get("hasMore") is True) + try: + current_page = int(pagination.get("currentPage")) if pagination.get("currentPage") is not None else None + except Exception: + current_page = None + + data = block.get("data") + if not isinstance(data, list): + continue + + for item in data: + if not isinstance(item, dict): + continue + try: + movie_id = int(item.get("id")) + except Exception: + continue + title = str(item.get("title") or "").strip() + if not title: + continue + vote_average = item.get("voteAverage") + try: + vote_average_f = float(vote_average) if vote_average is not None else None + except Exception: + vote_average_f = None + collection_id = item.get("collectionId") + try: + collection_id_i = int(collection_id) if collection_id is not None else None + except Exception: + collection_id_i = None + movies.append( + MovieItem( + id=movie_id, + title=title, + release_date=str(item.get("releaseDate") or ""), + poster_path=str(item.get("posterPath") or ""), + vote_average=vote_average_f, + collection_id=collection_id_i, + ) + ) + + # Stop after first matching block (genre pages should only have one). 
+ break + + return movies, has_more, current_page + + +def _parse_ng_state_search_results(payload: Dict[str, Any]) -> List[MovieItem]: + movies: List[MovieItem] = [] + for value in (payload or {}).values(): + if not isinstance(value, dict): + continue + if not _u_matches(value.get("u"), "/api/search"): + continue + block = value.get("b") + if not isinstance(block, dict): + continue + data = block.get("data") + if not isinstance(data, list): + continue + for item in data: + if not isinstance(item, dict): + continue + try: + movie_id = int(item.get("id")) + except Exception: + continue + title = str(item.get("title") or "").strip() + if not title: + continue + vote_average = item.get("voteAverage") + try: + vote_average_f = float(vote_average) if vote_average is not None else None + except Exception: + vote_average_f = None + collection_id = item.get("collectionId") + try: + collection_id_i = int(collection_id) if collection_id is not None else None + except Exception: + collection_id_i = None + movies.append( + MovieItem( + id=movie_id, + title=title, + release_date=str(item.get("releaseDate") or ""), + poster_path=str(item.get("posterPath") or ""), + vote_average=vote_average_f, + collection_id=collection_id_i, + ) + ) + return movies + + +def _parse_ng_state_movie_detail(payload: Dict[str, Any], *, movie_id: int) -> MovieDetail | None: + movie_id = int(movie_id or 0) + if movie_id <= 0: + return None + expected_u = f"/api/movies/{movie_id}" + for value in (payload or {}).values(): + if not isinstance(value, dict): + continue + if not _u_matches(value.get("u"), expected_u): + continue + block = value.get("b") + if not isinstance(block, dict): + continue + try: + parsed_id = int(block.get("id")) + except Exception: + continue + if parsed_id != movie_id: + continue + title = str(block.get("title") or "").strip() + if not title: + continue + runtime = block.get("runtime") + try: + runtime_i = int(runtime) if runtime is not None else None + except Exception: + 
runtime_i = None + vote_average = block.get("voteAverage") + try: + vote_average_f = float(vote_average) if vote_average is not None else None + except Exception: + vote_average_f = None + vote_count = block.get("voteCount") + try: + vote_count_i = int(vote_count) if vote_count is not None else None + except Exception: + vote_count_i = None + genres_raw = block.get("genres") + genres: List[str] | None = None + if isinstance(genres_raw, list): + names: List[str] = [] + for g in genres_raw: + if isinstance(g, dict): + name = str(g.get("name") or "").strip() + if name: + names.append(name) + genres = names + return MovieDetail( + id=movie_id, + title=title, + tagline=str(block.get("tagline") or "").strip(), + overview=str(block.get("overview") or "").strip(), + release_date=str(block.get("releaseDate") or "").strip(), + runtime_minutes=runtime_i, + poster_path=str(block.get("posterPath") or "").strip(), + backdrop_path=str(block.get("backdropPath") or "").strip(), + vote_average=vote_average_f, + vote_count=vote_count_i, + homepage=str(block.get("homepage") or "").strip(), + imdb_id=str(block.get("imdbId") or "").strip(), + wikidata_id=str(block.get("wikidataId") or "").strip(), + genres=genres, + ) + return None + + +def _parse_ng_state_genres(payload: Dict[str, Any]) -> Dict[str, int]: + """Parses ng-state for `u: "/api/genres"` where `b` is a list of {id,name}.""" + genres: Dict[str, int] = {} + for value in (payload or {}).values(): + if not isinstance(value, dict): + continue + if not _u_matches(value.get("u"), "/api/genres"): + continue + block = value.get("b") + if not isinstance(block, list): + continue + for item in block: + if not isinstance(item, dict): + continue + name = str(item.get("name") or "").strip() + if not name: + continue + try: + gid = int(item.get("id")) + except Exception: + continue + if gid > 0: + genres[name] = gid + return genres + + +class EinschaltenPlugin(BasisPlugin): + """Metadata-Plugin für eine autorisierte Quelle.""" + + name = 
"einschalten" + + def __init__(self) -> None: + self.is_available = REQUESTS_AVAILABLE + self.unavailable_reason = None if REQUESTS_AVAILABLE else f"requests fehlt: {REQUESTS_IMPORT_ERROR}" + self._session = None + self._id_by_title: Dict[str, int] = {} + self._detail_html_by_id: Dict[int, str] = {} + self._detail_by_id: Dict[int, MovieDetail] = {} + self._genre_id_by_name: Dict[str, int] = {} + self._genre_has_more_by_id_page: Dict[tuple[int, int], bool] = {} + self._new_titles_has_more_by_page: Dict[int, bool] = {} + + def _get_session(self): + _ensure_requests() + if self._session is None: + self._session = requests.Session() + return self._session + + def _get_base_url(self) -> str: + base = _get_setting_text(SETTING_BASE_URL, default=DEFAULT_BASE_URL).strip() + return base.rstrip("/") + + def _index_url(self) -> str: + base = self._get_base_url() + if not base: + return "" + path = _get_setting_text(SETTING_INDEX_PATH, default=DEFAULT_INDEX_PATH).strip() or "/" + return urljoin(base + "/", path.lstrip("/")) + + def _new_titles_url(self) -> str: + base = self._get_base_url() + if not base: + return "" + path = _get_setting_text(SETTING_NEW_TITLES_PATH, default=DEFAULT_NEW_TITLES_PATH).strip() or "/movies/new" + return urljoin(base + "/", path.lstrip("/")) + + def _genres_url(self) -> str: + base = self._get_base_url() + if not base: + return "" + path = _get_setting_text(SETTING_GENRES_PATH, default=DEFAULT_GENRES_PATH).strip() or "/genres" + return urljoin(base + "/", path.lstrip("/")) + + def _api_genres_url(self) -> str: + base = self._get_base_url() + if not base: + return "" + return urljoin(base + "/", "api/genres") + + def _search_url(self, query: str) -> str: + base = self._get_base_url() + if not base: + return "" + path = _get_setting_text(SETTING_SEARCH_PATH, default=DEFAULT_SEARCH_PATH).strip() or "/search" + url = urljoin(base + "/", path.lstrip("/")) + return f"{url}?{urlencode({'query': query})}" + + def _api_movies_url(self, *, with_genres: int, 
page: int = 1) -> str: + base = self._get_base_url() + if not base: + return "" + params: Dict[str, str] = {"withGenres": str(int(with_genres))} + if page and int(page) > 1: + params["page"] = str(int(page)) + return urljoin(base + "/", "api/movies") + f"?{urlencode(params)}" + + def _genre_page_url(self, *, genre_id: int, page: int = 1) -> str: + """Genre title pages are rendered server-side and embed the movie list in ng-state. + + Example: + - `/genres/` contains ng-state with `u: "/api/movies"` and `b.data` + `b.pagination`. + """ + + base = self._get_base_url() + if not base: + return "" + genre_root = self._genres_url().rstrip("/") + if not genre_root: + return "" + page = max(1, int(page or 1)) + url = urljoin(genre_root + "/", str(int(genre_id))) + if page > 1: + url = f"{url}?{urlencode({'page': str(page)})}" + return url + + def _movie_detail_url(self, movie_id: int) -> str: + base = self._get_base_url() + if not base: + return "" + return urljoin(base + "/", f"movies/{int(movie_id)}") + + def _watch_url(self, movie_id: int) -> str: + base = self._get_base_url() + if not base: + return "" + template = _get_setting_text(SETTING_WATCH_PATH_TEMPLATE, default=DEFAULT_WATCH_PATH_TEMPLATE).strip() + if not template: + template = DEFAULT_WATCH_PATH_TEMPLATE + try: + path = template.format(id=int(movie_id)) + except Exception: + path = DEFAULT_WATCH_PATH_TEMPLATE.format(id=int(movie_id)) + return urljoin(base + "/", path.lstrip("/")) + + def _ensure_title_id(self, title: str) -> int | None: + title = (title or "").strip() + if not title: + return None + cached = self._id_by_title.get(title) + if isinstance(cached, int) and cached > 0: + return cached + # Fallback: scan index ng-state again to rebuild mapping. + for movie in self._load_movies(): + if movie.title == title: + self._id_by_title[title] = movie.id + return movie.id + # Kodi startet das Plugin pro Navigation neu -> RAM-Cache geht verloren. + # Für Titel, die nicht auf der Index-Seite sind (z.B. 
/movies/new), lösen wir die ID + # über die Suchseite auf, die ebenfalls `id` + `title` im ng-state liefert. + try: + normalized = title.casefold().strip() + for movie in self._fetch_search_movies(title): + if (movie.title or "").casefold().strip() == normalized: + self._id_by_title[title] = movie.id + return movie.id + except Exception: + pass + return None + + def _fetch_movie_detail(self, movie_id: int) -> str: + movie_id = int(movie_id or 0) + if movie_id <= 0: + return "" + cached = self._detail_html_by_id.get(movie_id) + if isinstance(cached, str) and cached: + return cached + url = self._movie_detail_url(movie_id) + if not url: + return "" + try: + _log_url(url, kind="GET") + _notify_url(url) + sess = self._get_session() + resp = sess.get(url, headers=HEADERS, timeout=20) + resp.raise_for_status() + _log_url(resp.url or url, kind="OK") + _log_response_html(resp.url or url, resp.text) + self._detail_html_by_id[movie_id] = resp.text or "" + return resp.text or "" + except Exception: + return "" + + def _fetch_watch_payload(self, movie_id: int) -> dict[str, object]: + movie_id = int(movie_id or 0) + if movie_id <= 0: + return {} + url = self._watch_url(movie_id) + if not url: + return {} + try: + _log_url(url, kind="GET") + _notify_url(url) + sess = self._get_session() + resp = sess.get(url, headers=HEADERS, timeout=20) + resp.raise_for_status() + _log_url(resp.url or url, kind="OK") + # Some backends may return JSON with a JSON content-type; for debugging we still dump text. 
+ _log_response_html(resp.url or url, resp.text) + data = resp.json() + return dict(data) if isinstance(data, dict) else {} + except Exception: + return {} + + def _watch_stream_url(self, movie_id: int) -> str: + payload = self._fetch_watch_payload(movie_id) + stream_url = payload.get("streamUrl") + return str(stream_url).strip() if isinstance(stream_url, str) and stream_url.strip() else "" + + def metadata_for(self, title: str) -> tuple[dict[str, str], dict[str, str], list[object] | None]: + """Optional hook for the UI layer (default.py) to attach metadata/art without TMDB.""" + title = (title or "").strip() + movie_id = self._ensure_title_id(title) + if movie_id is None: + return {}, {}, None + + detail = self._detail_by_id.get(movie_id) + if detail is None: + html = self._fetch_movie_detail(movie_id) + payload = _extract_ng_state_payload(html) + parsed = _parse_ng_state_movie_detail(payload, movie_id=movie_id) + if parsed is not None: + self._detail_by_id[movie_id] = parsed + detail = parsed + + info: dict[str, str] = {"mediatype": "movie", "title": title} + art: dict[str, str] = {} + if detail is None: + return info, art, None + + if detail.overview: + info["plot"] = detail.overview + if detail.tagline: + info["tagline"] = detail.tagline + if detail.release_date: + info["premiered"] = detail.release_date + if len(detail.release_date) >= 4 and detail.release_date[:4].isdigit(): + info["year"] = detail.release_date[:4] + if detail.runtime_minutes is not None and detail.runtime_minutes > 0: + info["duration"] = str(int(detail.runtime_minutes) * 60) + if detail.vote_average is not None: + info["rating"] = str(detail.vote_average) + if detail.vote_count is not None: + info["votes"] = str(detail.vote_count) + if detail.genres: + info["genre"] = " / ".join(detail.genres) + + base = self._get_base_url() + if base: + if detail.poster_path: + poster = urljoin(base + "/", f"api/image/poster/{detail.poster_path.lstrip('/')}") + art.update({"thumb": poster, "poster": 
poster}) + if detail.backdrop_path: + backdrop = urljoin(base + "/", f"api/image/backdrop/{detail.backdrop_path.lstrip('/')}") + art.setdefault("fanart", backdrop) + art.setdefault("landscape", backdrop) + + return info, art, None + + def _fetch_index_movies(self) -> List[MovieItem]: + url = self._index_url() + if not url: + return [] + try: + _log_url(url, kind="GET") + _notify_url(url) + sess = self._get_session() + resp = sess.get(url, headers=HEADERS, timeout=20) + resp.raise_for_status() + _log_url(resp.url or url, kind="OK") + _log_response_html(resp.url or url, resp.text) + payload = _extract_ng_state_payload(resp.text) + return _parse_ng_state_movies(payload) + except Exception: + return [] + + def _fetch_new_titles_movies(self) -> List[MovieItem]: + # "Neue Filme" lives at `/movies/new` and embeds the list in ng-state (`u: "/api/movies"`). + url = self._new_titles_url() + if not url: + return [] + try: + _log_url(url, kind="GET") + _notify_url(url) + sess = self._get_session() + resp = sess.get(url, headers=HEADERS, timeout=20) + resp.raise_for_status() + _log_url(resp.url or url, kind="OK") + _log_response_html(resp.url or url, resp.text) + payload = _extract_ng_state_payload(resp.text) + movies = _parse_ng_state_movies(payload) + _log_debug_line(f"parse_ng_state_movies:count={len(movies)}") + if movies: + _log_titles(movies, context="new_titles") + return movies + return [] + except Exception: + return [] + + def _fetch_new_titles_movies_page(self, page: int) -> List[MovieItem]: + page = max(1, int(page or 1)) + url = self._new_titles_url() + if not url: + return [] + if page > 1: + url = f"{url}?{urlencode({'page': str(page)})}" + try: + _log_url(url, kind="GET") + _notify_url(url) + sess = self._get_session() + resp = sess.get(url, headers=HEADERS, timeout=20) + resp.raise_for_status() + _log_url(resp.url or url, kind="OK") + _log_response_html(resp.url or url, resp.text) + payload = _extract_ng_state_payload(resp.text) + movies, has_more, current_page 
= _parse_ng_state_movies_with_pagination(payload) + _log_debug_line(f"parse_ng_state_movies_page:page={page} count={len(movies)}") + if has_more is not None: + self._new_titles_has_more_by_page[page] = bool(has_more) + elif current_page is not None and int(current_page) != page: + self._new_titles_has_more_by_page[page] = False + if movies: + _log_titles(movies, context=f"new_titles_page={page}") + return movies + self._new_titles_has_more_by_page[page] = False + return [] + except Exception: + return [] + + def new_titles_page(self, page: int) -> List[str]: + """Paged variant: returns titles for `/movies/new?page=`.""" + if not REQUESTS_AVAILABLE: + return [] + if not self._get_base_url(): + return [] + page = max(1, int(page or 1)) + movies = self._fetch_new_titles_movies_page(page) + titles: List[str] = [] + seen: set[str] = set() + for movie in movies: + if movie.title in seen: + continue + seen.add(movie.title) + self._id_by_title[movie.title] = movie.id + titles.append(movie.title) + return titles + + def new_titles_has_more(self, page: int) -> bool: + """Tells the UI whether `/movies/new` has a next page after `page`.""" + page = max(1, int(page or 1)) + cached = self._new_titles_has_more_by_page.get(page) + if cached is not None: + return bool(cached) + # Load page to fill cache. + _ = self._fetch_new_titles_movies_page(page) + return bool(self._new_titles_has_more_by_page.get(page, False)) + + def _fetch_search_movies(self, query: str) -> List[MovieItem]: + query = (query or "").strip() + if not query: + return [] + + # Parse ng-state from /search page HTML. 
+ url = self._search_url(query) + if not url: + return [] + try: + _log_url(url, kind="GET") + _notify_url(url) + sess = self._get_session() + resp = sess.get(url, headers=HEADERS, timeout=20) + resp.raise_for_status() + _log_url(resp.url or url, kind="OK") + _log_response_html(resp.url or url, resp.text) + payload = _extract_ng_state_payload(resp.text) + results = _parse_ng_state_search_results(payload) + return _filter_movies_by_title(query, results) + except Exception: + return [] + + def _load_movies(self) -> List[MovieItem]: + return self._fetch_index_movies() + + def _ensure_genre_index(self) -> None: + if self._genre_id_by_name: + return + # Prefer direct JSON API (simpler): GET /api/genres -> [{"id":..,"name":..}, ...] + api_url = self._api_genres_url() + if api_url: + try: + _log_url(api_url, kind="GET") + _notify_url(api_url) + sess = self._get_session() + resp = sess.get(api_url, headers=HEADERS, timeout=20) + resp.raise_for_status() + _log_url(resp.url or api_url, kind="OK") + payload = resp.json() + if isinstance(payload, list): + parsed: Dict[str, int] = {} + for item in payload: + if not isinstance(item, dict): + continue + name = str(item.get("name") or "").strip() + if not name: + continue + try: + gid = int(item.get("id")) + except Exception: + continue + if gid > 0: + parsed[name] = gid + if parsed: + self._genre_id_by_name.clear() + self._genre_id_by_name.update(parsed) + return + except Exception: + pass + + # Fallback: parse ng-state from HTML /genres page. 
+ url = self._genres_url() + if not url: + return + try: + _log_url(url, kind="GET") + _notify_url(url) + sess = self._get_session() + resp = sess.get(url, headers=HEADERS, timeout=20) + resp.raise_for_status() + _log_url(resp.url or url, kind="OK") + _log_response_html(resp.url or url, resp.text) + payload = _extract_ng_state_payload(resp.text) + parsed = _parse_ng_state_genres(payload) + if parsed: + self._genre_id_by_name.clear() + self._genre_id_by_name.update(parsed) + except Exception: + return + + async def search_titles(self, query: str) -> List[str]: + if not REQUESTS_AVAILABLE: + return [] + query = (query or "").strip() + if not query: + return [] + if not self._get_base_url(): + return [] + + movies = self._fetch_search_movies(query) + if not movies: + movies = _filter_movies_by_title(query, self._load_movies()) + titles: List[str] = [] + seen: set[str] = set() + for movie in movies: + if movie.title in seen: + continue + seen.add(movie.title) + self._id_by_title[movie.title] = movie.id + titles.append(movie.title) + titles.sort(key=lambda value: value.casefold()) + return titles + + def genres(self) -> List[str]: + if not REQUESTS_AVAILABLE: + return [] + if not self._get_base_url(): + return [] + self._ensure_genre_index() + return sorted(self._genre_id_by_name.keys(), key=lambda value: value.casefold()) + + def titles_for_genre(self, genre: str) -> List[str]: + # Backwards compatible (first page only); paging handled via titles_for_genre_page(). 
+ titles = self.titles_for_genre_page(genre, 1) + titles.sort(key=lambda value: value.casefold()) + return titles + + def titles_for_genre_page(self, genre: str, page: int) -> List[str]: + if not REQUESTS_AVAILABLE: + return [] + genre = (genre or "").strip() + if not genre: + return [] + if not self._get_base_url(): + return [] + self._ensure_genre_index() + genre_id = self._genre_id_by_name.get(genre) + if not genre_id: + return [] + # Do NOT use `/api/movies?withGenres=...` directly: on some deployments it returns + # a mismatched/unfiltered dataset. Instead parse the server-rendered genre page + # `/genres/` which embeds the correct data in ng-state. + url = self._genre_page_url(genre_id=int(genre_id), page=max(1, int(page or 1))) + if not url: + return [] + try: + _log_url(url, kind="GET") + _notify_url(url) + sess = self._get_session() + resp = sess.get(url, headers=HEADERS, timeout=20) + resp.raise_for_status() + _log_url(resp.url or url, kind="OK") + _log_response_html(resp.url or url, resp.text) + payload = _extract_ng_state_payload(resp.text) + except Exception: + return [] + if not isinstance(payload, dict): + return [] + + movies, has_more, current_page = _parse_ng_state_movies_with_pagination(payload) + page = max(1, int(page or 1)) + if has_more is not None: + self._genre_has_more_by_id_page[(int(genre_id), page)] = bool(has_more) + elif current_page is not None and int(current_page) != page: + # Defensive: if the page param wasn't honored, avoid showing "next". 
+ self._genre_has_more_by_id_page[(int(genre_id), page)] = False + + titles: List[str] = [] + seen: set[str] = set() + for movie in movies: + title = (movie.title or "").strip() + if not title or title in seen: + continue + seen.add(title) + if movie.id > 0: + self._id_by_title[title] = int(movie.id) + titles.append(title) + return titles + + def genre_has_more(self, genre: str, page: int) -> bool: + """Optional: tells the UI whether a genre has more pages after `page`.""" + genre = (genre or "").strip() + if not genre: + return False + self._ensure_genre_index() + genre_id = self._genre_id_by_name.get(genre) + if not genre_id: + return False + page = max(1, int(page or 1)) + cached = self._genre_has_more_by_id_page.get((int(genre_id), page)) + if cached is not None: + return bool(cached) + # If the page wasn't loaded yet, load it (fills the cache) and then report. + _ = self.titles_for_genre_page(genre, page) + return bool(self._genre_has_more_by_id_page.get((int(genre_id), page), False)) + + def seasons_for(self, title: str) -> List[str]: + # Beim Öffnen eines Titels: Detailseite anhand der ID abrufen (HTML) und cachen. + title = (title or "").strip() + if not title: + return [] + movie_id = self._ensure_title_id(title) + if movie_id is not None: + self._fetch_movie_detail(movie_id) + if _get_setting_bool(SETTING_ENABLE_PLAYBACK, default=False): + # Playback: expose a single "Stream" folder (inside: 1 playable item = Filmtitel). 
+ return ["Stream"] + return ["Details"] + + def episodes_for(self, title: str, season: str) -> List[str]: + season = (season or "").strip() + if season.casefold() == "stream" and _get_setting_bool(SETTING_ENABLE_PLAYBACK, default=False): + title = (title or "").strip() + return [title] if title else [] + return [] + + def stream_link_for(self, title: str, season: str, episode: str) -> Optional[str]: + if not _get_setting_bool(SETTING_ENABLE_PLAYBACK, default=False): + return None + title = (title or "").strip() + season = (season or "").strip() + episode = (episode or "").strip() + # Backwards compatible: + # - old: Film / Stream + # - new: Stream / + if not title: + return None + if season.casefold() == "film" and episode.casefold() == "stream": + pass + elif season.casefold() == "stream" and (episode == title or episode.casefold() == "stream"): + pass + else: + return None + movie_id = self._ensure_title_id(title) + if movie_id is None: + return None + stream_url = self._watch_stream_url(movie_id) + return stream_url or None + + def resolve_stream_link(self, link: str) -> Optional[str]: + try: + from resolveurl_backend import resolve as resolve_with_resolveurl + except Exception: + resolve_with_resolveurl = None + if callable(resolve_with_resolveurl): + return resolve_with_resolveurl(link) or link + return link + + def capabilities(self) -> Set[str]: + return {"new_titles", "genres"} + + def new_titles(self) -> List[str]: + if not REQUESTS_AVAILABLE: + return [] + if not self._get_base_url(): + return [] + # Backwards compatible: first page only. UI uses paging via `new_titles_page`. + return self.new_titles_page(1) diff --git a/dist/plugin.video.viewit/plugins/serienstream_plugin.py b/dist/plugin.video.viewit/plugins/serienstream_plugin.py new file mode 100644 index 0000000..8f139dc --- /dev/null +++ b/dist/plugin.video.viewit/plugins/serienstream_plugin.py @@ -0,0 +1,966 @@ +"""Serienstream (s.to) Integration als Downloader-Plugin. 
+ +Hinweise: +- Diese Integration nutzt optional `requests` + `beautifulsoup4` (bs4). +- In Kodi koennen zusaetzliche Debug-Funktionen ueber Addon-Settings aktiviert werden + (URL-Logging, HTML-Dumps, Benachrichtigungen). +""" + +from __future__ import annotations + +from dataclasses import dataclass, field +from datetime import datetime +import hashlib +import os +import re +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, TypeAlias + +try: # pragma: no cover - optional dependency + import requests + from bs4 import BeautifulSoup # type: ignore[import-not-found] +except ImportError as exc: # pragma: no cover - optional dependency + requests = None + BeautifulSoup = None + REQUESTS_AVAILABLE = False + REQUESTS_IMPORT_ERROR = exc +else: + REQUESTS_AVAILABLE = True + REQUESTS_IMPORT_ERROR = None + +try: # pragma: no cover - optional Kodi helpers + import xbmcaddon # type: ignore[import-not-found] + import xbmcvfs # type: ignore[import-not-found] + import xbmcgui # type: ignore[import-not-found] +except ImportError: # pragma: no cover - allow running outside Kodi + xbmcaddon = None + xbmcvfs = None + xbmcgui = None + +from plugin_interface import BasisPlugin +from plugin_helpers import dump_response_html, get_setting_bool, log_url, notify_url +from http_session_pool import get_requests_session +from regex_patterns import SEASON_EPISODE_TAG, SEASON_EPISODE_URL + +if TYPE_CHECKING: # pragma: no cover + from requests import Session as RequestsSession + from bs4 import BeautifulSoup as BeautifulSoupT # type: ignore[import-not-found] +else: # pragma: no cover + RequestsSession: TypeAlias = Any + BeautifulSoupT: TypeAlias = Any + + +BASE_URL = "https://s.to" +SERIES_BASE_URL = f"{BASE_URL}/serie/stream" +POPULAR_SERIES_URL = f"{BASE_URL}/beliebte-serien" +LATEST_EPISODES_URL = f"{BASE_URL}" +DEFAULT_PREFERRED_HOSTERS = ["voe"] +DEFAULT_TIMEOUT = 20 +ADDON_ID = "plugin.video.viewit" +GLOBAL_SETTING_LOG_URLS = "debug_log_urls" +GLOBAL_SETTING_DUMP_HTML = 
"debug_dump_html" +GLOBAL_SETTING_SHOW_URL_INFO = "debug_show_url_info" +HEADERS = { + "User-Agent": "Mozilla/5.0 (Kodi; ViewIt) AppleWebKit/537.36 (KHTML, like Gecko)", + "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", + "Accept-Language": "de-DE,de;q=0.9,en;q=0.8", + "Connection": "keep-alive", +} + + +@dataclass +class SeriesResult: + title: str + description: str + url: str + + +@dataclass +class EpisodeInfo: + number: int + title: str + original_title: str + url: str + season_label: str = "" + languages: List[str] = field(default_factory=list) + hosters: List[str] = field(default_factory=list) + + +@dataclass +class LatestEpisode: + series_title: str + season: int + episode: int + url: str + airdate: str + + +@dataclass +class SeasonInfo: + number: int + url: str + episodes: List[EpisodeInfo] + + +def _absolute_url(href: str) -> str: + return f"{BASE_URL}{href}" if href.startswith("/") else href + + +def _normalize_series_url(identifier: str) -> str: + if identifier.startswith("http://") or identifier.startswith("https://"): + return identifier.rstrip("/") + slug = identifier.strip("/") + return f"{SERIES_BASE_URL}/{slug}" + + +def _series_root_url(url: str) -> str: + """Normalisiert eine Serien-URL auf die Root-URL (ohne /staffel-x oder /episode-x).""" + normalized = (url or "").strip().rstrip("/") + normalized = re.sub(r"/staffel-\d+(?:/.*)?$", "", normalized) + normalized = re.sub(r"/episode-\d+(?:/.*)?$", "", normalized) + return normalized.rstrip("/") + + +def _log_visit(url: str) -> None: + _log_url(url, kind="VISIT") + _notify_url(url) + if xbmcaddon is None: + print(f"Visiting: {url}") + + +def _normalize_text(value: str) -> str: + """Legacy normalization (kept for backwards compatibility).""" + value = value.casefold() + value = re.sub(r"[^a-z0-9]+", "", value) + return value + + +def _normalize_search_text(value: str) -> str: + """Normalisiert Text für die Suche ohne Wortgrenzen zu "verschmelzen". 
+ + Wichtig: Wir ersetzen Nicht-Alphanumerisches durch Leerzeichen, statt es zu entfernen. + Dadurch entstehen keine künstlichen Treffer über Wortgrenzen hinweg (z.B. "an" + "na" -> "anna"). + """ + + value = (value or "").casefold() + value = re.sub(r"[^a-z0-9]+", " ", value) + value = re.sub(r"\s+", " ", value).strip() + return value + + +def _get_setting_bool(setting_id: str, *, default: bool = False) -> bool: + return get_setting_bool(ADDON_ID, setting_id, default=default) + + +def _notify_url(url: str) -> None: + notify_url(ADDON_ID, heading="Serienstream", url=url, enabled_setting_id=GLOBAL_SETTING_SHOW_URL_INFO) + + +def _log_url(url: str, *, kind: str = "VISIT") -> None: + log_url(ADDON_ID, enabled_setting_id=GLOBAL_SETTING_LOG_URLS, log_filename="serienstream_urls.log", url=url, kind=kind) + + +def _log_parsed_url(url: str) -> None: + _log_url(url, kind="PARSE") + + +def _log_response_html(url: str, body: str) -> None: + dump_response_html( + ADDON_ID, + enabled_setting_id=GLOBAL_SETTING_DUMP_HTML, + url=url, + body=body, + filename_prefix="s_to_response", + ) + + +def _ensure_requests() -> None: + if requests is None or BeautifulSoup is None: + raise RuntimeError("requests/bs4 sind nicht verfuegbar.") + + +def _looks_like_cloudflare_challenge(body: str) -> bool: + lower = body.lower() + markers = ( + "cf-browser-verification", + "cf-challenge", + "cf_chl", + "challenge-platform", + "attention required! 
| cloudflare", + "just a moment...", + "cloudflare ray id", + ) + return any(marker in lower for marker in markers) + + +def _get_soup(url: str, *, session: Optional[RequestsSession] = None) -> BeautifulSoupT: + _ensure_requests() + _log_visit(url) + sess = session or get_requests_session("serienstream", headers=HEADERS) + response = sess.get(url, headers=HEADERS, timeout=DEFAULT_TIMEOUT) + response.raise_for_status() + if response.url and response.url != url: + _log_url(response.url, kind="REDIRECT") + _log_response_html(url, response.text) + if _looks_like_cloudflare_challenge(response.text): + raise RuntimeError("Cloudflare-Schutz erkannt. requests reicht ggf. nicht aus.") + return BeautifulSoup(response.text, "html.parser") + + +def _get_soup_simple(url: str) -> BeautifulSoupT: + _ensure_requests() + _log_visit(url) + sess = get_requests_session("serienstream", headers=HEADERS) + response = sess.get(url, headers=HEADERS, timeout=DEFAULT_TIMEOUT) + response.raise_for_status() + if response.url and response.url != url: + _log_url(response.url, kind="REDIRECT") + _log_response_html(url, response.text) + if _looks_like_cloudflare_challenge(response.text): + raise RuntimeError("Cloudflare-Schutz erkannt. requests reicht ggf. nicht aus.") + return BeautifulSoup(response.text, "html.parser") + + +def search_series(query: str) -> List[SeriesResult]: + """Sucht Serien im (/serien)-Katalog (Genre-liste) nach Titel/Alt-Titel.""" + _ensure_requests() + normalized_query = _normalize_search_text(query) + if not normalized_query: + return [] + # Direkter Abruf wie in fetch_serien.py. 
def parse_series_catalog(soup: BeautifulSoupT) -> Dict[str, List[SeriesResult]]:
    """Parse the /serien overview page into a mapping of genre -> series list.

    Layout (as of 2026-01): a group header (``div.background-1 h3``) is followed
    by a sibling ``ul.series-list`` whose ``li.series-item`` entries carry the
    anchors; the ``data-search`` attribute of each item is kept as description.
    Groups without any usable series are omitted.
    """
    result: Dict[str, List[SeriesResult]] = {}
    for heading in soup.select("div.background-1 h3"):
        genre = (heading.get_text(strip=True) or "").strip()
        if not genre:
            continue
        series_list = heading.parent.find_next_sibling("ul", class_="series-list")
        if not series_list:
            continue
        entries: List[SeriesResult] = []
        for item in series_list.select("li.series-item"):
            link = item.find("a", href=True)
            if not link:
                continue
            href = (link.get("href") or "").strip()
            url = _absolute_url(href)
            if url:
                _log_parsed_url(url)
            # Only keep series root pages; drop season/episode deep links.
            if ("/serie/" not in url) or "/staffel-" in url or "/episode-" in url:
                continue
            title = (link.get_text(" ", strip=True) or "").strip()
            description = (item.get("data-search") or "").strip()
            if title:
                entries.append(SeriesResult(title=title, description=description, url=url))
        if entries:
            result[genre] = entries
    return result
href) + if match: + number = int(match.group(1)) + elif data_number.isdigit(): + number = int(data_number) + else: + label = anchor.get_text(strip=True) + if not label.isdigit(): + continue + number = int(label) + if number in seen_numbers: + continue + seen_numbers.add(number) + season_url = _absolute_url(href) + if season_url: + _log_parsed_url(season_url) + season_links.append((number, season_url)) + season_links.sort(key=lambda item: item[0]) + return season_links + + +def _extract_number_of_seasons(soup: BeautifulSoupT) -> Optional[int]: + tag = soup.select_one('meta[itemprop="numberOfSeasons"]') + if not tag: + return None + content = (tag.get("content") or "").strip() + if not content.isdigit(): + return None + count = int(content) + return count if count > 0 else None + + +def _extract_canonical_url(soup: BeautifulSoupT, fallback: str) -> str: + canonical = soup.select_one('link[rel="canonical"][href]') + href = (canonical.get("href") if canonical else "") or "" + href = href.strip() + if href.startswith("http://") or href.startswith("https://"): + return href.rstrip("/") + return fallback.rstrip("/") + + +def _extract_episodes(soup: BeautifulSoupT) -> List[EpisodeInfo]: + episodes: List[EpisodeInfo] = [] + season_label = "" + season_header = soup.select_one("section.episode-section h2") or soup.select_one("h2.h3") + if season_header: + season_label = (season_header.get_text(" ", strip=True) or "").strip() + + language_map = { + "german": "DE", + "english": "EN", + "japanese": "JP", + "turkish": "TR", + "spanish": "ES", + "italian": "IT", + "french": "FR", + "korean": "KO", + "russian": "RU", + "polish": "PL", + "portuguese": "PT", + "chinese": "ZH", + "arabic": "AR", + "thai": "TH", + } + # Neues Layout (Stand: 2026-01): Episoden-Tabelle mit Zeilen und onclick-URL. 
+ rows = soup.select("table.episode-table tbody tr.episode-row") + for index, row in enumerate(rows): + onclick = (row.get("onclick") or "").strip() + url = "" + if onclick: + match = re.search(r"location=['\\\"]([^'\\\"]+)['\\\"]", onclick) + if match: + url = _absolute_url(match.group(1)) + if not url: + anchor = row.find("a", href=True) + url = _absolute_url(anchor.get("href")) if anchor else "" + if url: + _log_parsed_url(url) + + number_tag = row.select_one(".episode-number-cell") + number_text = (number_tag.get_text(strip=True) if number_tag else "").strip() + match = re.search(r"/episode-(\d+)", url) if url else None + if match: + number = int(match.group(1)) + else: + digits = "".join(ch for ch in number_text if ch.isdigit()) + number = int(digits) if digits else index + 1 + + title_tag = row.select_one(".episode-title-ger") + original_tag = row.select_one(".episode-title-eng") + title = (title_tag.get_text(strip=True) if title_tag else "").strip() + original_title = (original_tag.get_text(strip=True) if original_tag else "").strip() + if not title: + title = f"Episode {number}" + + hosters: List[str] = [] + for img in row.select(".episode-watch-cell img"): + label = (img.get("alt") or img.get("title") or "").strip() + if label and label not in hosters: + hosters.append(label) + + languages: List[str] = [] + for flag in row.select(".episode-language-cell .watch-language"): + classes = flag.get("class") or [] + if isinstance(classes, str): + classes = classes.split() + for cls in classes: + if cls.startswith("svg-flag-"): + key = cls.replace("svg-flag-", "").strip() + if not key: + continue + value = language_map.get(key, key.upper()) + if value and value not in languages: + languages.append(value) + + episodes.append( + EpisodeInfo( + number=number, + title=title, + original_title=original_title, + url=url, + season_label=season_label, + languages=languages, + hosters=hosters, + ) + ) + if episodes: + return episodes + return episodes + + +def 
def fetch_episode_stream_link(
    episode_url: str,
    *,
    preferred_hosters: Optional[List[str]] = None,
) -> Optional[str]:
    """Open an episode page and return the play URL of the best-matching hoster.

    Hosters named in ``preferred_hosters`` (case-insensitive, in order) win;
    otherwise the first hoster button found on the page is used. Returns
    ``None`` when the page offers no hoster buttons at all.
    """
    _ensure_requests()
    page_url = _absolute_url(episode_url)
    wanted = [name.lower() for name in (preferred_hosters or DEFAULT_PREFERRED_HOSTERS)]
    http = get_requests_session("serienstream", headers=HEADERS)
    # Optional warm-up request: the landing page may 5xx while the episode
    # page itself still works, so failures here are ignored on purpose.
    try:
        _get_soup(BASE_URL, session=http)
    except Exception:
        pass
    page = _get_soup(page_url, session=http)
    found: List[Tuple[str, str]] = []
    for node in page.select("button.link-box[data-play-url]"):
        raw_play = (node.get("data-play-url") or "").strip()
        hoster_name = (node.get("data-provider-name") or "").strip()
        play_url = _absolute_url(raw_play)
        if play_url:
            _log_parsed_url(play_url)
        if hoster_name and play_url:
            found.append((hoster_name, play_url))
    if not found:
        return None
    for target in wanted:
        for hoster_name, play_url in found:
            if hoster_name.lower() == target:
                return play_url
    return found[0][1]
def _extract_latest_episodes(soup: BeautifulSoupT) -> List[LatestEpisode]:
    """Parse the latest episodes from the start page.

    Season/episode numbers are read from the row labels first ("S 1" / "E 2");
    when those are missing they are recovered from the episode URL. Rows that
    yield neither are skipped, and duplicates are de-duplicated by
    (url, season, episode).
    """
    episodes: List[LatestEpisode] = []
    seen: set[str] = set()

    for anchor in soup.select("a.latest-episode-row[href]"):
        href = (anchor.get("href") or "").strip()
        if not href or "/serie/" not in href:
            continue
        url = _absolute_url(href)
        if not url:
            continue

        title_node = anchor.select_one(".ep-title")
        series_title = (title_node.get("title") if title_node else "") or ""
        series_title = series_title.strip() or (title_node.get_text(strip=True) if title_node else "").strip()
        if not series_title:
            continue

        season_text = (anchor.select_one(".ep-season").get_text(strip=True) if anchor.select_one(".ep-season") else "").strip()
        episode_text = (anchor.select_one(".ep-episode").get_text(strip=True) if anchor.select_one(".ep-episode") else "").strip()
        season_number: Optional[int] = None
        episode_number: Optional[int] = None
        # BUGFIX: the patterns were written with doubled backslashes inside raw
        # strings (r"S\\s*(\\d+)"), which matches a literal backslash and never
        # the label text. Single escapes restore the intended "S 1"/"E 2" match.
        match = re.search(r"S\s*(\d+)", season_text, re.IGNORECASE)
        if match:
            season_number = int(match.group(1))
        match = re.search(r"E\s*(\d+)", episode_text, re.IGNORECASE)
        if match:
            episode_number = int(match.group(1))
        if season_number is None or episode_number is None:
            # Fall back to the URL pattern (.../staffel-N/episode-M).
            match = _LATEST_EPISODE_URL_RE.search(href)
            if match:
                season_number = int(match.group(1))
                episode_number = int(match.group(2))
        if season_number is None or episode_number is None:
            continue

        airdate_node = anchor.select_one(".ep-time")
        airdate = (airdate_node.get_text(" ", strip=True) if airdate_node else "").strip()

        # BUGFIX: the dedup separator was a literal backslash-t ("\\t"); a real
        # tab cannot occur in the stripped parts and is the intended separator.
        key = f"{url}\t{season_number}\t{episode_number}"
        if key in seen:
            continue
        seen.add(key)

        _log_parsed_url(url)
        episodes.append(
            LatestEpisode(
                series_title=series_title,
                season=int(season_number),
                episode=int(episode_number),
                url=url,
                airdate=airdate,
            )
        )

    return episodes
+ try: + _get_soup(BASE_URL, session=session) + except Exception: + pass + soup = _get_soup(series_url, session=session) + + base_series_url = _series_root_url(_extract_canonical_url(soup, series_url)) + season_links = _extract_season_links(soup) + season_count = _extract_number_of_seasons(soup) + if season_count and (not season_links or len(season_links) < season_count): + existing = {number for number, _ in season_links} + for number in range(1, season_count + 1): + if number in existing: + continue + season_url = f"{base_series_url}/staffel-{number}" + _log_parsed_url(season_url) + season_links.append((number, season_url)) + season_links.sort(key=lambda item: item[0]) + if max_seasons is not None: + season_links = season_links[:max_seasons] + seasons: List[SeasonInfo] = [] + for number, url in season_links: + season_soup = _get_soup(url, session=session) + episodes = _extract_episodes(season_soup) + seasons.append(SeasonInfo(number=number, url=url, episodes=episodes)) + seasons.sort(key=lambda s: s.number) + return seasons + + +class SerienstreamPlugin(BasisPlugin): + """Downloader-Plugin, das Serien von s.to ueber requests/bs4 bereitstellt.""" + + name = "Serienstream (s.to)" + POPULAR_GENRE_LABEL = "⭐ Beliebte Serien" + + def __init__(self) -> None: + self._series_results: Dict[str, SeriesResult] = {} + self._season_cache: Dict[str, List[SeasonInfo]] = {} + self._episode_label_cache: Dict[Tuple[str, str], Dict[str, EpisodeInfo]] = {} + self._catalog_cache: Optional[Dict[str, List[SeriesResult]]] = None + self._popular_cache: Optional[List[SeriesResult]] = None + self._requests_available = REQUESTS_AVAILABLE + self._default_preferred_hosters: List[str] = list(DEFAULT_PREFERRED_HOSTERS) + self._preferred_hosters: List[str] = list(self._default_preferred_hosters) + self._hoster_cache: Dict[Tuple[str, str, str], List[str]] = {} + self._latest_cache: Dict[int, List[LatestEpisode]] = {} + self._latest_hoster_cache: Dict[str, List[str]] = {} + self.is_available = 
True + self.unavailable_reason: Optional[str] = None + if not self._requests_available: # pragma: no cover - optional dependency + self.is_available = False + self.unavailable_reason = ( + "requests/bs4 fehlen. Installiere 'requests' und 'beautifulsoup4'." + ) + print( + "SerienstreamPlugin deaktiviert: requests/bs4 fehlen. " + "Installiere 'requests' und 'beautifulsoup4'." + ) + if REQUESTS_IMPORT_ERROR: + print(f"Importfehler: {REQUESTS_IMPORT_ERROR}") + return + + def _ensure_catalog(self) -> Dict[str, List[SeriesResult]]: + if self._catalog_cache is not None: + return self._catalog_cache + # Stand: 2026-01 liefert `?by=genre` konsistente Gruppen für `genres()`. + catalog_url = f"{BASE_URL}/serien?by=genre" + soup = _get_soup_simple(catalog_url) + self._catalog_cache = parse_series_catalog(soup) + return self._catalog_cache + + def genres(self) -> List[str]: + """Optional: Liefert alle Genres aus dem Serien-Katalog.""" + if not self._requests_available: + return [] + catalog = self._ensure_catalog() + return sorted(catalog.keys(), key=str.casefold) + + def capabilities(self) -> set[str]: + """Meldet unterstützte Features für Router-Menüs.""" + return {"popular_series", "genres", "latest_episodes"} + + def popular_series(self) -> List[str]: + """Liefert die Titel der beliebten Serien (Quelle: `/beliebte-serien`).""" + if not self._requests_available: + return [] + entries = self._ensure_popular() + self._series_results.update({entry.title: entry for entry in entries if entry.title}) + return [entry.title for entry in entries if entry.title] + + def titles_for_genre(self, genre: str) -> List[str]: + """Optional: Liefert Titel für ein Genre.""" + if not self._requests_available: + return [] + genre = (genre or "").strip() + if not genre: + return [] + if genre == self.POPULAR_GENRE_LABEL: + return self.popular_series() + catalog = self._ensure_catalog() + entries = catalog.get(genre, []) + self._series_results.update({entry.title: entry for entry in entries if 
entry.title}) + return [entry.title for entry in entries if entry.title] + + def _ensure_popular(self) -> List[SeriesResult]: + """Laedt und cached die Liste der beliebten Serien aus `/beliebte-serien`.""" + if self._popular_cache is not None: + return list(self._popular_cache) + soup = _get_soup_simple(POPULAR_SERIES_URL) + results: List[SeriesResult] = [] + seen: set[str] = set() + + # Neues Layout (Stand: 2026-01): Abschnitt "Meistgesehen" hat Karten mit + # `a.show-card` und Titel im `img alt=...`. + anchors = None + for section in soup.select("div.mb-5"): + h2 = section.select_one("h2") + label = (h2.get_text(" ", strip=True) if h2 else "").casefold() + if "meistgesehen" in label: + anchors = section.select("a.show-card[href]") + break + if anchors is None: + anchors = soup.select("a.show-card[href]") + + for anchor in anchors: + href = (anchor.get("href") or "").strip() + if not href or "/serie/" not in href: + continue + img = anchor.select_one("img[alt]") + title = ((img.get("alt") if img else "") or "").strip() + if not title or title in seen: + continue + url = _absolute_url(href).split("#", 1)[0].split("?", 1)[0].rstrip("/") + url = re.sub(r"/staffel-\\d+(?:/.*)?$", "", url).rstrip("/") + if not url: + continue + _log_parsed_url(url) + seen.add(title) + results.append(SeriesResult(title=title, description="", url=url)) + + + self._popular_cache = list(results) + return list(results) + + @staticmethod + def _season_label(number: int) -> str: + return f"Staffel {number}" + + @staticmethod + def _episode_label(info: EpisodeInfo) -> str: + suffix_parts: List[str] = [] + if info.original_title: + suffix_parts.append(info.original_title) + # Staffel nicht im Episoden-Label anzeigen (wird im UI bereits gesetzt). 
+ suffix = f" ({' | '.join(suffix_parts)})" if suffix_parts else "" + + return f"Episode {info.number}: {info.title}{suffix}" + + @staticmethod + def _parse_season_number(label: str) -> Optional[int]: + digits = "".join(ch for ch in label if ch.isdigit()) + if not digits: + return None + return int(digits) + + def _clear_episode_cache_for_title(self, title: str) -> None: + keys_to_remove = [key for key in self._episode_label_cache if key[0] == title] + for key in keys_to_remove: + self._episode_label_cache.pop(key, None) + keys_to_remove = [key for key in self._hoster_cache if key[0] == title] + for key in keys_to_remove: + self._hoster_cache.pop(key, None) + + def _cache_episode_labels(self, title: str, season_label: str, season_info: SeasonInfo) -> None: + cache_key = (title, season_label) + self._episode_label_cache[cache_key] = { + self._episode_label(info): info for info in season_info.episodes + } + + def _lookup_episode(self, title: str, season_label: str, episode_label: str) -> Optional[EpisodeInfo]: + cache_key = (title, season_label) + cached = self._episode_label_cache.get(cache_key) + if cached: + return cached.get(episode_label) + + seasons = self._ensure_seasons(title) + number = self._parse_season_number(season_label) + if number is None: + return None + + for season_info in seasons: + if season_info.number == number: + self._cache_episode_labels(title, season_label, season_info) + return self._episode_label_cache.get(cache_key, {}).get(episode_label) + return None + + async def search_titles(self, query: str) -> List[str]: + query = query.strip() + if not query: + self._series_results.clear() + self._season_cache.clear() + self._episode_label_cache.clear() + self._catalog_cache = None + return [] + if not self._requests_available: + raise RuntimeError("SerienstreamPlugin kann ohne requests/bs4 nicht suchen.") + try: + # Nutzt den Katalog (/serien), der jetzt nach Genres gruppiert ist. 
+ # Alternativ gäbe es ein Ajax-Endpoint, aber der ist nicht immer zuverlässig erreichbar. + results = search_series(query) + except Exception as exc: # pragma: no cover - defensive logging + self._series_results.clear() + self._season_cache.clear() + self._episode_label_cache.clear() + self._catalog_cache = None + raise RuntimeError(f"Serienstream-Suche fehlgeschlagen: {exc}") from exc + self._series_results = {result.title: result for result in results} + self._season_cache.clear() + self._episode_label_cache.clear() + return [result.title for result in results] + + def _ensure_seasons(self, title: str) -> List[SeasonInfo]: + if title in self._season_cache: + seasons = self._season_cache[title] + # Auch bei Cache-Treffern die URLs loggen, damit nachvollziehbar bleibt, + # welche Seiten für Staffel-/Episodenlisten relevant sind. + if _get_setting_bool(GLOBAL_SETTING_LOG_URLS, default=False): + series = self._series_results.get(title) + if series and series.url: + _log_url(series.url, kind="CACHE") + for season in seasons: + if season.url: + _log_url(season.url, kind="CACHE") + return seasons + series = self._series_results.get(title) + if not series: + # Kodi startet das Plugin pro Navigation neu -> Such-Cache im RAM geht verloren. + # Daher den Titel erneut im Katalog auflösen, um die Serien-URL zu bekommen. 
+ catalog = self._ensure_catalog() + lookup_key = title.casefold().strip() + for entries in catalog.values(): + for entry in entries: + if entry.title.casefold().strip() == lookup_key: + series = entry + self._series_results[entry.title] = entry + break + if series: + break + if not series: + return [] + try: + seasons = scrape_series_detail(series.url) + except Exception as exc: # pragma: no cover - defensive logging + raise RuntimeError(f"Serienstream-Staffeln konnten nicht geladen werden: {exc}") from exc + self._clear_episode_cache_for_title(title) + self._season_cache[title] = seasons + return seasons + + def seasons_for(self, title: str) -> List[str]: + seasons = self._ensure_seasons(title) + # Serienstream liefert gelegentlich Staffeln ohne Episoden (z.B. Parsing-/Layoutwechsel). + # Diese sollen im UI nicht als auswählbarer Menüpunkt erscheinen. + return [self._season_label(season.number) for season in seasons if season.episodes] + + def episodes_for(self, title: str, season: str) -> List[str]: + seasons = self._ensure_seasons(title) + number = self._parse_season_number(season) + if number is None: + return [] + for season_info in seasons: + if season_info.number == number: + labels = [self._episode_label(info) for info in season_info.episodes] + self._cache_episode_labels(title, season, season_info) + return labels + return [] + + def stream_link_for(self, title: str, season: str, episode: str) -> Optional[str]: + if not self._requests_available: + raise RuntimeError("SerienstreamPlugin kann ohne requests/bs4 keine Stream-Links liefern.") + episode_info = self._lookup_episode(title, season, episode) + if not episode_info: + return None + try: + link = fetch_episode_stream_link( + episode_info.url, + preferred_hosters=self._preferred_hosters, + ) + if link: + _log_url(link, kind="FOUND") + return link + except Exception as exc: # pragma: no cover - defensive logging + raise RuntimeError(f"Stream-Link konnte nicht geladen werden: {exc}") from exc + + def 
available_hosters_for(self, title: str, season: str, episode: str) -> List[str]: + if not self._requests_available: + raise RuntimeError("SerienstreamPlugin kann ohne requests/bs4 keine Hoster laden.") + cache_key = (title, season, episode) + cached = self._hoster_cache.get(cache_key) + if cached is not None: + return list(cached) + + episode_info = self._lookup_episode(title, season, episode) + if not episode_info: + return [] + try: + names = fetch_episode_hoster_names(episode_info.url) + except Exception as exc: # pragma: no cover - defensive logging + raise RuntimeError(f"Hoster konnten nicht geladen werden: {exc}") from exc + self._hoster_cache[cache_key] = list(names) + return list(names) + + def latest_episodes(self, page: int = 1) -> List[LatestEpisode]: + """Liefert die neuesten Episoden aus `/neue-episoden`.""" + if not self._requests_available: + return [] + try: + page = int(page or 1) + except Exception: + page = 1 + page = max(1, page) + cached = self._latest_cache.get(page) + if cached is not None: + return list(cached) + + url = LATEST_EPISODES_URL + if page > 1: + url = f"{url}?page={page}" + soup = _get_soup_simple(url) + episodes = _extract_latest_episodes(soup) + self._latest_cache[page] = list(episodes) + return list(episodes) + + def available_hosters_for_url(self, episode_url: str) -> List[str]: + if not self._requests_available: + raise RuntimeError("SerienstreamPlugin kann ohne requests/bs4 keine Hoster laden.") + normalized = _absolute_url(episode_url) + cached = self._latest_hoster_cache.get(normalized) + if cached is not None: + return list(cached) + try: + names = fetch_episode_hoster_names(normalized) + except Exception as exc: # pragma: no cover - defensive logging + raise RuntimeError(f"Hoster konnten nicht geladen werden: {exc}") from exc + self._latest_hoster_cache[normalized] = list(names) + return list(names) + + def stream_link_for_url(self, episode_url: str) -> Optional[str]: + if not self._requests_available: + raise 
RuntimeError("SerienstreamPlugin kann ohne requests/bs4 keine Stream-Links liefern.") + normalized = _absolute_url(episode_url) + try: + link = fetch_episode_stream_link( + normalized, + preferred_hosters=self._preferred_hosters, + ) + if link: + _log_url(link, kind="FOUND") + return link + except Exception as exc: # pragma: no cover - defensive logging + raise RuntimeError(f"Stream-Link konnte nicht geladen werden: {exc}") from exc + + def resolve_stream_link(self, link: str) -> Optional[str]: + if not self._requests_available: + raise RuntimeError("SerienstreamPlugin kann ohne requests/bs4 keine Stream-Links aufloesen.") + try: + resolved = resolve_redirect(link) + if not resolved: + return None + try: + from resolveurl_backend import resolve as resolve_with_resolveurl + except Exception: + resolve_with_resolveurl = None + if callable(resolve_with_resolveurl): + resolved_by_resolveurl = resolve_with_resolveurl(resolved) + if resolved_by_resolveurl: + _log_url("ResolveURL", kind="HOSTER_RESOLVER") + _log_url(resolved_by_resolveurl, kind="MEDIA") + return resolved_by_resolveurl + _log_url(resolved, kind="FINAL") + return resolved + except Exception as exc: # pragma: no cover - defensive logging + raise RuntimeError(f"Stream-Link konnte nicht verfolgt werden: {exc}") from exc + + def set_preferred_hosters(self, hosters: List[str]) -> None: + normalized = [hoster.strip().lower() for hoster in hosters if hoster.strip()] + if normalized: + self._preferred_hosters = normalized + + def reset_preferred_hosters(self) -> None: + self._preferred_hosters = list(self._default_preferred_hosters) + + +# Alias für die automatische Plugin-Erkennung. 
+Plugin = SerienstreamPlugin diff --git a/dist/plugin.video.viewit/plugins/topstreamfilm_plugin.py b/dist/plugin.video.viewit/plugins/topstreamfilm_plugin.py new file mode 100644 index 0000000..7e03ebc --- /dev/null +++ b/dist/plugin.video.viewit/plugins/topstreamfilm_plugin.py @@ -0,0 +1,1027 @@ +"""HTML-basierte Integration fuer eine Streaming-/Mediathek-Seite (Template). + +Dieses Plugin ist als Startpunkt gedacht, um eine eigene/autorisiert betriebene +Seite mit einer HTML-Suche in ViewIt einzubinden. + +Hinweise: +- Nutzt optional `requests` + `beautifulsoup4` (bs4). +- `search_titles` liefert eine Trefferliste (Titel-Strings). +- `seasons_for` / `episodes_for` können für Filme als Single-Season/Single-Episode + modelliert werden (z.B. Staffel 1, Episode 1) oder komplett leer bleiben, + solange nur Serien unterstützt werden. +""" + +from __future__ import annotations + +from dataclasses import dataclass +from datetime import datetime +import hashlib +import os +import re +import json +from typing import TYPE_CHECKING, Any, Dict, List, Optional, TypeAlias +from urllib.parse import urlencode, urljoin + +try: # pragma: no cover - optional dependency + import requests + from bs4 import BeautifulSoup # type: ignore[import-not-found] +except ImportError as exc: # pragma: no cover - optional dependency + requests = None + BeautifulSoup = None + REQUESTS_AVAILABLE = False + REQUESTS_IMPORT_ERROR = exc +else: + REQUESTS_AVAILABLE = True + REQUESTS_IMPORT_ERROR = None + +try: # pragma: no cover - optional Kodi helpers + import xbmcaddon # type: ignore[import-not-found] + import xbmcvfs # type: ignore[import-not-found] + import xbmcgui # type: ignore[import-not-found] +except ImportError: # pragma: no cover - allow running outside Kodi + xbmcaddon = None + xbmcvfs = None + xbmcgui = None + +from plugin_interface import BasisPlugin +from plugin_helpers import dump_response_html, get_setting_bool, log_url, notify_url +from regex_patterns import DIGITS + +if TYPE_CHECKING: 
# pragma: no cover + from requests import Session as RequestsSession + from bs4 import BeautifulSoup as BeautifulSoupT # type: ignore[import-not-found] +else: # pragma: no cover + RequestsSession: TypeAlias = Any + BeautifulSoupT: TypeAlias = Any + + +ADDON_ID = "plugin.video.viewit" +SETTING_BASE_URL = "topstream_base_url" +DEFAULT_BASE_URL = "https://www.meineseite" +GLOBAL_SETTING_LOG_URLS = "debug_log_urls" +GLOBAL_SETTING_DUMP_HTML = "debug_dump_html" +GLOBAL_SETTING_SHOW_URL_INFO = "debug_show_url_info" +SETTING_GENRE_MAX_PAGES = "topstream_genre_max_pages" +DEFAULT_TIMEOUT = 20 +DEFAULT_PREFERRED_HOSTERS = ["supervideo", "dropload", "voe"] +MEINECLOUD_HOST = "meinecloud.click" +DEFAULT_GENRE_MAX_PAGES = 20 +HARD_MAX_GENRE_PAGES = 200 +HEADERS = { + "User-Agent": "Mozilla/5.0 (Kodi; ViewIt) AppleWebKit/537.36 (KHTML, like Gecko)", + "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", + "Accept-Language": "de-DE,de;q=0.9,en;q=0.8", + "Connection": "keep-alive", +} + + +@dataclass(frozen=True) +class SearchHit: + """Interner Treffer mit Title + URL.""" + + title: str + url: str + description: str = "" + + +def _normalize_search_text(value: str) -> str: + """Normalisiert Text für robuste, wortbasierte Suche/Filter. + + Wir ersetzen Nicht-Alphanumerisches durch Leerzeichen und kollabieren Whitespace. + Dadurch kann z.B. "Star Trek: Lower Decks – Der Film" sauber auf Tokens gematcht werden. + """ + + value = (value or "").casefold() + value = re.sub(r"[^a-z0-9]+", " ", value) + value = re.sub(r"\s+", " ", value).strip() + return value + + +def _matches_query(query: str, *, title: str, description: str) -> bool: + normalized_query = _normalize_search_text(query) + if not normalized_query: + return False + haystack = _normalize_search_text(title) + if not haystack: + return False + return normalized_query in haystack + + +def _strip_der_film_suffix(title: str) -> str: + """Entfernt den Suffix 'Der Film' am Ende, z.B. 
class TopstreamfilmPlugin(BasisPlugin):
    """Integration for an HTML-based streaming search site (TopStreamFilm layout)."""

    # Display name used by the ViewIt plugin loader.
    name = "TopStreamFilm"

    def __init__(self) -> None:
        # Lazily created shared HTTP session (see _get_session()).
        self._session: RequestsSession | None = None
        # title -> detail page URL (persisted via _save_title_url_cache()).
        self._title_to_url: Dict[str, str] = {}
        # genre name -> listing URL (persisted via _save_genre_cache()).
        self._genre_to_url: Dict[str, str] = {}
        # Movie titles mapped to their embedded MeineCloud iframe URL.
        self._movie_iframe_url: Dict[str, str] = {}
        # Titles whose listing entry carried a "... Der Film" hint (movie, not series).
        self._movie_title_hint: set[str] = set()
        # genre name -> last known pagination page number.
        self._genre_last_page: Dict[str, int] = {}
        # Navigation caches keyed by title / (title, season) / (title, season, episode).
        self._season_cache: Dict[str, List[str]] = {}
        self._episode_cache: Dict[tuple[str, str], List[str]] = {}
        self._episode_to_url: Dict[tuple[str, str, str], str] = {}
        self._episode_to_hosters: Dict[tuple[str, str, str], Dict[str, str]] = {}
        self._season_to_episode_numbers: Dict[tuple[str, str], List[int]] = {}
        # (title, season number, episode number) -> episode title text.
        self._episode_title_by_number: Dict[tuple[str, int, int], str] = {}
        # Raw HTML of already-fetched detail pages, keyed by title.
        self._detail_html_cache: Dict[str, str] = {}
        # Cached "popular" listing; None means "not fetched yet".
        self._popular_cache: List[str] | None = None
        self._default_preferred_hosters: List[str] = list(DEFAULT_PREFERRED_HOSTERS)
        self._preferred_hosters: List[str] = list(self._default_preferred_hosters)
        # Availability flag consumed by the plugin loader; reason text is user-facing.
        self.is_available = REQUESTS_AVAILABLE
        self.unavailable_reason = None if REQUESTS_AVAILABLE else f"requests/bs4 fehlen: {REQUESTS_IMPORT_ERROR}"
        self._load_title_url_cache()
        self._load_genre_cache()

    def _cache_dir(self) -> str:
        """Directory for persistent caches.

        Prefers the Kodi addon profile directory; outside Kodi (tests, CLI)
        it falls back to the plugin's own directory.
        """
        if xbmcaddon and xbmcvfs:
            try:
                addon = xbmcaddon.Addon(ADDON_ID)
                profile = xbmcvfs.translatePath(addon.getAddonInfo("profile"))
                if not xbmcvfs.exists(profile):
                    xbmcvfs.mkdirs(profile)
                return profile
            except Exception:
                pass  # fall back to the module directory below
        return os.path.dirname(__file__)

    def _title_url_cache_path(self) -> str:
        """Path of the JSON file that persists the title -> URL mapping."""
        return os.path.join(self._cache_dir(), "topstream_title_url_cache.json")
def _save_title_url_cache(self) -> None:
    """Persist ``self._title_to_url`` to disk, merged with other base URLs.

    On-disk layout: ``{base_url: {title: url}}``. Persistence is best effort:
    every error (I/O, JSON) is swallowed so that navigation never breaks on a
    cache failure.
    """
    path = self._title_url_cache_path()
    try:
        base_url = self._get_base_url()
        store: Dict[str, Dict[str, str]] = {}
        # merge with existing
        try:
            # Prefer the Kodi VFS when available, else the plain filesystem.
            if xbmcvfs and xbmcvfs.exists(path):
                handle = xbmcvfs.File(path)
                existing_raw = handle.read()
                handle.close()
            elif os.path.exists(path):
                with open(path, "r", encoding="utf-8") as handle:
                    existing_raw = handle.read()
            else:
                existing_raw = ""
            existing = json.loads(existing_raw or "{}")
            if isinstance(existing, dict):
                # Only reuse the file when it already has the nested {base: {title: url}} layout.
                if all(isinstance(k, str) and isinstance(v, dict) for k, v in existing.items()):
                    store = {k: dict(v) for k, v in existing.items()}  # type: ignore[arg-type]
        except Exception:
            store = {}

        store[base_url] = dict(self._title_to_url)
        payload = json.dumps(store, ensure_ascii=False, sort_keys=True)
    except Exception:
        return
    try:
        if xbmcaddon and xbmcvfs:
            directory = os.path.dirname(path)
            if directory and not xbmcvfs.exists(directory):
                xbmcvfs.mkdirs(directory)
            handle = xbmcvfs.File(path, "w")
            handle.write(payload)
            handle.close()
        else:
            with open(path, "w", encoding="utf-8") as handle:
                handle.write(payload)
    except Exception:
        return
def _load_genre_cache(self) -> None:
    """Populate ``self._genre_to_url`` from the persisted JSON cache.

    The file maps ``{base_url: {genre: url}}``; only entries for the currently
    configured base URL are loaded. Any failure leaves the in-memory mapping
    untouched (best effort).
    """
    path = self._genre_cache_path()
    try:
        # Prefer the Kodi VFS when available, else the plain filesystem.
        if xbmcvfs and xbmcvfs.exists(path):
            handle = xbmcvfs.File(path)
            raw = handle.read()
            handle.close()
        elif os.path.exists(path):
            with open(path, "r", encoding="utf-8") as handle:
                raw = handle.read()
        else:
            return
        loaded = json.loads(raw or "{}")
        if isinstance(loaded, dict):
            base_url = self._get_base_url()
            mapping = loaded.get(base_url)
            if isinstance(mapping, dict):
                for genre, url in mapping.items():
                    # setdefault: never overwrite entries discovered at runtime.
                    if isinstance(genre, str) and isinstance(url, str) and genre.strip() and url.strip():
                        self._genre_to_url.setdefault(genre.strip(), url.strip())
    except Exception:
        return

def _save_genre_cache(self) -> None:
    """Persist ``self._genre_to_url`` to disk, merged with other base URLs.

    Mirrors :meth:`_save_title_url_cache`: nested ``{base_url: {genre: url}}``
    layout, best-effort error handling throughout.
    """
    path = self._genre_cache_path()
    try:
        base_url = self._get_base_url()
        store: Dict[str, Dict[str, str]] = {}
        try:
            if xbmcvfs and xbmcvfs.exists(path):
                handle = xbmcvfs.File(path)
                existing_raw = handle.read()
                handle.close()
            elif os.path.exists(path):
                with open(path, "r", encoding="utf-8") as handle:
                    existing_raw = handle.read()
            else:
                existing_raw = ""
            existing = json.loads(existing_raw or "{}")
            if isinstance(existing, dict):
                # Only reuse the file when it already has the nested layout.
                if all(isinstance(k, str) and isinstance(v, dict) for k, v in existing.items()):
                    store = {k: dict(v) for k, v in existing.items()}  # type: ignore[arg-type]
        except Exception:
            store = {}
        store[base_url] = dict(self._genre_to_url)
        payload = json.dumps(store, ensure_ascii=False, sort_keys=True)
    except Exception:
        return
    try:
        if xbmcaddon and xbmcvfs:
            directory = os.path.dirname(path)
            if directory and not xbmcvfs.exists(directory):
                xbmcvfs.mkdirs(directory)
            handle = xbmcvfs.File(path, "w")
            handle.write(payload)
            handle.close()
        else:
            with open(path, "w", encoding="utf-8") as handle:
                handle.write(payload)
    except Exception:
        return
RuntimeError(self.unavailable_reason or "requests nicht verfügbar.") + if self._session is None: + session = requests.Session() + session.headers.update(HEADERS) + self._session = session + return self._session + + def _get_base_url(self) -> str: + base = DEFAULT_BASE_URL + if xbmcaddon is not None: + try: + addon = xbmcaddon.Addon(ADDON_ID) + raw = (addon.getSetting(SETTING_BASE_URL) or "").strip() + if raw: + base = raw + except Exception: + pass + base = (base or "").strip() + if not base: + return DEFAULT_BASE_URL + if not base.startswith("http://") and not base.startswith("https://"): + base = "https://" + base + return base.rstrip("/") + + def _absolute_url(self, href: str) -> str: + return urljoin(self._get_base_url() + "/", href or "") + + @staticmethod + def _absolute_external_url(href: str, *, base: str = "") -> str: + href = (href or "").strip() + if not href: + return "" + if href.startswith("//"): + return "https:" + href + if href.startswith("http://") or href.startswith("https://"): + return href + if base: + return urljoin(base if base.endswith("/") else base + "/", href) + return href + + def _get_setting_bool(self, setting_id: str, *, default: bool = False) -> bool: + return get_setting_bool(ADDON_ID, setting_id, default=default) + + def _get_setting_int(self, setting_id: str, *, default: int) -> int: + if xbmcaddon is None: + return default + try: + addon = xbmcaddon.Addon(ADDON_ID) + getter = getattr(addon, "getSettingInt", None) + if callable(getter): + return int(getter(setting_id)) + raw = str(addon.getSetting(setting_id) or "").strip() + return int(raw) if raw else default + except Exception: + return default + + def _notify_url(self, url: str) -> None: + notify_url(ADDON_ID, heading=self.name, url=url, enabled_setting_id=GLOBAL_SETTING_SHOW_URL_INFO) + + def _log_url(self, url: str, *, kind: str = "VISIT") -> None: + log_url(ADDON_ID, enabled_setting_id=GLOBAL_SETTING_LOG_URLS, log_filename="topstream_urls.log", url=url, kind=kind) + + def 
_log_response_html(self, url: str, body: str) -> None: + dump_response_html( + ADDON_ID, + enabled_setting_id=GLOBAL_SETTING_DUMP_HTML, + url=url, + body=body, + filename_prefix="topstream_response", + ) + + def capabilities(self) -> set[str]: + return {"genres", "popular_series"} + + def _popular_url(self) -> str: + return self._absolute_url("/beliebte-filme-online.html") + + def popular_series(self) -> List[str]: + """Liefert die "Meist gesehen"/"Beliebte Filme" Liste. + + Quelle: `/beliebte-filme-online.html` (TopStreamFilm Template). + """ + if self._popular_cache is not None: + return list(self._popular_cache) + if not REQUESTS_AVAILABLE or BeautifulSoup is None: + self._popular_cache = [] + return [] + try: + soup = self._get_soup(self._popular_url()) + except Exception: + self._popular_cache = [] + return [] + + hits = self._parse_listing_titles(soup) + titles: List[str] = [] + seen: set[str] = set() + for hit in hits: + if not hit.title or hit.title in seen: + continue + seen.add(hit.title) + self._title_to_url[hit.title] = hit.url + titles.append(hit.title) + if titles: + self._save_title_url_cache() + self._popular_cache = list(titles) + return list(titles) + + def _parse_genres_from_home(self, soup: BeautifulSoupT) -> Dict[str, str]: + genres: Dict[str, str] = {} + if soup is None: + return genres + + # Primär: im Header-Menü unter "KATEGORIEN" + categories_anchor = None + for anchor in soup.select("li.menu-item-has-children a"): + text = (anchor.get_text(" ", strip=True) or "").strip().casefold() + if text == "kategorien": + categories_anchor = anchor + break + if categories_anchor is not None: + try: + parent = categories_anchor.find_parent("li") + except Exception: + parent = None + if parent is not None: + for anchor in parent.select("ul.sub-menu li.cat-item a[href]"): + name = (anchor.get_text(" ", strip=True) or "").strip() + href = (anchor.get("href") or "").strip() + if not name or not href: + continue + genres[name] = self._absolute_url(href) + 
+ # Fallback: allgemeine cat-item Links (falls Theme anders ist) + if not genres: + for anchor in soup.select("li.cat-item a[href]"): + name = (anchor.get_text(" ", strip=True) or "").strip() + href = (anchor.get("href") or "").strip() + if not name or not href: + continue + genres[name] = self._absolute_url(href) + + return genres + + def _extract_first_int(self, value: str) -> Optional[int]: + match = re.search(DIGITS, value or "") + return int(match.group(1)) if match else None + + def _strip_links_text(self, node: Any) -> str: + """Extrahiert den Text eines Nodes ohne Linktexte/URLs.""" + if BeautifulSoup is None: + return "" + try: + fragment = BeautifulSoup(str(node), "html.parser") + for anchor in fragment.select("a"): + anchor.extract() + return (fragment.get_text(" ", strip=True) or "").strip() + except Exception: + return "" + + def _clear_stream_index_for_title(self, title: str) -> None: + for key in list(self._season_to_episode_numbers.keys()): + if key[0] == title: + self._season_to_episode_numbers.pop(key, None) + for key in list(self._episode_to_hosters.keys()): + if key[0] == title: + self._episode_to_hosters.pop(key, None) + for key in list(self._episode_title_by_number.keys()): + if key[0] == title: + self._episode_title_by_number.pop(key, None) + + def _parse_stream_accordion(self, soup: BeautifulSoupT, *, title: str) -> None: + """Parst Staffel/Episode/Hoster-Links aus der Detailseite (Accordion).""" + if not soup or not title: + return + + accordion = soup.select_one("#se-accordion") or soup.select_one(".su-accordion#se-accordion") + if accordion is None: + return + + self._clear_stream_index_for_title(title) + + for spoiler in accordion.select(".su-spoiler"): + season_title = spoiler.select_one(".su-spoiler-title") + if not season_title: + continue + + season_text = (season_title.get_text(" ", strip=True) or "").strip() + season_number = self._extract_first_int(season_text) + if season_number is None: + continue + season_label = f"Staffel 
{season_number}" + + data_target = (season_title.get("data-target") or "").strip() + content = spoiler.select_one(data_target) if data_target.startswith("#") else None + if content is None: + content = spoiler.select_one(".su-spoiler-content") + if content is None: + continue + + episode_numbers: set[int] = set() + for row in content.select(".cu-ss"): + raw_text = self._strip_links_text(row) + raw_text = (raw_text or "").strip() + if not raw_text: + continue + + match = re.search( + r"(?P\d+)\s*x\s*(?P\d+)\s*(?P.*)$", + raw_text, + flags=re.IGNORECASE, + ) + if not match: + continue + row_season = int(match.group("s")) + episode_number = int(match.group("e")) + if row_season != season_number: + continue + + rest = (match.group("rest") or "").strip().replace("–", "-") + # Links stehen als
im HTML, d.h. hier bleibt normalerweise nur "Episode X –" übrig. + if "-" in rest: + rest = rest.split("-", 1)[0].strip() + rest = re.sub(r"\bepisode\s*\d+\b", "", rest, flags=re.IGNORECASE).strip() + rest = re.sub(r"^\W+|\W+$", "", rest).strip() + if rest: + self._episode_title_by_number[(title, season_number, episode_number)] = rest + + hosters: Dict[str, str] = {} + for anchor in row.select("a[href]"): + name = (anchor.get_text(" ", strip=True) or "").strip() + href = (anchor.get("href") or "").strip() + if not name or not href: + continue + hosters[name] = href + if not hosters: + continue + + episode_label = f"Episode {episode_number}" + ep_title = self._episode_title_by_number.get((title, season_number, episode_number), "") + if ep_title: + episode_label = f"Episode {episode_number}: {ep_title}" + + self._episode_to_hosters[(title, season_label, episode_label)] = hosters + episode_numbers.add(episode_number) + + self._season_to_episode_numbers[(title, season_label)] = sorted(episode_numbers) + + def _ensure_stream_index(self, title: str) -> None: + """Stellt sicher, dass Staffel/Episoden/Hoster aus der Detailseite geparst sind.""" + title = (title or "").strip() + if not title: + return + # Wenn bereits Staffeln im Index sind, nichts tun. 
def _get_soup(self, url: str) -> BeautifulSoupT:
    """Fetch *url* and parse the response into a BeautifulSoup tree.

    Raises RuntimeError when requests/bs4 are unavailable; HTTP errors
    propagate from ``raise_for_status``.
    """
    if BeautifulSoup is None or not REQUESTS_AVAILABLE:
        raise RuntimeError("requests/bs4 sind nicht verfuegbar.")
    session = self._get_session()
    # Debug hooks: optional URL logging and on-screen notification.
    self._log_url(url, kind="VISIT")
    self._notify_url(url)
    response = session.get(url, timeout=DEFAULT_TIMEOUT)
    response.raise_for_status()
    # Log the final URL (after redirects) and optionally dump the HTML body.
    self._log_url(response.url, kind="OK")
    self._log_response_html(response.url, response.text)
    return BeautifulSoup(response.text, "html.parser")

def _get_detail_soup(self, title: str) -> Optional[BeautifulSoupT]:
    """Return the parsed detail page for *title*, reusing the HTML cache.

    Returns None when the title has no known URL or requests/bs4 are missing.
    """
    title = (title or "").strip()
    if not title:
        return None
    url = self._title_to_url.get(title)
    if not url:
        return None
    if BeautifulSoup is None or not REQUESTS_AVAILABLE:
        return None
    cached_html = self._detail_html_cache.get(title)
    if cached_html:
        return BeautifulSoup(cached_html, "html.parser")
    soup = self._get_soup(url)
    try:
        # Cache the serialized HTML so later lookups avoid a re-fetch.
        self._detail_html_cache[title] = str(soup)
    except Exception:
        pass
    return soup

def _detect_movie_iframe_url(self, soup: BeautifulSoupT) -> str:
    """Detect movie detail pages via an embedded MeineCloud iframe.

    Returns the iframe ``src`` URL, or "" when no MeineCloud frame is found.
    """
    if not soup:
        return ""
    for frame in soup.select("iframe[src]"):
        src = (frame.get("src") or "").strip()
        if not src:
            continue
        if MEINECLOUD_HOST in src:
            return src
    return ""
    +
  • supervideo
  • +
  • dropload
  • +
  • 4K Server
  • +
+ """ + + hosters: Dict[str, str] = {} + if not soup: + return hosters + + for entry in soup.select("ul._player-mirrors li[data-link]"): + raw_link = (entry.get("data-link") or "").strip() + if not raw_link: + continue + name = (entry.get_text(" ", strip=True) or "").strip() + name = name or "Hoster" + url = self._absolute_external_url(raw_link, base=page_url) + if not url: + continue + hosters[name] = url + + # Falls "4K Server" wieder auf eine MeineCloud-Seite zeigt, versuchen wir einmal zu expandieren. + expanded: Dict[str, str] = {} + for name, url in list(hosters.items()): + if MEINECLOUD_HOST in url and "/fullhd/" in url: + try: + nested = self._get_soup(url) + except Exception: + continue + nested_hosters = self._parse_meinecloud_hosters(nested, page_url=url) + for nested_name, nested_url in nested_hosters.items(): + expanded.setdefault(nested_name, nested_url) + if expanded: + hosters.update(expanded) + + return hosters + + def _extract_last_page(self, soup: BeautifulSoupT) -> int: + """Liest aus `div.wp-pagenavi` die höchste Seitenzahl.""" + if not soup: + return 1 + numbers: List[int] = [] + for anchor in soup.select("div.wp-pagenavi a"): + text = (anchor.get_text(" ", strip=True) or "").strip() + if text.isdigit(): + try: + numbers.append(int(text)) + except Exception: + continue + return max(numbers) if numbers else 1 + + def _parse_listing_titles(self, soup: BeautifulSoupT) -> List[SearchHit]: + hits: List[SearchHit] = [] + if not soup: + return hits + for item in soup.select("li.TPostMv"): + anchor = item.select_one("a[href]") + if not anchor: + continue + href = (anchor.get("href") or "").strip() + if not href: + continue + title_tag = anchor.select_one("h3.Title") + raw_title = title_tag.get_text(" ", strip=True) if title_tag else anchor.get_text(" ", strip=True) + raw_title = (raw_title or "").strip() + is_movie_hint = bool(re.search(r"\bder\s+film\b", raw_title, flags=re.IGNORECASE)) + title = _strip_der_film_suffix(raw_title) + if not title: + 
def is_movie(self, title: str) -> bool:
    """Heuristic: is *title* a movie rather than a series?

    Fast path (no network): previously detected MeineCloud iframe URLs or
    "... Der Film" title hints. Slow path: fetch the detail page — per the
    TopStream layout, series pages carry a ``div.serie-menu`` season
    navigation, so its absence marks the title as a movie.
    """
    title = (title or "").strip()
    if not title:
        return False
    if title in self._movie_iframe_url or title in self._movie_title_hint:
        return True
    # Robust: check the detail page itself.
    soup = self._get_detail_soup(title)
    if soup is None:
        return False
    has_seasons = bool(soup.select_one("div.serie-menu") or soup.select_one(".serie-menu"))
    return not has_seasons

def genre_page_count(self, genre: str) -> int:
    """Optional: last pagination page of a genre listing (>= 1).

    Fetches page 1 on demand and memoizes the result per genre.
    """
    if not REQUESTS_AVAILABLE or BeautifulSoup is None:
        return 1
    genre = (genre or "").strip()
    if not genre:
        return 1
    if genre in self._genre_last_page:
        return max(1, int(self._genre_last_page[genre] or 1))
    if not self._genre_to_url:
        self.genres()  # lazily resolve the genre -> URL mapping
    url = self._genre_to_url.get(genre)
    if not url:
        return 1
    try:
        soup = self._get_soup(url)
    except Exception:
        return 1
    last_page = self._extract_last_page(soup)
    self._genre_last_page[genre] = max(1, int(last_page or 1))
    return self._genre_last_page[genre]
def _ensure_title_index(self, title: str) -> None:
    """Ensure movie/series info for *title* has been parsed from its detail page.

    Movies (detected via the MeineCloud iframe) are modeled as a single
    season "Film" with one episode "Stream" so ViewIt can navigate them;
    series are indexed through the streams accordion.
    """
    title = (title or "").strip()
    if not title:
        return

    # Already indexed?
    if title in self._movie_iframe_url:
        return
    if any(key[0] == title for key in self._season_to_episode_numbers.keys()):
        return

    soup = self._get_detail_soup(title)
    if soup is None:
        return

    movie_url = self._detect_movie_iframe_url(soup)
    if movie_url:
        self._movie_iframe_url[title] = movie_url
        # Model the movie as a single season/episode so ViewIt can navigate it.
        season_label = "Film"
        episode_label = "Stream"
        self._season_cache[title] = [season_label]
        self._episode_cache[(title, season_label)] = [episode_label]
        try:
            meinecloud_soup = self._get_soup(movie_url)
            hosters = self._parse_meinecloud_hosters(meinecloud_soup, page_url=movie_url)
        except Exception:
            hosters = {}
        # Fall back to the raw iframe URL when no mirrors could be parsed.
        self._episode_to_hosters[(title, season_label, episode_label)] = hosters or {"MeineCloud": movie_url}
        return

    # Otherwise: parse the series streams accordion (if present).
    self._parse_stream_accordion(soup, title=title)
+ + Erwartetes HTML (Snippet): + - Treffer: `li.TPostMv a[href]` + - Titel: `h3.Title` + """ + + if not REQUESTS_AVAILABLE: + return [] + query = (query or "").strip() + if not query: + return [] + + session = self._get_session() + url = self._get_base_url() + "/" + params = {"story": query, "do": "search", "subaction": "search"} + request_url = f"{url}?{urlencode(params)}" + self._log_url(request_url, kind="GET") + self._notify_url(request_url) + response = session.get( + url, + params=params, + timeout=DEFAULT_TIMEOUT, + ) + response.raise_for_status() + self._log_url(response.url, kind="OK") + self._log_response_html(response.url, response.text) + + if BeautifulSoup is None: + return [] + soup = BeautifulSoup(response.text, "html.parser") + + hits: List[SearchHit] = [] + for item in soup.select("li.TPostMv"): + anchor = item.select_one("a[href]") + if not anchor: + continue + href = (anchor.get("href") or "").strip() + if not href: + continue + title_tag = anchor.select_one("h3.Title") + raw_title = title_tag.get_text(" ", strip=True) if title_tag else anchor.get_text(" ", strip=True) + raw_title = (raw_title or "").strip() + is_movie_hint = bool(re.search(r"\bder\s+film\b", raw_title, flags=re.IGNORECASE)) + title = _strip_der_film_suffix(raw_title) + if not title: + continue + if is_movie_hint: + self._movie_title_hint.add(title) + description_tag = item.select_one(".TPMvCn .Description") + description = description_tag.get_text(" ", strip=True) if description_tag else "" + hit = SearchHit(title=title, url=self._absolute_url(href), description=description) + if _matches_query(query, title=hit.title, description=hit.description): + hits.append(hit) + + # Dedup + mapping fuer Navigation + self._title_to_url.clear() + titles: List[str] = [] + seen: set[str] = set() + for hit in hits: + if hit.title in seen: + continue + seen.add(hit.title) + self._title_to_url[hit.title] = hit.url + titles.append(hit.title) + self._save_title_url_cache() + return titles + + def 
def genres(self) -> List[str]:
    """Return all known genre names, sorted case-insensitively.

    Parsed once from the home page menu, then served from the in-memory
    mapping; results are also persisted via the genre cache.
    """
    if not REQUESTS_AVAILABLE or BeautifulSoup is None:
        return []
    if self._genre_to_url:
        return sorted(self._genre_to_url.keys(), key=lambda value: value.casefold())

    try:
        soup = self._get_soup(self._get_base_url() + "/")
    except Exception:
        return []
    parsed = self._parse_genres_from_home(soup)
    self._genre_to_url.clear()
    self._genre_to_url.update(parsed)
    self._save_genre_cache()
    return sorted(self._genre_to_url.keys(), key=lambda value: value.casefold())

def titles_for_genre(self, genre: str) -> List[str]:
    """Return page 1 of a genre listing, sorted case-insensitively."""
    if not REQUESTS_AVAILABLE or BeautifulSoup is None:
        return []
    genre = (genre or "").strip()
    if not genre:
        return []
    if not self._genre_to_url:
        self.genres()
    url = self._genre_to_url.get(genre)
    if not url:
        return []

    # Backwards compatible: only page 1 (paging runs through titles_for_genre_page()).
    titles = self.titles_for_genre_page(genre, 1)
    titles.sort(key=lambda value: value.casefold())
    return titles
def episodes_for(self, title: str, season: str) -> List[str]:
    """Return episode labels for *season* of *title* (e.g. "Episode 3: <name>").

    Movies are modeled as season "Film" with a single "Stream" episode.
    Results are memoized per (title, season).
    """
    title = (title or "").strip()
    season = (season or "").strip()
    if not title or not season or not REQUESTS_AVAILABLE or BeautifulSoup is None:
        return []

    self._ensure_title_index(title)
    if title in self._movie_iframe_url and season == "Film":
        return ["Stream"]

    cache_key = (title, season)
    cached = self._episode_cache.get(cache_key)
    if cached is not None:
        return list(cached)

    self._ensure_stream_index(title)
    episode_numbers = self._season_to_episode_numbers.get((title, season), [])
    episodes: List[str] = []
    season_number = self._extract_first_int(season) or 0
    for ep_no in episode_numbers:
        label = f"Episode {ep_no}"
        # Append the scraped episode title when the accordion provided one.
        ep_title = self._episode_title_by_number.get((title, season_number, ep_no), "")
        if ep_title:
            label = f"Episode {ep_no}: {ep_title}"
        episodes.append(label)

    self._episode_cache[cache_key] = list(episodes)
    return list(episodes)
"").strip() + if not title or not season or not episode: + return [] + if not REQUESTS_AVAILABLE or BeautifulSoup is None: + return [] + + self._ensure_title_index(title) + self._ensure_stream_index(title) + hosters = self._episode_to_hosters.get((title, season, episode), {}) + return sorted(hosters.keys(), key=lambda value: value.casefold()) + + def set_preferred_hosters(self, hosters: List[str]) -> None: + normalized = [hoster.strip().lower() for hoster in hosters if hoster and hoster.strip()] + if normalized: + self._preferred_hosters = normalized + + def reset_preferred_hosters(self) -> None: + self._preferred_hosters = list(self._default_preferred_hosters) + + def stream_link_for(self, title: str, season: str, episode: str) -> Optional[str]: + title = (title or "").strip() + season = (season or "").strip() + episode = (episode or "").strip() + if not title or not season or not episode: + return None + if not REQUESTS_AVAILABLE or BeautifulSoup is None: + return None + + self._ensure_title_index(title) + self._ensure_stream_index(title) + hosters = self._episode_to_hosters.get((title, season, episode), {}) + if not hosters: + return None + + preferred = [h.casefold() for h in (self._preferred_hosters or [])] + if preferred: + for preferred_name in preferred: + for actual_name, url in hosters.items(): + if actual_name.casefold() == preferred_name: + return url + + # Wenn nichts passt: deterministisch den ersten. + first_name = sorted(hosters.keys(), key=lambda value: value.casefold())[0] + return hosters.get(first_name) + + def resolve_stream_link(self, link: str) -> Optional[str]: + try: + from resolveurl_backend import resolve as resolve_with_resolveurl + except Exception: + resolve_with_resolveurl = None + if callable(resolve_with_resolveurl): + resolved = resolve_with_resolveurl(link) + return resolved or link + return link + + +# Alias für die automatische Plugin-Erkennung. 
+Plugin = TopstreamfilmPlugin diff --git a/dist/plugin.video.viewit/regex_patterns.py b/dist/plugin.video.viewit/regex_patterns.py new file mode 100644 index 0000000..c3c0b08 --- /dev/null +++ b/dist/plugin.video.viewit/regex_patterns.py @@ -0,0 +1,11 @@ +#!/usr/bin/env python3 +"""Shared regex pattern constants. + +Keep common patterns in one place to avoid accidental double-escaping (e.g. \"\\\\d\"). +""" + +SEASON_EPISODE_TAG = r"S\s*(\d+)\s*E\s*(\d+)" +SEASON_EPISODE_URL = r"/staffel-(\d+)/episode-(\d+)" +STAFFEL_NUM_IN_URL = r"/staffel-(\d+)" +DIGITS = r"(\d+)" + diff --git a/dist/plugin.video.viewit/requirements.txt b/dist/plugin.video.viewit/requirements.txt new file mode 100644 index 0000000..e35775c --- /dev/null +++ b/dist/plugin.video.viewit/requirements.txt @@ -0,0 +1,2 @@ +beautifulsoup4>=4.12 +requests>=2.31 diff --git a/dist/plugin.video.viewit/resolveurl_backend.py b/dist/plugin.video.viewit/resolveurl_backend.py new file mode 100644 index 0000000..5b9a17a --- /dev/null +++ b/dist/plugin.video.viewit/resolveurl_backend.py @@ -0,0 +1,43 @@ +"""Optionales ResolveURL-Backend für das Kodi-Addon. + +Wenn `script.module.resolveurl` installiert ist, kann damit eine Hoster-URL +zu einer abspielbaren Media-URL (inkl. evtl. Header-Suffix) aufgelöst werden. 
+""" + +from __future__ import annotations + +from typing import Optional + + +def resolve(url: str) -> Optional[str]: + if not url: + return None + try: + import resolveurl # type: ignore + except Exception: + return None + + try: + hosted = getattr(resolveurl, "HostedMediaFile", None) + if callable(hosted): + hmf = hosted(url) + valid = getattr(hmf, "valid_url", None) + if callable(valid) and not valid(): + return None + resolver = getattr(hmf, "resolve", None) + if callable(resolver): + result = resolver() + return str(result) if result else None + except Exception: + pass + + try: + resolve_fn = getattr(resolveurl, "resolve", None) + if callable(resolve_fn): + result = resolve_fn(url) + return str(result) if result else None + except Exception: + return None + + return None + diff --git a/dist/plugin.video.viewit/resources/logo.png b/dist/plugin.video.viewit/resources/logo.png new file mode 100644 index 0000000..d11893e Binary files /dev/null and b/dist/plugin.video.viewit/resources/logo.png differ diff --git a/dist/plugin.video.viewit/resources/settings.xml b/dist/plugin.video.viewit/resources/settings.xml new file mode 100644 index 0000000..efe74a3 --- /dev/null +++ b/dist/plugin.video.viewit/resources/settings.xml @@ -0,0 +1,36 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/dist/plugin.video.viewit/tmdb.py b/dist/plugin.video.viewit/tmdb.py new file mode 100644 index 0000000..830e770 --- /dev/null +++ b/dist/plugin.video.viewit/tmdb.py @@ -0,0 +1,652 @@ +from __future__ import annotations + +from dataclasses import dataclass +import json +import threading +from typing import Callable, Dict, List, Optional, Tuple +from urllib.parse import urlencode + +try: # pragma: no cover - optional dependency + import requests +except ImportError: # pragma: no cover + requests = None + + +TMDB_API_BASE = "https://api.themoviedb.org/3" +TMDB_IMAGE_BASE = "https://image.tmdb.org/t/p" +_TMDB_THREAD_LOCAL = threading.local() + + +def 
_get_tmdb_session() -> "requests.Session | None":
    """Returns a per-thread shared requests Session.

    We use thread-local storage because ViewIt prefetches TMDB metadata using threads.
    `requests.Session` is not guaranteed to be thread-safe, but reusing a session within
    the same thread keeps connections warm.
    """

    if requests is None:
        return None
    sess = getattr(_TMDB_THREAD_LOCAL, "session", None)
    if sess is None:
        sess = requests.Session()
        setattr(_TMDB_THREAD_LOCAL, "session", sess)
    return sess


@dataclass(frozen=True)
class TmdbCastMember:
    # One cast entry as rendered by Kodi: actor name, character name, thumb URL.
    name: str
    role: str
    thumb: str


@dataclass(frozen=True)
class TmdbShowMeta:
    # Aggregated TV-show metadata from /search/tv (+ optional credits).
    tmdb_id: int
    plot: str
    poster: str
    fanart: str
    rating: float
    votes: int
    cast: List[TmdbCastMember]


def _image_url(path: str, *, size: str) -> str:
    """Build a full TMDB image URL for *path* at *size*; '' for empty paths."""
    path = (path or "").strip()
    if not path:
        return ""
    return f"{TMDB_IMAGE_BASE}/{size}{path}"


def _fetch_credits(
    *,
    kind: str,
    tmdb_id: int,
    api_key: str,
    language: str,
    timeout: int,
    log: Callable[[str], None] | None,
    log_responses: bool,
) -> List[TmdbCastMember]:
    """Fetch /{kind}/{id}/credits and return at most 30 cast members.

    *kind* is the TMDB resource type (e.g. "tv" or "movie"). Returns [] on any
    network/HTTP/JSON failure so callers can treat cast as best-effort.
    """
    if requests is None or not tmdb_id:
        return []
    params = {"api_key": api_key, "language": (language or "de-DE").strip()}
    url = f"{TMDB_API_BASE}/{kind}/{tmdb_id}/credits?{urlencode(params)}"
    if callable(log):
        log(f"TMDB GET {url}")
    try:
        response = requests.get(url, timeout=timeout)
    except Exception as exc:  # pragma: no cover
        if callable(log):
            log(f"TMDB ERROR /{kind}/{{id}}/credits request_failed error={exc!r}")
        return []
    status = getattr(response, "status_code", None)
    if callable(log):
        log(f"TMDB RESPONSE /{kind}/{{id}}/credits status={status}")
    if status != 200:
        return []
    try:
        payload = response.json() or {}
    except Exception:
        return []
    if callable(log) and log_responses:
        try:
            dumped = json.dumps(payload, ensure_ascii=False)
        except Exception:
            dumped = str(payload)
        log(f"TMDB RESPONSE_BODY /{kind}/{{id}}/credits body={dumped[:2000]}")

    cast_payload = payload.get("cast") or []
    if callable(log):
        log(f"TMDB CREDITS /{kind}/{{id}}/credits cast={len(cast_payload)}")
    with_images: List[TmdbCastMember] = []
    without_images: List[TmdbCastMember] = []
    for entry in cast_payload:
        name = (entry.get("name") or "").strip()
        role = (entry.get("character") or "").strip()
        thumb = _image_url(entry.get("profile_path") or "", size="w185")
        if not name:
            continue
        member = TmdbCastMember(name=name, role=role, thumb=thumb)
        if thumb:
            with_images.append(member)
        else:
            without_images.append(member)

    # Many Kodi skins show placeholder heads when a thumbnail is missing.
    # Therefore prefer cast entries that have an image; only when no entry has
    # an image at all do we return names without one.
    if with_images:
        return with_images[:30]
    return without_images[:30]


def _parse_cast_payload(cast_payload: object) -> List[TmdbCastMember]:
    """Convert a raw TMDB "cast" list into TmdbCastMember entries (max 30).

    Applies the same image-first preference as _fetch_credits and tolerates
    malformed input (non-list payloads, non-dict entries).
    """
    if not isinstance(cast_payload, list):
        return []
    with_images: List[TmdbCastMember] = []
    without_images: List[TmdbCastMember] = []
    for entry in cast_payload:
        if not isinstance(entry, dict):
            continue
        name = (entry.get("name") or "").strip()
        role = (entry.get("character") or "").strip()
        thumb = _image_url(entry.get("profile_path") or "", size="w185")
        if not name:
            continue
        member = TmdbCastMember(name=name, role=role, thumb=thumb)
        if thumb:
            with_images.append(member)
        else:
            without_images.append(member)
    if with_images:
        return with_images[:30]
    return without_images[:30]


def _tmdb_get_json(
    *,
    url: str,
    timeout: int,
    log: Callable[[str], None] | None,
    log_responses: bool,
    session: "requests.Session | None" = None,
) -> Tuple[int | None, object | None, str]:
    """Fetches TMDB JSON with optional shared session.

    Returns: (status_code, payload_or_none, body_text_or_empty)
    """

    if requests is None:
        return None, None, ""
    if callable(log):
        log(f"TMDB GET {url}")
    # Fall back to the per-thread session, then to a throwaway Session.
    sess = session or _get_tmdb_session() or requests.Session()
    try:
        response = sess.get(url, timeout=timeout)
    except Exception as exc:  # pragma: no cover
        if callable(log):
            log(f"TMDB ERROR request_failed url={url} error={exc!r}")
        return None, None, ""

    status = getattr(response, "status_code", None)
    payload: object | None = None
    body_text = ""
    try:
        payload = response.json()
    except Exception:
        # Non-JSON body: keep the raw text for diagnostic logging only.
        try:
            body_text = (response.text or "").strip()
        except Exception:
            body_text = ""

    if callable(log):
        log(f"TMDB RESPONSE status={status} url={url}")
        if log_responses:
            if payload is not None:
                try:
                    dumped = json.dumps(payload, ensure_ascii=False)
                except Exception:
                    dumped = str(payload)
                log(f"TMDB RESPONSE_BODY url={url} body={dumped[:2000]}")
            elif body_text:
                log(f"TMDB RESPONSE_BODY url={url} body={body_text[:2000]}")
    return status, payload, body_text


def fetch_tv_episode_credits(
    *,
    tmdb_id: int,
    season_number: int,
    episode_number: int,
    api_key: str,
    language: str = "de-DE",
    timeout: int = 15,
    log: Callable[[str], None] | None = None,
    log_responses: bool = False,
) -> List[TmdbCastMember]:
    """Load cast for a specific episode (/tv/{id}/season/{n}/episode/{e}/credits)."""
    if requests is None:
        return []
    api_key = (api_key or "").strip()
    if not api_key or not tmdb_id:
        return []
    params = {"api_key": api_key, "language": (language or "de-DE").strip()}
    url = f"{TMDB_API_BASE}/tv/{tmdb_id}/season/{season_number}/episode/{episode_number}/credits?{urlencode(params)}"
    if callable(log):
        log(f"TMDB GET {url}")
    try:
        response = requests.get(url, timeout=timeout)
    except Exception as exc:  # pragma: no cover
        if callable(log):
            log(f"TMDB ERROR /tv/{{id}}/season/{{n}}/episode/{{e}}/credits request_failed error={exc!r}")
        return []
    status = getattr(response, "status_code", None)
    if callable(log):
        log(f"TMDB RESPONSE /tv/{{id}}/season/{{n}}/episode/{{e}}/credits status={status}")
    if status != 200:
        return []
    try:
        payload = response.json() or {}
    except Exception:
        return []
    if callable(log) and log_responses:
        try:
            dumped = json.dumps(payload, ensure_ascii=False)
        except Exception:
            dumped = str(payload)
        log(f"TMDB RESPONSE_BODY /tv/{{id}}/season/{{n}}/episode/{{e}}/credits body={dumped[:2000]}")

    cast_payload = payload.get("cast") or []
    if callable(log):
        log(f"TMDB CREDITS /tv/{{id}}/season/{{n}}/episode/{{e}}/credits cast={len(cast_payload)}")
    # Same image-first ranking as _parse_cast_payload (see comment there).
    with_images: List[TmdbCastMember] = []
    without_images: List[TmdbCastMember] = []
    for entry in cast_payload:
        name = (entry.get("name") or "").strip()
        role = (entry.get("character") or "").strip()
        thumb = _image_url(entry.get("profile_path") or "", size="w185")
        if not name:
            continue
        member = TmdbCastMember(name=name, role=role, thumb=thumb)
        if thumb:
            with_images.append(member)
        else:
            without_images.append(member)
    if with_images:
        return with_images[:30]
    return without_images[:30]


def lookup_tv_show(
    *,
    title: str,
    api_key: str,
    language: str = "de-DE",
    timeout: int = 15,
    log: Callable[[str], None] | None = None,
    log_responses: bool = False,
    include_cast: bool = False,
) -> Optional[TmdbShowMeta]:
    """Search TMDB for a TV show and return plot + poster URL (if available)."""
    if requests is None:
        return None
    api_key = (api_key or "").strip()
    if not api_key:
        return None
    query = (title or "").strip()
    if not query:
        return None

    params = {
        "api_key": api_key,
        "language": (language or "de-DE").strip(),
        "query": query,
        "include_adult": "false",
        "page": "1",
    }
    url = f"{TMDB_API_BASE}/search/tv?{urlencode(params)}"
    status, payload, body_text = _tmdb_get_json(
        url=url,
        timeout=timeout,
        log=log,
        log_responses=log_responses,
    )
    results = (payload or {}).get("results") if isinstance(payload, dict) else []
    results = results or []
    if callable(log):
        log(f"TMDB RESPONSE /search/tv status={status} results={len(results)}")
        if log_responses and payload is None and body_text:
            log(f"TMDB RESPONSE_BODY /search/tv body={body_text[:2000]}")

    if status != 200:
        return None
    if not results:
        return None

    # Prefer an exact (case-insensitive) title match; otherwise take TMDB's top hit.
    normalized_query = query.casefold()
    best = None
    for candidate in results:
        name = (candidate.get("name") or "").casefold()
        original_name = (candidate.get("original_name") or "").casefold()
        if name == normalized_query or original_name == normalized_query:
            best = candidate
            break
    if best is None:
        best = results[0]

    tmdb_id = int(best.get("id") or 0)
    plot = (best.get("overview") or "").strip()
    poster = _image_url(best.get("poster_path") or "", size="w342")
    fanart = _image_url(best.get("backdrop_path") or "", size="w780")
    try:
        rating = float(best.get("vote_average") or 0.0)
    except Exception:
        rating = 0.0
    try:
        votes = int(best.get("vote_count") or 0)
    except Exception:
        votes = 0
    if not tmdb_id:
        return None
    cast: List[TmdbCastMember] = []
    if include_cast and tmdb_id:
        # append_to_response=credits bundles show details + cast in one request.
        detail_params = {
            "api_key": api_key,
            "language": (language or "de-DE").strip(),
            "append_to_response": "credits",
        }
        detail_url = f"{TMDB_API_BASE}/tv/{tmdb_id}?{urlencode(detail_params)}"
        d_status, d_payload, d_body = _tmdb_get_json(
            url=detail_url,
            timeout=timeout,
            log=log,
            log_responses=log_responses,
        )
        if callable(log):
            log(f"TMDB RESPONSE /tv/{{id}} status={d_status}")
            if log_responses and d_payload is None and d_body:
                log(f"TMDB RESPONSE_BODY /tv/{{id}} body={d_body[:2000]}")
        if d_status == 200 and isinstance(d_payload, dict):
            credits = d_payload.get("credits") or {}
            cast = _parse_cast_payload((credits or {}).get("cast"))
    # All-empty metadata is treated as "no useful hit".
    if not plot and not poster and not fanart and not rating and not votes and not cast:
        return None
    return
TmdbShowMeta(
        tmdb_id=tmdb_id,
        plot=plot,
        poster=poster,
        fanart=fanart,
        rating=rating,
        votes=votes,
        cast=cast,
    )


@dataclass(frozen=True)
class TmdbMovieMeta:
    # Aggregated movie metadata from /search/movie (+ optional details/credits).
    tmdb_id: int
    plot: str
    poster: str
    fanart: str
    runtime_minutes: int
    rating: float
    votes: int
    cast: List[TmdbCastMember]


def _fetch_movie_details(
    *,
    tmdb_id: int,
    api_key: str,
    language: str,
    timeout: int,
    log: Callable[[str], None] | None,
    log_responses: bool,
    include_cast: bool,
) -> Tuple[int, List[TmdbCastMember]]:
    """Fetches /movie/{id} and (optionally) bundles credits via append_to_response=credits.

    Returns (runtime_minutes, cast); (0, []) on any failure.
    """
    if requests is None or not tmdb_id:
        return 0, []
    api_key = (api_key or "").strip()
    if not api_key:
        return 0, []
    params: Dict[str, str] = {
        "api_key": api_key,
        "language": (language or "de-DE").strip(),
    }
    if include_cast:
        params["append_to_response"] = "credits"
    url = f"{TMDB_API_BASE}/movie/{tmdb_id}?{urlencode(params)}"
    status, payload, body_text = _tmdb_get_json(url=url, timeout=timeout, log=log, log_responses=log_responses)
    if callable(log):
        log(f"TMDB RESPONSE /movie/{{id}} status={status}")
        if log_responses and payload is None and body_text:
            log(f"TMDB RESPONSE_BODY /movie/{{id}} body={body_text[:2000]}")
    if status != 200 or not isinstance(payload, dict):
        return 0, []
    try:
        runtime = int(payload.get("runtime") or 0)
    except Exception:
        runtime = 0
    cast: List[TmdbCastMember] = []
    if include_cast:
        credits = payload.get("credits") or {}
        cast = _parse_cast_payload((credits or {}).get("cast"))
    return runtime, cast


def lookup_movie(
    *,
    title: str,
    api_key: str,
    language: str = "de-DE",
    timeout: int = 15,
    log: Callable[[str], None] | None = None,
    log_responses: bool = False,
    include_cast: bool = False,
) -> Optional[TmdbMovieMeta]:
    """Search TMDB for a movie and return plot + poster URL (if available)."""
    if requests is None:
        return None
    api_key = (api_key or "").strip()
    if not api_key:
        return None
    query = (title or "").strip()
    if not query:
        return None

    params = {
        "api_key": api_key,
        "language": (language or "de-DE").strip(),
        "query": query,
        "include_adult": "false",
        "page": "1",
    }
    url = f"{TMDB_API_BASE}/search/movie?{urlencode(params)}"
    status, payload, body_text = _tmdb_get_json(
        url=url,
        timeout=timeout,
        log=log,
        log_responses=log_responses,
    )
    results = (payload or {}).get("results") if isinstance(payload, dict) else []
    results = results or []
    if callable(log):
        log(f"TMDB RESPONSE /search/movie status={status} results={len(results)}")
        if log_responses and payload is None and body_text:
            log(f"TMDB RESPONSE_BODY /search/movie body={body_text[:2000]}")

    if status != 200:
        return None
    if not results:
        return None

    # Prefer an exact (case-insensitive) title match; otherwise TMDB's top hit.
    normalized_query = query.casefold()
    best = None
    for candidate in results:
        name = (candidate.get("title") or "").casefold()
        original_name = (candidate.get("original_title") or "").casefold()
        if name == normalized_query or original_name == normalized_query:
            best = candidate
            break
    if best is None:
        best = results[0]

    tmdb_id = int(best.get("id") or 0)
    plot = (best.get("overview") or "").strip()
    poster = _image_url(best.get("poster_path") or "", size="w342")
    fanart = _image_url(best.get("backdrop_path") or "", size="w780")
    # NOTE(review): this initial value is unconditionally overwritten by
    # _fetch_movie_details() below — dead assignment, kept for fidelity.
    runtime_minutes = 0
    try:
        rating = float(best.get("vote_average") or 0.0)
    except Exception:
        rating = 0.0
    try:
        votes = int(best.get("vote_count") or 0)
    except Exception:
        votes = 0
    if not tmdb_id:
        return None
    cast: List[TmdbCastMember] = []
    runtime_minutes, cast = _fetch_movie_details(
        tmdb_id=tmdb_id,
        api_key=api_key,
        language=language,
        timeout=timeout,
        log=log,
        log_responses=log_responses,
        include_cast=include_cast,
    )
    if not plot and not poster and not fanart and not rating and not votes and not cast:
        return None
    return TmdbMovieMeta(
        tmdb_id=tmdb_id,
        plot=plot,
        poster=poster,
        fanart=fanart,
        runtime_minutes=runtime_minutes,
        rating=rating,
        votes=votes,
        cast=cast,
    )


@dataclass(frozen=True)
class TmdbEpisodeMeta:
    # Per-episode metadata: synopsis, still-frame URL, runtime in minutes.
    plot: str
    thumb: str
    runtime_minutes: int


@dataclass(frozen=True)
class TmdbSeasonMeta:
    # Per-season metadata: synopsis and poster URL.
    plot: str
    poster: str


def lookup_tv_season_summary(
    *,
    tmdb_id: int,
    season_number: int,
    api_key: str,
    language: str = "de-DE",
    timeout: int = 15,
    log: Callable[[str], None] | None = None,
    log_responses: bool = False,
) -> Optional[TmdbSeasonMeta]:
    """Load season metadata (plot + poster); None when neither is available."""
    if requests is None:
        return None

    api_key = (api_key or "").strip()
    if not api_key or not tmdb_id:
        return None

    params = {"api_key": api_key, "language": (language or "de-DE").strip()}
    url = f"{TMDB_API_BASE}/tv/{tmdb_id}/season/{season_number}?{urlencode(params)}"
    if callable(log):
        log(f"TMDB GET {url}")
    try:
        response = requests.get(url, timeout=timeout)
    except Exception:
        return None
    status = getattr(response, "status_code", None)
    if callable(log):
        log(f"TMDB RESPONSE /tv/{{id}}/season/{{n}} status={status}")
    if status != 200:
        return None
    try:
        payload = response.json() or {}
    except Exception:
        return None
    if callable(log) and log_responses:
        try:
            dumped = json.dumps(payload, ensure_ascii=False)
        except Exception:
            dumped = str(payload)
        log(f"TMDB RESPONSE_BODY /tv/{{id}}/season/{{n}} body={dumped[:2000]}")

    plot = (payload.get("overview") or "").strip()
    poster_path = (payload.get("poster_path") or "").strip()
    poster = f"{TMDB_IMAGE_BASE}/w342{poster_path}" if poster_path else ""
    if not plot and not poster:
        return None
    return TmdbSeasonMeta(plot=plot, poster=poster)


def lookup_tv_season(
    *,
    tmdb_id: int,
    season_number: int,
    api_key: str,
    language: str = "de-DE",
    timeout: int = 15,
    log: Callable[[str], None] | None = None,
    log_responses: bool = False,
) -> Optional[Dict[int, TmdbEpisodeMeta]]:
    """Load per-episode metadata for a season: episode_number -> (plot, thumb)."""
    if requests is None:
        return None
    api_key = (api_key or "").strip()
    if not api_key or not tmdb_id or season_number is None:
        return None
    params = {"api_key": api_key, "language": (language or "de-DE").strip()}
    url = f"{TMDB_API_BASE}/tv/{tmdb_id}/season/{season_number}?{urlencode(params)}"
    if callable(log):
        log(f"TMDB GET {url}")
    try:
        response = requests.get(url, timeout=timeout)
    except Exception as exc:  # pragma: no cover
        if callable(log):
            log(f"TMDB ERROR /tv/{{id}}/season/{{n}} request_failed error={exc!r}")
        return None

    status = getattr(response, "status_code", None)
    payload = None
    body_text = ""
    try:
        payload = response.json() or {}
    except Exception:
        # Non-JSON body: keep the raw text for diagnostic logging only.
        try:
            body_text = (response.text or "").strip()
        except Exception:
            body_text = ""

    episodes = (payload or {}).get("episodes") or []
    if callable(log):
        log(f"TMDB RESPONSE /tv/{{id}}/season/{{n}} status={status} episodes={len(episodes)}")
        if log_responses:
            if payload is not None:
                try:
                    dumped = json.dumps(payload, ensure_ascii=False)
                except Exception:
                    dumped = str(payload)
                log(f"TMDB RESPONSE_BODY /tv/{{id}}/season/{{n}} body={dumped[:2000]}")
            elif body_text:
                log(f"TMDB RESPONSE_BODY /tv/{{id}}/season/{{n}} body={body_text[:2000]}")

    if status != 200 or not episodes:
        return None

    result: Dict[int, TmdbEpisodeMeta] = {}
    for entry in episodes:
        try:
            ep_number = int(entry.get("episode_number") or 0)
        except Exception:
            continue
        if not ep_number:
            continue
        plot = (entry.get("overview") or "").strip()
        runtime_minutes = 0
        try:
            runtime_minutes = int(entry.get("runtime") or 0)
        except Exception:
            runtime_minutes = 0
        still_path = (entry.get("still_path") or "").strip()
        thumb = f"{TMDB_IMAGE_BASE}/w300{still_path}" if still_path else ""
        # Skip episodes that carry no usable metadata at all.
        if not plot and not thumb and not runtime_minutes:
            continue
        result[ep_number] =
TmdbEpisodeMeta(plot=plot, thumb=thumb, runtime_minutes=runtime_minutes) + return result or None diff --git a/docs/PLUGIN_SYSTEM.md b/docs/PLUGIN_SYSTEM.md new file mode 100644 index 0000000..2867799 --- /dev/null +++ b/docs/PLUGIN_SYSTEM.md @@ -0,0 +1,91 @@ +## ViewIt Plugin-System + +Dieses Dokument beschreibt, wie das Plugin-System von **ViewIt** funktioniert und wie die Community neue Integrationen hinzufügen kann. + +### Überblick + +ViewIt lädt Provider-Integrationen dynamisch aus `source/kodi_addon/plugins/*.py`. Jede Datei enthält eine Klasse, die von `BasisPlugin` erbt. Beim Start werden alle Plugins instanziiert und nur aktiv genutzt, wenn sie verfügbar sind. + +### Aktuelle Plugins + +- `serienstream_plugin.py` – Serienstream (s.to) +- `topstreamfilm_plugin.py` – Topstreamfilm +- `einschalten_plugin.py` – Einschalten +- `aniworld_plugin.py` – Aniworld +- `_template_plugin.py` – Vorlage für neue Plugins + +### Plugin-Discovery (Ladeprozess) + +Der Loader in `source/kodi_addon/default.py`: + +1. Sucht alle `*.py` in `source/kodi_addon/plugins/` +2. Überspringt Dateien, die mit `_` beginnen +3. Lädt Module dynamisch +4. Instanziert Klassen, die von `BasisPlugin` erben +5. Ignoriert Plugins mit `is_available = False` + +Damit bleiben fehlerhafte Plugins isoliert und blockieren nicht das gesamte Add-on. 

### BasisPlugin – verpflichtende Methoden

Definiert in `source/kodi_addon/plugin_interface.py`:

- `async search_titles(query: str) -> list[str]`
- `seasons_for(title: str) -> list[str]`
- `episodes_for(title: str, season: str) -> list[str]`

### Optionale Features (Capabilities)

Plugins können zusätzliche Features anbieten:

- `capabilities() -> set[str]`
  - `popular_series`: liefert beliebte Serien
  - `genres`: Genre-Liste verfügbar
  - `latest_episodes`: neue Episoden verfügbar
- `popular_series() -> list[str]`
- `genres() -> list[str]`
- `titles_for_genre(genre: str) -> list[str]`
- `latest_episodes(page: int = 1) -> list[LatestEpisode]` (wenn angeboten)

ViewIt zeigt im UI nur die Features an, die ein Plugin tatsächlich liefert.

### Plugin-Struktur (empfohlen)

Eine Integration sollte typischerweise bieten:

- Konstante `BASE_URL`
- `search_titles()` mit Provider-Suche
- `seasons_for()` und `episodes_for()` mit HTML-Parsing
- `stream_link_for()` optional für direkte Playback-Links
- Optional: `available_hosters_for()` oder Provider-spezifische Helfer

Als Startpunkt dient `source/kodi_addon/plugins/_template_plugin.py`.

### Community-Erweiterungen (Workflow)

1. Fork/Branch erstellen
2. Neue Datei unter `source/kodi_addon/plugins/` hinzufügen (z. B. `meinprovider_plugin.py`)
3. Klasse erstellen, die `BasisPlugin` implementiert
4. In Kodi testen (ZIP bauen, installieren)
5. PR öffnen

### Qualitätsrichtlinien

- Keine Netzwerkzugriffe im Import-Top-Level
- Netzwerkzugriffe nur in Methoden (z. B. `search_titles`)
- Fehler sauber abfangen und verständliche Fehlermeldungen liefern
- Kein globaler Zustand, der über Plugin-Instanzen hinweg für Überraschungen sorgt
- Provider-spezifische Parser in Helper-Funktionen kapseln

### Debugging & Logs

Hilfreiche Logs werden nach `userdata/addon_data/plugin.video.viewit/logs/` geschrieben.
Provider sollten URL-Logging optional halten (Settings).
+ +### ZIP-Build + +``` +./scripts/build_kodi_zip.sh +``` + +Das ZIP liegt anschließend unter `install/plugin.video.viewit-.zip`. diff --git a/scripts/__pycache__/test_einschalten_api.cpython-312.pyc b/scripts/__pycache__/test_einschalten_api.cpython-312.pyc new file mode 100644 index 0000000..46a3480 Binary files /dev/null and b/scripts/__pycache__/test_einschalten_api.cpython-312.pyc differ diff --git a/scripts/__pycache__/test_tmdb.cpython-312.pyc b/scripts/__pycache__/test_tmdb.cpython-312.pyc new file mode 100644 index 0000000..37da5a3 Binary files /dev/null and b/scripts/__pycache__/test_tmdb.cpython-312.pyc differ diff --git a/scripts/build_install_addon.sh b/scripts/build_install_addon.sh new file mode 100755 index 0000000..a0ab01c --- /dev/null +++ b/scripts/build_install_addon.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash +set -euo pipefail + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +SRC_ADDON_DIR="${ROOT_DIR}/addon" +INSTALL_DIR="${ROOT_DIR}/dist" + +ADDON_XML="${SRC_ADDON_DIR}/addon.xml" +if [[ ! -f "${ADDON_XML}" ]]; then + echo "Missing: ${ADDON_XML}" >&2 + exit 1 +fi + +ADDON_ID="$(python3 - "${ADDON_XML}" <<'PY' +import sys +import xml.etree.ElementTree as ET + +tree = ET.parse(sys.argv[1]) +root = tree.getroot() +print(root.attrib.get("id", "plugin.unknown")) +PY +)" + +DEST_DIR="${INSTALL_DIR}/${ADDON_ID}" + +mkdir -p "${INSTALL_DIR}" +rm -rf "${DEST_DIR}" +mkdir -p "${DEST_DIR}" + +# Copy add-on files (single source of truth: addon/) +if command -v rsync >/dev/null 2>&1; then + rsync -a --delete \ + --exclude '__pycache__/' \ + --exclude '*.pyc' \ + "${SRC_ADDON_DIR}/" "${DEST_DIR}/" +else + cp -a "${SRC_ADDON_DIR}/." 
"${DEST_DIR}/" + find "${DEST_DIR}" -type d -name '__pycache__' -prune -exec rm -rf {} + || true + find "${DEST_DIR}" -type f -name '*.pyc' -delete || true +fi + +echo "${DEST_DIR}" diff --git a/scripts/build_kodi_zip.sh b/scripts/build_kodi_zip.sh new file mode 100755 index 0000000..4ae5971 --- /dev/null +++ b/scripts/build_kodi_zip.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash +set -euo pipefail + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +INSTALL_DIR="${ROOT_DIR}/dist" +SRC_ADDON_DIR="${ROOT_DIR}/addon" +ADDON_XML="${SRC_ADDON_DIR}/addon.xml" + +if [[ ! -f "${ADDON_XML}" ]]; then + echo "Missing: ${ADDON_XML}" >&2 + exit 1 +fi + +ADDON_ID="$(python3 - "${ADDON_XML}" <<'PY' +import sys +import xml.etree.ElementTree as ET + +tree = ET.parse(sys.argv[1]) +root = tree.getroot() +print(root.attrib.get("id", "plugin.unknown")) +PY +)" + +ADDON_VERSION="$(python3 - "${ADDON_XML}" <<'PY' +import sys +import xml.etree.ElementTree as ET + +tree = ET.parse(sys.argv[1]) +root = tree.getroot() +print(root.attrib.get("version", "0.0.0")) +PY +)" + +ZIP_NAME="${ADDON_ID}-${ADDON_VERSION}.zip" +ZIP_PATH="${INSTALL_DIR}/${ZIP_NAME}" + +ADDON_DIR="$("${ROOT_DIR}/scripts/build_install_addon.sh" >/dev/null; echo "${INSTALL_DIR}/${ADDON_ID}")" + +rm -f "${ZIP_PATH}" +(cd "${INSTALL_DIR}" && zip -r "${ZIP_NAME}" "$(basename "${ADDON_DIR}")" >/dev/null) + +echo "${ZIP_PATH}" diff --git a/scripts/test_tmdb.py b/scripts/test_tmdb.py new file mode 100755 index 0000000..4101507 --- /dev/null +++ b/scripts/test_tmdb.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python3 +from __future__ import annotations + +import argparse +import os +import sys + +try: + from source.kodi_addon.tmdb import lookup_tv_show +except Exception: + sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "source", "kodi_addon")) + from tmdb import lookup_tv_show # type: ignore[import-not-found] + + +def main() -> int: + parser = argparse.ArgumentParser(description="Manueller Test fuer tmdb.py (Plot + 
Poster).") + parser.add_argument("title", nargs="?", default="Dark", help="Serientitel (Default: Dark)") + parser.add_argument("--key", default=os.environ.get("TMDB_API_KEY", ""), help="TMDB API Key (oder env TMDB_API_KEY)") + parser.add_argument("--lang", default=os.environ.get("TMDB_LANGUAGE", "de-DE"), help="Sprache, z.B. de-DE") + parser.add_argument("--log-responses", action="store_true", help="Antwort-JSON (gekürzt) loggen") + args = parser.parse_args() + + if not args.key: + print( + "Fehlt: --key oder env TMDB_API_KEY\n" + "Beispiel: TMDB_API_KEY=DEIN_KEY ./scripts/test_tmdb.py\n" + "Oder: ./scripts/test_tmdb.py \"Dark\" --key DEIN_KEY", + file=sys.stderr, + ) + return 2 + + meta = lookup_tv_show( + title=args.title, + api_key=args.key, + language=args.lang, + log=print, + log_responses=args.log_responses, + ) + if not meta: + print("Kein Treffer / keine Meta-Daten.") + return 1 + + print("\nRESULT") + print("plot:", (meta.plot or "")[:500]) + print("poster:", meta.poster) + return 0 + + +if __name__ == "__main__": + raise SystemExit(main())