diff --git a/poetry.lock b/poetry.lock index 771d9a28..59161318 100644 --- a/poetry.lock +++ b/poetry.lock @@ -472,13 +472,13 @@ dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "sphinx (<2)", "tox"] [[package]] name = "dill" -version = "0.3.8" +version = "0.3.9" description = "serialize all of Python" optional = false python-versions = ">=3.8" files = [ - {file = "dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7"}, - {file = "dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca"}, + {file = "dill-0.3.9-py3-none-any.whl", hash = "sha256:468dff3b89520b474c0397703366b7b95eebe6303f108adf9b19da1f702be87a"}, + {file = "dill-0.3.9.tar.gz", hash = "sha256:81aa267dddf68cbfe8029c42ca9ec6a4ab3b22371d1c450abc54422577b4512c"}, ] [package.extras] @@ -679,13 +679,13 @@ files = [ [[package]] name = "httpcore" -version = "1.0.5" +version = "1.0.6" description = "A minimal low-level HTTP client." optional = false python-versions = ">=3.8" files = [ - {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"}, - {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"}, + {file = "httpcore-1.0.6-py3-none-any.whl", hash = "sha256:27b59625743b85577a8c0e10e55b50b5368a4f2cfe8cc7bcfa9cf00829c2682f"}, + {file = "httpcore-1.0.6.tar.gz", hash = "sha256:73f6dbd6eb8c21bbf7ef8efad555481853f5f6acdeaff1edb0694289269ee17f"}, ] [package.dependencies] @@ -696,7 +696,7 @@ h11 = ">=0.13,<0.15" asyncio = ["anyio (>=4.0,<5.0)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] -trio = ["trio (>=0.22.0,<0.26.0)"] +trio = ["trio (>=0.22.0,<1.0)"] [[package]] name = "httptools" @@ -1846,13 +1846,13 @@ windows-terminal = ["colorama (>=0.4.6)"] [[package]] name = "pyperf" -version = "2.7.0" +version = "2.8.0" description = "Python module to run and analyze benchmarks" optional = false python-versions = ">=3.7" files = [ - {file = "pyperf-2.7.0-py3-none-any.whl", hash = "sha256:dce63053b916b73d8736a77404309328f938851b5c2c5e8493cde910ce37e362"}, - {file = "pyperf-2.7.0.tar.gz", hash = "sha256:4201c6601032f374e9c900c6d2544a2f5891abedc1a96eec0e7b2338a6247589"}, + {file = "pyperf-2.8.0-py3-none-any.whl", hash = "sha256:1a775b5a09882f18bf876430ef78e07646f773f50774546f5f6a8b34d60e3968"}, + {file = "pyperf-2.8.0.tar.gz", hash = "sha256:b30a20465819daf102b6543b512f6799a5a879ff2a123981e6cd732d0e6a7a79"}, ] [package.dependencies] @@ -1863,13 +1863,13 @@ dev = ["importlib-metadata", "tox"] [[package]] name = "pyright" -version = "1.1.382.post1" +version = "1.1.383" description = "Command line wrapper for pyright" optional = false python-versions = ">=3.7" files = [ - {file = "pyright-1.1.382.post1-py3-none-any.whl", hash = "sha256:21a4749dd1740e209f88d3a601e9f40748670d39481ea32b9d77edf7f3f1fb2e"}, - {file = "pyright-1.1.382.post1.tar.gz", hash = "sha256:66a5d4e83be9452853d73e9dd9e95ba0ac3061845270e4e331d0070a597d3445"}, + {file = "pyright-1.1.383-py3-none-any.whl", hash = "sha256:d864d1182a313f45aaf99e9bfc7d2668eeabc99b29a556b5344894fd73cb1959"}, + {file = "pyright-1.1.383.tar.gz", hash = "sha256:1df7f12407f3710c9c6df938d98ec53f70053e6c6bbf71ce7bcb038d42f10070"}, ] [package.dependencies] @@ -2338,13 +2338,13 @@ rsa = ["oauthlib[signedtoken] (>=3.0.0)"] [[package]] name = "rich" -version = "13.8.1" +version = "13.9.2" description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more 
to the terminal" optional = false -python-versions = ">=3.7.0" +python-versions = ">=3.8.0" files = [ - {file = "rich-13.8.1-py3-none-any.whl", hash = "sha256:1760a3c0848469b97b558fc61c85233e3dafb69c7a071b4d60c38099d3cd4c06"}, - {file = "rich-13.8.1.tar.gz", hash = "sha256:8260cda28e3db6bf04d2d1ef4dbc03ba80a824c88b0e7668a0f23126a424844a"}, + {file = "rich-13.9.2-py3-none-any.whl", hash = "sha256:8c82a3d3f8dcfe9e734771313e606b39d8247bb6b826e196f4914b333b743cf1"}, + {file = "rich-13.9.2.tar.gz", hash = "sha256:51a2c62057461aaf7152b4d611168f93a9fc73068f8ded2790f29fe2b5366d0c"}, ] [package.dependencies] @@ -2638,6 +2638,21 @@ postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"] pymysql = ["pymysql"] sqlcipher = ["sqlcipher3_binary"] +[[package]] +name = "sqlmodel" +version = "0.0.22" +description = "SQLModel, SQL databases in Python, designed for simplicity, compatibility, and robustness." +optional = false +python-versions = ">=3.7" +files = [ + {file = "sqlmodel-0.0.22-py3-none-any.whl", hash = "sha256:a1ed13e28a1f4057cbf4ff6cdb4fc09e85702621d3259ba17b3c230bfb2f941b"}, + {file = "sqlmodel-0.0.22.tar.gz", hash = "sha256:7d37c882a30c43464d143e35e9ecaf945d88035e20117bf5ec2834a23cbe505e"}, +] + +[package.dependencies] +pydantic = ">=1.10.13,<3.0.0" +SQLAlchemy = ">=2.0.14,<2.1.0" + [[package]] name = "srt" version = "3.5.3" @@ -2766,13 +2781,13 @@ weaviate = ["weaviate-client (>=4.5.4,<5.0.0)"] [[package]] name = "textual" -version = "0.81.0" +version = "0.82.0" description = "Modern Text User Interface framework" optional = false python-versions = "<4.0.0,>=3.8.1" files = [ - {file = "textual-0.81.0-py3-none-any.whl", hash = "sha256:5f94e3bf185a1693c31f5e2bcbc413c8ed093a53abc96a956c65322d48f9543c"}, - {file = "textual-0.81.0.tar.gz", hash = "sha256:b438f7e6bb143fdec379170ccc6709a32d273bd998e70179537109263de7c818"}, + {file = "textual-0.82.0-py3-none-any.whl", hash = "sha256:4ce00dc898fab3a829aa835071f4e8284aad57f13870e871d0ac516cc34b3645"}, + {file = "textual-0.82.0.tar.gz", hash = "sha256:db93a2bb3ba9fdb2a536272cd5c8144c9c9682df0370a99361b670c6191b0a4d"}, ] [package.dependencies] @@ -2786,24 +2801,24 @@ syntax = ["tree-sitter (>=0.20.1,<0.21.0)", "tree-sitter-languages (==1.10.2)"] [[package]] name = "tomli" -version = "2.0.1" +version = "2.0.2" description = "A lil' TOML parser" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, - {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, + {file = "tomli-2.0.2-py3-none-any.whl", hash = "sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38"}, + {file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"}, ] [[package]] name = "types-python-dateutil" -version = "2.9.0.20240906" +version = "2.9.0.20241003" description = "Typing stubs for python-dateutil" optional = false python-versions = ">=3.8" files = [ - {file = "types-python-dateutil-2.9.0.20240906.tar.gz", hash = "sha256:9706c3b68284c25adffc47319ecc7947e5bb86b3773f843c73906fd598bc176e"}, - {file = "types_python_dateutil-2.9.0.20240906-py3-none-any.whl", hash = "sha256:27c8cc2d058ccb14946eebcaaa503088f4f6dbc4fb6093d3d456a49aef2753f6"}, + {file = "types-python-dateutil-2.9.0.20241003.tar.gz", hash = "sha256:58cb85449b2a56d6684e41aeefb4c4280631246a0da1a719bdbe6f3fb0317446"}, + {file = 
"types_python_dateutil-2.9.0.20241003-py3-none-any.whl", hash = "sha256:250e1d8e80e7bbc3a6c99b907762711d1a1cdd00e978ad39cb5940f6f0a87f3d"}, ] [[package]] @@ -3261,4 +3276,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.0" python-versions = "^3.11" -content-hash = "42f7ca2f6421e5c1b7e3d6727f6e595eefb0efc9f167072d793ec72d3dfb8c97" +content-hash = "19ac5eec8c0bba864f8eb1a8cae25adac361f7ddc89d3cdd32def76a88f4cb3a" diff --git a/pyproject.toml b/pyproject.toml index cc590509..d04c7b72 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,6 +36,7 @@ subliminal = "^2.2.1" rank-torrent-name = "^1.0.2" jsonschema = "^4.23.0" scalar-fastapi = "^1.0.3" +sqlmodel = "^0.0.22" [tool.poetry.group.dev.dependencies] pyright = "^1.1.352" diff --git a/src/controllers/default.py b/src/controllers/default.py index 94d96d8c..e67089a9 100644 --- a/src/controllers/default.py +++ b/src/controllers/default.py @@ -103,22 +103,21 @@ async def get_stats(_: Request): payload = {} with db.Session() as session: - movies_symlinks = session.execute(select(func.count(Movie._id)).where(Movie.symlinked == True)).scalar_one() - episodes_symlinks = session.execute(select(func.count(Episode._id)).where(Episode.symlinked == True)).scalar_one() + movies_symlinks = session.execute(select(func.count(Movie.id)).where(Movie.symlinked == True)).scalar_one() + episodes_symlinks = session.execute(select(func.count(Episode.id)).where(Episode.symlinked == True)).scalar_one() total_symlinks = movies_symlinks + episodes_symlinks - total_movies = session.execute(select(func.count(Movie._id))).scalar_one() - total_shows = session.execute(select(func.count(Show._id))).scalar_one() - total_seasons = session.execute(select(func.count(Season._id))).scalar_one() - total_episodes = session.execute(select(func.count(Episode._id))).scalar_one() - total_items = session.execute(select(func.count(MediaItem._id))).scalar_one() + total_movies = session.execute(select(func.count(Movie.id))).scalar_one() + total_shows = session.execute(select(func.count(Show.id))).scalar_one() + total_seasons = session.execute(select(func.count(Season.id))).scalar_one() + total_episodes = session.execute(select(func.count(Episode.id))).scalar_one() + total_items = session.execute(select(func.count(MediaItem.id))).scalar_one() - # Select only the IDs of incomplete items _incomplete_items = session.execute( select(MediaItem._id) .where(MediaItem.last_state != States.Completed) ).scalars().all() - + incomplete_retries = {} if _incomplete_items: media_items = session.query(MediaItem).filter(MediaItem._id.in_(_incomplete_items)).all() @@ -127,7 +126,7 @@ async def get_stats(_: Request): states = {} for state in States: - states[state] = session.execute(select(func.count(MediaItem._id)).where(MediaItem.last_state == state)).scalar_one() + states[state] = session.execute(select(func.count(MediaItem.id)).where(MediaItem.last_state == state)).scalar_one() payload["total_items"] = total_items payload["total_movies"] = total_movies diff --git a/src/controllers/items.py b/src/controllers/items.py index e213f562..232bed94 100644 --- a/src/controllers/items.py +++ b/src/controllers/items.py @@ -74,11 +74,11 @@ async def get_items( if search: search_lower = search.lower() if search_lower.startswith("tt"): - query = query.where(MediaItem.imdb_id == search_lower) + query = query.where(MediaItem.ids["imdb_id"] == search_lower) else: query = query.where( (func.lower(MediaItem.title).like(f"%{search_lower}%")) | - (func.lower(MediaItem.imdb_id).like(f"%{search_lower}%")) + 
(func.lower(MediaItem.ids["imdb_id"]).like(f"%{search_lower}%")) ) if state: @@ -168,7 +168,7 @@ async def add_items( with db.Session() as _: for id in valid_ids: item = MediaItem({"imdb_id": id, "requested_by": "riven", "requested_at": datetime.now()}) - request.app.program.em.add_item(item) + request.app.program.em.add_item(item, "ApiAdd") return {"success": True, "message": f"Added {len(valid_ids)} item(s) to the queue"} @@ -180,7 +180,7 @@ async def add_items( async def get_item(request: Request, id: int): with db.Session() as session: try: - item = session.execute(select(MediaItem).where(MediaItem._id == id)).unique().scalar_one() + item = session.execute(select(MediaItem).where(MediaItem.id == id)).unique().scalar_one() except NoResultFound: raise HTTPException(status_code=404, detail="Item not found") return {"success": True, "item": item.to_extended_dict()} @@ -195,7 +195,7 @@ async def get_items_by_imdb_ids(request: Request, imdb_ids: str): with db.Session() as session: items = [] for id in ids: - item = session.execute(select(MediaItem).where(MediaItem.imdb_id == id)).unique().scalar_one() + item = session.execute(select(MediaItem).where(MediaItem.ids["imdb_id"] == id)).unique().scalar_one() if item: items.append(item) return {"success": True, "items": [item.to_extended_dict() for item in items]} @@ -219,7 +219,7 @@ async def reset_items( clear_streams(media_item) reset_media_item(media_item) except Exception as e: - logger.error(f"Failed to reset item with id {media_item._id}: {str(e)}") + logger.error(f"Failed to reset item with id {media_item.id}: {str(e)}") continue except ValueError as e: raise HTTPException(status_code=400, detail=str(e)) @@ -257,7 +257,7 @@ async def remove_item(request: Request, ids: str): if not media_items: raise ValueError("Invalid item ID(s) provided. 
Some items may not exist.") for media_item in media_items: - logger.debug(f"Removing item {media_item.title} with ID {media_item._id}") + logger.debug(f"Removing item {media_item.title} with ID {media_item.id}") request.app.program.em.cancel_job(media_item) await asyncio.sleep(0.1) # Ensure cancellation is processed clear_streams(media_item) @@ -375,7 +375,7 @@ def set_torrent_rd(request: Request, id: int, torrent_id: str): # items = [] # return_dict = {} # for id in ids: -# items.append(session.execute(select(MediaItem).where(MediaItem._id == id)).unique().scalar_one()) +# items.append(session.execute(select(MediaItem).where(MediaItem.id == id)).unique().scalar_one()) # if any(item for item in items if item.type in ["Season", "Episode"]): # raise HTTPException(status_code=400, detail="Only shows and movies can be manually scraped currently") # for item in items: @@ -393,7 +393,7 @@ def set_torrent_rd(request: Request, id: int, torrent_id: str): # async def download(request: Request, id: str, hash: str): # downloader = request.app.program.services.get(Downloader).service # with db.Session() as session: -# item = session.execute(select(MediaItem).where(MediaItem._id == id)).unique().scalar_one() +# item = session.execute(select(MediaItem).where(MediaItem.id == id)).unique().scalar_one() # item.reset(True) # downloader.download_cached(item, hash) # request.app.program.add_to_queue(item) diff --git a/src/controllers/scrape.py b/src/controllers/scrape.py index deb833da..db459a88 100644 --- a/src/controllers/scrape.py +++ b/src/controllers/scrape.py @@ -33,7 +33,7 @@ async def scrape(request: Request, imdb_id: str, season: int = None, episode: in with db.Session() as session: media_item = session.execute( select(MediaItem).where( - MediaItem.imdb_id == imdb_id, + MediaItem.ids["imdb_id"] == imdb_id, MediaItem.type.in_(["movie", "show"]) ) ).unique().scalar_one_or_none() diff --git a/src/program/content/listrr.py b/src/program/content/listrr.py index 03203ccc..b17d6910 100644 --- a/src/program/content/listrr.py +++ b/src/program/content/listrr.py @@ -68,8 +68,8 @@ def run(self) -> Generator[MediaItem, None, None]: listrr_items = movie_items + show_items non_existing_items = _filter_existing_items(listrr_items) - new_non_recurring_items = [item for item in non_existing_items if item.imdb_id not in self.recurring_items] - self.recurring_items.update([item.imdb_id for item in new_non_recurring_items]) + new_non_recurring_items = [item for item in non_existing_items if item.ids["imdb_id"] not in self.recurring_items] + self.recurring_items.update([item.ids["imdb_id"] for item in new_non_recurring_items]) if new_non_recurring_items: logger.info(f"Fetched {len(new_non_recurring_items)} new items from Listrr") diff --git a/src/program/content/mdblist.py b/src/program/content/mdblist.py index af00aa36..fe6068bf 100644 --- a/src/program/content/mdblist.py +++ b/src/program/content/mdblist.py @@ -54,18 +54,18 @@ def run(self) -> Generator[MediaItem, None, None]: else: items = list_items_by_url(list, self.settings.api_key) for item in items: - if hasattr(item, "error") or not item or item.imdb_id is None: + if hasattr(item, "error") or not item or item.ids["imdb_id"] is None: continue - if item.imdb_id.startswith("tt"): + if item.ids["imdb_id"].startswith("tt"): items_to_yield.append(MediaItem( - {"imdb_id": item.imdb_id, "requested_by": self.key} + {"imdb_id": item.ids["imdb_id"], "requested_by": self.key} )) except RateLimitExceeded: pass non_existing_items = _filter_existing_items(items_to_yield) - 
new_non_recurring_items = [item for item in non_existing_items if item.imdb_id not in self.recurring_items and isinstance(item, MediaItem)] - self.recurring_items.update([item.imdb_id for item in new_non_recurring_items]) + new_non_recurring_items = [item for item in non_existing_items if item.ids["imdb_id"] not in self.recurring_items and isinstance(item, MediaItem)] + self.recurring_items.update([item.ids["imdb_id"] for item in new_non_recurring_items]) if new_non_recurring_items: logger.info(f"Found {len(new_non_recurring_items)} new items to fetch") diff --git a/src/program/content/overseerr.py b/src/program/content/overseerr.py index ff18a911..901716fb 100644 --- a/src/program/content/overseerr.py +++ b/src/program/content/overseerr.py @@ -59,8 +59,8 @@ def run(self): overseerr_items: list[MediaItem] = self.get_media_requests() non_existing_items = _filter_existing_items(overseerr_items) - new_non_recurring_items = [item for item in non_existing_items if item.imdb_id not in self.recurring_items and isinstance(item, MediaItem)] - self.recurring_items.update([item.imdb_id for item in new_non_recurring_items]) + new_non_recurring_items = [item for item in non_existing_items if item.ids["imdb_id"] not in self.recurring_items and isinstance(item, MediaItem)] + self.recurring_items.update([item.ids["imdb_id"] for item in new_non_recurring_items]) if self.settings.use_webhook: logger.debug("Webhook is enabled. Running Overseerr once before switching to webhook only mode") diff --git a/src/program/content/plex_watchlist.py b/src/program/content/plex_watchlist.py index 4853029f..57961cef 100644 --- a/src/program/content/plex_watchlist.py +++ b/src/program/content/plex_watchlist.py @@ -71,8 +71,8 @@ def run(self) -> Generator[MediaItem, None, None]: plex_items: set[str] = set(watchlist_items) | set(rss_items) items_to_yield: list[MediaItem] = [MediaItem({"imdb_id": imdb_id, "requested_by": self.key}) for imdb_id in plex_items if imdb_id and imdb_id.startswith("tt")] non_existing_items = _filter_existing_items(items_to_yield) - new_non_recurring_items = [item for item in non_existing_items if item.imdb_id not in self.recurring_items and isinstance(item, MediaItem)] - self.recurring_items.update([item.imdb_id for item in new_non_recurring_items]) + new_non_recurring_items = [item for item in non_existing_items if item.ids["imdb_id"] not in self.recurring_items and isinstance(item, MediaItem)] + self.recurring_items.update([item.ids["imdb_id"] for item in new_non_recurring_items]) if new_non_recurring_items: logger.info(f"Found {len(new_non_recurring_items)} new items to fetch") diff --git a/src/program/content/trakt.py b/src/program/content/trakt.py index f7314ed2..637132a4 100644 --- a/src/program/content/trakt.py +++ b/src/program/content/trakt.py @@ -97,10 +97,10 @@ def run(self): new_non_recurring_items = [ item for item in non_existing_items - if item.imdb_id not in self.recurring_items + if item.ids["imdb_id"] not in self.recurring_items and isinstance(item, MediaItem) ] - self.recurring_items.update(item.imdb_id for item in new_non_recurring_items) + self.recurring_items.update(item.ids["imdb_id"] for item in new_non_recurring_items) if new_non_recurring_items: logger.log("TRAKT", f"Found {len(new_non_recurring_items)} new items to fetch") diff --git a/src/program/db/db_functions.py b/src/program/db/db_functions.py index b49c7e50..7bef6e53 100644 --- a/src/program/db/db_functions.py +++ b/src/program/db/db_functions.py @@ -3,16 +3,16 @@ from typing import TYPE_CHECKING, List import alembic 
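listrr, mdblist, overseerr, plex_watchlist, and trakt above all repeat the same three steps: drop items already in the database, drop IMDb ids already seen this run, and remember the rest. A hypothetical helper, not part of the patch, capturing that pattern with the same `_filter_existing_items` and `MediaItem` names those modules import:

```python
# Hypothetical consolidation of the filter-and-remember pattern above.
def new_items_only(items: list, recurring_items: set) -> list:
    fresh = [
        item for item in _filter_existing_items(items)
        if isinstance(item, MediaItem) and item.ids["imdb_id"] not in recurring_items
    ]
    recurring_items.update(item.ids["imdb_id"] for item in fresh)
    return fresh
```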
-from sqlalchemy import delete, func, insert, select, text, union_all +import sqlalchemy +from sqlalchemy import cast, delete, func, insert, select, text, union_all from sqlalchemy.orm import Session, aliased, joinedload from program.libraries.symlink import fix_broken_symlinks from program.media.stream import Stream, StreamBlacklistRelation, StreamRelation from program.settings.manager import settings_manager -from utils import alembic_dir -from utils.logger import logger +from loguru import logger -from .db import alembic, db +from .db import alembic, db, script_location, ensure_alembic_version_table if TYPE_CHECKING: from program.media.item import MediaItem @@ -25,7 +25,7 @@ def get_media_items_by_ids(media_item_ids: list[int]): with db.Session() as session: for media_item_id in media_item_ids: - item_type = session.execute(select(MediaItem.type).where(MediaItem._id==media_item_id)).scalar_one() + item_type = session.execute(select(MediaItem.type).where(MediaItem.id==media_item_id)).scalar_one() if not item_type: continue item = None @@ -33,24 +33,24 @@ def get_media_items_by_ids(media_item_ids: list[int]): case "movie": item = session.execute( select(Movie) - .where(MediaItem._id == media_item_id) + .where(MediaItem.id == media_item_id) ).unique().scalar_one() case "show": item = session.execute( select(Show) - .where(MediaItem._id == media_item_id) + .where(MediaItem.id == media_item_id) .options(joinedload(Show.seasons).joinedload(Season.episodes)) ).unique().scalar_one() case "season": item = session.execute( select(Season) - .where(Season._id == media_item_id) + .where(Season.id == media_item_id) .options(joinedload(Season.episodes)) ).unique().scalar_one() case "episode": item = session.execute( select(Episode) - .where(Episode._id == media_item_id) + .where(Episode.id == media_item_id) ).unique().scalar_one() if item: items.append(item) @@ -63,7 +63,7 @@ def get_parent_items_by_ids(media_item_ids: list[int]): with db.Session() as session: items = [] for media_item_id in media_item_ids: - item = session.execute(select(MediaItem).where(MediaItem._id == media_item_id, MediaItem.type.in_(["movie", "show"]))).unique().scalar_one_or_none() + item = session.execute(select(MediaItem).where(MediaItem.id == media_item_id, MediaItem.type.in_(["movie", "show"]))).unique().scalar_one_or_none() if item: items.append(item) return items @@ -72,7 +72,7 @@ def get_item_by_imdb_id(imdb_id: str): """Retrieve a MediaItem of type 'movie' or 'show' by an IMDb ID.""" from program.media.item import MediaItem with db.Session() as session: - item = session.execute(select(MediaItem).where(MediaItem.imdb_id == imdb_id, MediaItem.type.in_(["movie", "show"]))).unique().scalar_one_or_none() + item = session.execute(select(MediaItem).where(MediaItem.ids["imdb_id"] == imdb_id, MediaItem.type.in_(["movie", "show"]))).unique().scalar_one_or_none() return item def delete_media_item(item: "MediaItem"): @@ -134,11 +134,11 @@ def reset_streams(item: "MediaItem", active_stream_hash: str = None): blacklist_stream(item, stream, session) session.execute( - delete(StreamRelation).where(StreamRelation.parent_id == item._id) + delete(StreamRelation).where(StreamRelation.parent_id == item.id) ) session.execute( - delete(StreamBlacklistRelation).where(StreamBlacklistRelation.media_item_id == item._id) + delete(StreamBlacklistRelation).where(StreamBlacklistRelation.media_item_id == item.id) ) item.active_stream = {} session.commit() @@ -148,10 +148,10 @@ def clear_streams(item: "MediaItem"): with db.Session() as session: item = 
session.merge(item) session.execute( - delete(StreamRelation).where(StreamRelation.parent_id == item._id) + delete(StreamRelation).where(StreamRelation.parent_id == item.id) ) session.execute( - delete(StreamBlacklistRelation).where(StreamBlacklistRelation.media_item_id == item._id) + delete(StreamBlacklistRelation).where(StreamBlacklistRelation.media_item_id == item.id) ) session.commit() @@ -160,27 +160,27 @@ def blacklist_stream(item: "MediaItem", stream: Stream, session: Session = None) close_session = False if session is None: session = db.Session() - item = session.execute(select(type(item)).where(type(item)._id == item._id)).unique().scalar_one() + item = session.execute(select(type(item)).where(type(item).id == item.id)).unique().scalar_one() close_session = True try: item = session.merge(item) association_exists = session.query( session.query(StreamRelation) - .filter(StreamRelation.parent_id == item._id) - .filter(StreamRelation.child_id == stream._id) + .filter(StreamRelation.parent_id == item.id) + .filter(StreamRelation.child_id == stream.id) .exists() ).scalar() if association_exists: session.execute( delete(StreamRelation) - .where(StreamRelation.parent_id == item._id) - .where(StreamRelation.child_id == stream._id) + .where(StreamRelation.parent_id == item.id) + .where(StreamRelation.child_id == stream.id) ) session.execute( insert(StreamBlacklistRelation) - .values(media_item_id=item._id, stream_id=stream._id) + .values(media_item_id=item.id, stream_id=stream.id) ) item.store_state() session.commit() @@ -204,7 +204,7 @@ def filter_existing_streams(media_item_id: int, scraped_streams: List[Stream]) - existing_streams = session.execute( select(Stream.infohash) .join(Stream.parents) - .where(MediaItem._id == media_item_id) + .where(MediaItem.id == media_item_id) .where(Stream.infohash.in_(scraped_hashes)) ).scalars().all() existing_hashes = set(existing_streams) @@ -216,14 +216,14 @@ def get_stream_count(media_item_id: int) -> int: """Get the count of streams for a given MediaItem.""" with db.Session() as session: return session.execute( - select(func.count(Stream._id)) - .filter(Stream.parents.any(MediaItem._id == media_item_id)) + select(func.count(Stream.id)) + .filter(Stream.parents.any(MediaItem.id == media_item_id)) ).scalar_one() def load_streams_in_pages(session: Session, media_item_id: int, page_number: int, page_size: int = 5): """Load a specific page of streams for a given MediaItem.""" from program.media.item import MediaItem - stream_query = session.query(Stream._id, Stream.infohash).filter(Stream.parents.any(MediaItem._id == media_item_id)) + stream_query = session.query(Stream.id, Stream.infohash).filter(Stream.parents.any(MediaItem.id == media_item_id)) stream_chunk = stream_query.limit(page_size).offset(page_number * page_size).all() for stream_id, infohash in stream_chunk: @@ -233,13 +233,13 @@ def load_streams_in_pages(session: Session, media_item_id: int, page_number: int def _get_item_ids(session, item): from program.media.item import Episode, Season if item.type == "show": - show_id = item._id + show_id = item.id season_alias = aliased(Season, flat=True) - season_query = select(Season._id.label('id')).where(Season.parent_id == show_id) + season_query = select(Season.id.label('id')).where(Season.parent_id == show_id) episode_query = ( - select(Episode._id.label('id')) - .join(season_alias, Episode.parent_id == season_alias._id) + select(Episode.id.label('id')) + .join(season_alias, Episode.parent_id == season_alias.id) .where(season_alias.parent_id == 
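`blacklist_stream` above probes for a `StreamRelation` row, deletes it, and inserts the counterpart into `StreamBlacklistRelation`. A condensed sketch of that delete-then-insert idiom, with the same tables from this diff and, like the original, no concurrency guard:

```python
from sqlalchemy import delete, insert, select

# Sketch: move one association row from the active table to the blacklist.
def move_to_blacklist(session, item_id: int, stream_id: int) -> None:
    found = session.execute(
        select(StreamRelation.parent_id)
        .where(
            StreamRelation.parent_id == item_id,
            StreamRelation.child_id == stream_id,
        )
        .limit(1)
    ).first()
    if found:
        session.execute(
            delete(StreamRelation).where(
                StreamRelation.parent_id == item_id,
                StreamRelation.child_id == stream_id,
            )
        )
        session.execute(
            insert(StreamBlacklistRelation).values(
                media_item_id=item_id, stream_id=stream_id
            )
        )
```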
show_id) ) @@ -248,28 +248,28 @@ def _get_item_ids(session, item): return show_id, related_ids elif item.type == "season": - season_id = item._id + season_id = item.id episode_ids = session.execute( - select(Episode._id) + select(Episode.id) .where(Episode.parent_id == season_id) ).scalars().all() return season_id, episode_ids elif item.type == "episode": - return item._id, [] + return item.id, [] elif hasattr(item, "parent"): - parent_id = item.parent._id + parent_id = item.parent.id return parent_id, [] - return item._id, [] + return item.id, [] def _ensure_item_exists_in_db(item: "MediaItem") -> bool: from program.media.item import MediaItem, Movie, Show if isinstance(item, (Movie, Show)): with db.Session() as session: - if item._id is None: - return session.execute(select(func.count(MediaItem._id)).where(MediaItem.imdb_id == item.imdb_id)).scalar_one() != 0 - return session.execute(select(func.count(MediaItem._id)).where(MediaItem._id == item._id)).scalar_one() != 0 - return bool(item and item._id) + if item.id is None: + return session.execute(select(func.count(MediaItem.id)).where(cast(MediaItem.ids["imdb_id"].astext, sqlalchemy.String) == item.ids["imdb_id"])).scalar_one() != 0 + return session.execute(select(func.count(MediaItem.id)).where(MediaItem.id == item.id)).scalar_one() != 0 + return bool(item and item.id) def _filter_existing_items(items: list["MediaItem"]) -> list["MediaItem"]: """Return a list of MediaItems that do not exist in the database.""" @@ -277,22 +277,22 @@ def _filter_existing_items(items: list["MediaItem"]) -> list["MediaItem"]: with db.Session() as session: existing_items = set( session.execute( - select(MediaItem.imdb_id) - .where(MediaItem.imdb_id.in_([item.imdb_id for item in items])) + select(MediaItem.ids["imdb_id"]) + .where(MediaItem.ids["imdb_id"].in_([item.ids["imdb_id"] for item in items])) ).scalars().all() ) - return [item for item in items if item.imdb_id not in existing_items] + return [item for item in items if item.ids["imdb_id"] not in existing_items] def _get_item_type_from_db(item: "MediaItem") -> str: from program.media.item import MediaItem with db.Session() as session: - if item._id is None: - return session.execute(select(MediaItem.type).where((MediaItem.imdb_id==item.imdb_id) & (MediaItem.type.in_(["show", "movie"])))).scalar_one() - return session.execute(select(MediaItem.type).where(MediaItem._id==item._id)).scalar_one() + if item.id is None: + return session.execute(select(MediaItem.type).where((cast(MediaItem.ids["imdb_id"].astext, sqlalchemy.String)==item.ids["imdb_id"]) & (MediaItem.type.in_(["show", "movie"])))).scalar_one() + return session.execute(select(MediaItem.type).where(MediaItem.id==item.id)).scalar_one() def _store_item(item: "MediaItem"): from program.media.item import Episode, Movie, Season, Show - if isinstance(item, (Movie, Show, Season, Episode)) and item._id is not None: + if isinstance(item, (Movie, Show, Season, Episode)) and item.id is not None: with db.Session() as session: item.store_state() session.merge(item) @@ -305,7 +305,7 @@ def _store_item(item: "MediaItem"): def _imdb_exists_in_db(imdb_id: str) -> bool: from program.media.item import MediaItem with db.Session() as session: - return session.execute(select(func.count(MediaItem._id)).where(MediaItem.imdb_id == imdb_id)).scalar_one() != 0 + return session.execute(select(func.count(MediaItem.id)).where(cast(MediaItem.ids['imdb_id'].astext, sqlalchemy.String) == imdb_id)).scalar_one() != 0 def _get_item_from_db(session, item: "MediaItem"): from 
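`_ensure_item_exists_in_db` and `_imdb_exists_in_db` above answer a yes/no question with `COUNT(*) != 0`. An equivalent sketch that can stop at the first matching row, assuming the `MediaItem` model from this patch:

```python
from sqlalchemy import select

# Sketch: a LIMIT 1 probe gives the same boolean answer as COUNT(*) != 0.
def imdb_exists(session, imdb_id: str) -> bool:
    row = session.execute(
        select(MediaItem.id)
        .where(MediaItem.ids["imdb_id"].astext == imdb_id)
        .limit(1)
    ).first()
    return row is not None
```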
program.media.item import Episode, MediaItem, Movie, Season, Show @@ -317,27 +317,27 @@ def _get_item_from_db(session, item: "MediaItem"): case "movie": r = session.execute( select(Movie) - .where(MediaItem.imdb_id == item.imdb_id) + .where(cast(MediaItem.ids['imdb_id'].astext, sqlalchemy.String) == item.ids["imdb_id"]) ).unique().scalar_one() return r case "show": r = session.execute( select(Show) - .where(MediaItem.imdb_id == item.imdb_id) + .where(cast(MediaItem.ids['imdb_id'].astext, sqlalchemy.String) == item.ids["imdb_id"]) .options(joinedload(Show.seasons).joinedload(Season.episodes)) ).unique().scalar_one() return r case "season": r = session.execute( select(Season) - .where(Season._id == item._id) + .where(Season.id == item.id) .options(joinedload(Season.episodes)) ).unique().scalar_one() return r case "episode": r = session.execute( select(Episode) - .where(Episode._id == item._id) + .where(Episode.id == item.id) ).unique().scalar_one() return r case _: @@ -396,8 +396,33 @@ def _run_thread_with_db_item(fn, service, program, input_item: "MediaItem" = Non def hard_reset_database(): """Resets the database to a fresh state.""" + from sqlalchemy import create_engine + from sqlalchemy.engine.url import make_url + logger.log("DATABASE", "Starting Hard Reset of Database") + # Get the database connection string from settings + db_url = settings_manager.settings.database.host + url = make_url(db_url) + + # Create an engine for the default database to drop and recreate the target database + default_db_url = url.set(database="postgres") + default_engine = create_engine(default_db_url, isolation_level="AUTOCOMMIT") + + try: + # Drop and recreate the database + with default_engine.connect() as connection: + connection.execute(text(f"DROP DATABASE IF EXISTS {url.database}")) + connection.execute(text(f"CREATE DATABASE {url.database}")) + logger.log("DATABASE", f"Database '{url.database}' dropped and recreated") + except sqlalchemy.exc.OperationalError as e: + logger.error(f"OperationalError: {e}") + exit(0) + + # Reconnect to the new database + db.engine.dispose() + db.engine = create_engine(db_url) + # Disable foreign key checks temporarily with db.engine.connect() as connection: if db.engine.name == 'sqlite': @@ -406,19 +431,6 @@ def hard_reset_database(): connection.execute(text("SET CONSTRAINTS ALL DEFERRED")) try: - for table in reversed(db.Model.metadata.sorted_tables): - try: - table.drop(connection, checkfirst=True) - logger.log("DATABASE", f"Dropped table: {table.name}") - except Exception as e: - logger.log("DATABASE", f"Error dropping table {table.name}: {str(e)}") - - try: - connection.execute(text("DROP TABLE IF EXISTS alembic_version")) - logger.log("DATABASE", "Alembic version table dropped") - except Exception as e: - logger.log("DATABASE", f"Error dropping alembic_version table: {str(e)}") - db.Model.metadata.create_all(connection) logger.log("DATABASE", "All tables recreated") @@ -436,9 +448,10 @@ def hard_reset_database(): try: logger.log("DATABASE", "Removing Alembic Directory") - shutil.rmtree(alembic_dir, ignore_errors=True) - os.makedirs(alembic_dir, exist_ok=True) - alembic.init(alembic_dir) + shutil.rmtree(script_location, ignore_errors=True) + os.makedirs(script_location, exist_ok=True) + alembic.init(script_location) + ensure_alembic_version_table() logger.log("DATABASE", "Alembic reinitialized") except Exception as e: logger.log("DATABASE", f"Error reinitializing Alembic: {str(e)}") diff --git a/src/program/downloaders/torbox.py b/src/program/downloaders/torbox.py 
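`hard_reset_database` now drops and recreates the database itself rather than dropping tables one by one. `DROP DATABASE` and `CREATE DATABASE` cannot run inside a transaction, which is why the code connects to the maintenance database with an `AUTOCOMMIT` engine. A standalone sketch with a placeholder DSN; a real reset may also need to terminate live sessions on the target database first:

```python
from sqlalchemy import create_engine, text
from sqlalchemy.engine.url import make_url

url = make_url("postgresql+psycopg2://riven:riven@localhost:5432/riven")  # placeholder DSN

# Connect to the maintenance DB; DROP/CREATE DATABASE must run outside a transaction.
admin = create_engine(url.set(database="postgres"), isolation_level="AUTOCOMMIT")
with admin.connect() as conn:
    conn.execute(text(f'DROP DATABASE IF EXISTS "{url.database}"'))  # quoted identifier
    conn.execute(text(f'CREATE DATABASE "{url.database}"'))
admin.dispose()
```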
index 590b540c..9777d0fc 100644 --- a/src/program/downloaders/torbox.py +++ b/src/program/downloaders/torbox.py @@ -73,7 +73,7 @@ def validate(self) -> bool: def run(self, item: MediaItem) -> bool: """Download media item from torbox.app""" return_value = False - stream_count = get_stream_count(item._id) + stream_count = get_stream_count(item.id) processed_stream_hashes = set() # Track processed stream hashes stream_hashes = {} @@ -82,7 +82,7 @@ def run(self, item: MediaItem) -> bool: for page_number in range(total_pages): with db.Session() as session: - for stream_id, infohash, stream in load_streams_in_pages(session, item._id, page_number, page_size=number_of_rows_per_page): + for stream_id, infohash, stream in load_streams_in_pages(session, item.id, page_number, page_size=number_of_rows_per_page): stream_hash_lower = infohash.lower() if stream_hash_lower in processed_stream_hashes: diff --git a/src/program/indexers/trakt.py b/src/program/indexers/trakt.py index 5cb9b223..9aa445f2 100644 --- a/src/program/indexers/trakt.py +++ b/src/program/indexers/trakt.py @@ -29,7 +29,7 @@ def copy_attributes(source, target): """Copy attributes from source to target.""" attributes = ["file", "folder", "update_folder", "symlinked", "is_anime", "symlink_path", "subtitles", "requested_by", "requested_at", "overseerr_id", "active_stream", "requested_id"] for attr in attributes: - target.set(attr, getattr(source, attr, None)) + setattr(target, attr, getattr(source, attr, None)) def copy_items(self, itema: MediaItem, itemb: MediaItem): """Copy attributes from itema to itemb recursively.""" @@ -48,7 +48,7 @@ def copy_items(self, itema: MediaItem, itemb: MediaItem): itemb.set("is_anime", is_anime) elif itemb.type == "movie": self.copy_attributes(itema, itemb) - itemb.set("is_anime", is_anime) + itemb.is_anime = is_anime else: logger.error(f"Item types {itema.type} and {itemb.type} do not match cant copy metadata") return itemb @@ -58,11 +58,11 @@ def run(self, in_item: MediaItem, log_msg: bool = True) -> Generator[Union[Movie if not in_item: logger.error("Item is None") return - if not (imdb_id := in_item.imdb_id): + if not (imdb_id := in_item.ids["imdb_id"]): logger.error(f"Item {in_item.log_string} does not have an imdb_id, cannot index it") return - if in_item.imdb_id in self.failed_ids: + if in_item.ids["imdb_id"] in self.failed_ids: return item_type = in_item.type if in_item.type != "mediaitem" else None @@ -74,19 +74,19 @@ def run(self, in_item: MediaItem, log_msg: bool = True) -> Generator[Union[Movie elif item.type == "movie": pass else: - logger.error(f"Indexed IMDb Id {item.imdb_id} returned the wrong item type: {item.type}") - self.failed_ids.add(in_item.imdb_id) + logger.error(f"Indexed IMDb Id {item.ids['imdb_id']} returned the wrong item type: {item.type}") + self.failed_ids.add(in_item.ids["imdb_id"]) return else: - logger.error(f"Failed to index item with imdb_id: {in_item.imdb_id}") - self.failed_ids.add(in_item.imdb_id) + logger.error(f"Failed to index item with imdb_id: {in_item.ids['imdb_id']}") + self.failed_ids.add(in_item.ids["imdb_id"]) return item = self.copy_items(in_item, item) item.indexed_at = datetime.now() if log_msg: # used for mapping symlinks to database, need to hide this log message - logger.debug(f"Indexed IMDb id ({in_item.imdb_id}) as {item.type.title()}: {item.log_string}") + logger.debug(f"Indexed IMDb id ({in_item.ids['imdb_id']}) as {item.type.title()}: {item.log_string}") yield item @staticmethod diff --git a/src/program/libraries/symlink.py 
b/src/program/libraries/symlink.py index 4f0ef36e..b1077f8a 100644 --- a/src/program/libraries/symlink.py +++ b/src/program/libraries/symlink.py @@ -304,9 +304,9 @@ def get_items_from_filepath(session: Session, filepath: str) -> list["Movie"] | for ep_num in episode_numbers: query = ( session.query(Episode) - .join(Season, Episode.parent_id == Season._id) - .join(Show, Season.parent_id == Show._id) - .filter(Show.imdb_id == imdb_id, Season.number == season_number, Episode.number == ep_num) + .join(Season, Episode.parent_id == Season.id) + .join(Show, Season.parent_id == Show.id) + .filter(Show.ids["imdb_id"] == imdb_id, Season.number == season_number, Episode.number == ep_num) ) episode_item = query.with_entities(Episode).first() if episode_item: diff --git a/src/program/media/item.py b/src/program/media/item.py index 82dd42ea..3b0e31e4 100644 --- a/src/program/media/item.py +++ b/src/program/media/item.py @@ -1,65 +1,180 @@ -"""MediaItem class""" import json from datetime import datetime from pathlib import Path from typing import List, Optional, Self import sqlalchemy -from RTN import parse -from sqlalchemy import Index +from sqlalchemy import ( + Boolean, + Column, + DateTime, + ForeignKey, + Index, + Integer, + String, + func, +) +from sqlalchemy.dialects.postgresql import JSONB from sqlalchemy.orm import Mapped, mapped_column, object_session, relationship import utils.websockets.manager as ws_manager from program.db.db import db from program.media.state import States from program.media.subtitle import Subtitle +from program.settings.models import RivenSettingsModel from utils.logger import logger -from ..db.db_functions import blacklist_stream, reset_streams +from ..db.db_functions import blacklist_stream from .stream import Stream +EPOCH = datetime.fromtimestamp(0) + + +class ProfileDataLink(db.Model): + __tablename__ = 'profiledatalink' + id: Mapped[int] = mapped_column(sqlalchemy.Integer, primary_key=True) + data_id = Column(Integer, ForeignKey('profiledata.id')) + profile_name = Column(String, ForeignKey('profile.name')) + +class Profile(db.Model): + __tablename__ = 'profile' + name: Mapped[str] = mapped_column(sqlalchemy.String, primary_key=True) + profile_data = relationship("ProfileData", secondary="profiledatalink", back_populates="profile") + model = mapped_column(JSONB, nullable=False) + + def __init__(self, model: RivenSettingsModel) -> None: + self.model = model.to_dict() + self.name = model.profile + + @property + def settings_model(self) -> RivenSettingsModel: + return RivenSettingsModel(**self.model) + + @settings_model.setter + def settings_model(self, value: RivenSettingsModel): + self.model = value.model_dump_json() + +class ProfileData(db.Model): + __tablename__ = 'profiledata' + id: Mapped[int] = mapped_column(sqlalchemy.Integer, primary_key=True) + parent_id = Column(Integer, ForeignKey('mediaitem.id', ondelete="CASCADE")) + parent: Mapped["MediaItem"] = relationship("MediaItem", back_populates="profiles") + + profile: Mapped[Profile] = relationship("Profile", secondary="profiledatalink", back_populates="profile_data") + last_state = Column(sqlalchemy.Enum(States)) + last_try = Column(DateTime) + + scraped_at = Column(DateTime) + scraped_times = Column(Integer) + streams: Mapped[List["Stream"]] = relationship(secondary="streamrelation", back_populates="parents") + blacklisted_streams: Mapped[List["Stream"]] = relationship(secondary="streamblacklistrelation", back_populates="blacklisted_parents") + + active_stream_id = Column(Integer, ForeignKey('stream.id')) + 
active_stream: Mapped[Optional["Stream"]] = relationship("Stream") + download_path = Column(String) + + symlink_path = Column(String) + symlinked_times = Column(Integer) + + subtitles: Mapped[List["Subtitle"]] = relationship(back_populates="parent") + + def __init__(self, profile: Profile) -> None: + self.last_state: States = States.Requested + + self.profile: Profile = profile + + self.scraped_at: datetime = EPOCH + self.scraped_times: int = 0 + self.streams: List[Stream] = [] + self.blacklisted_streams: List[Stream] = [] + + self.active_stream: Stream | None = None + self.download_path: Path | None = None + + self.symlink_path: Path | None = None + self.symlinked_times: int = 0 + + self.subtitles: List[Subtitle] = [] + + @property + def state(self): + return self._determine_state() + + @property + def is_scraped(self): + session = object_session(self) + if session: + session.refresh(self, attribute_names=['blacklisted_streams']) # Prom: Ensure these reflect the state of whats in the db. + return (len(self.streams) > 0 + and any(not stream in self.blacklisted_streams for stream in self.streams)) + + @property + def log_string(self): + return self.parent.log_string + + def _determine_state(self) -> States: + if self.symlink_path: + return States.Completed + elif self.download_path: + return States.Downloaded + elif self.is_scraped(): + return States.Scraped + return States.Requested + + def is_stream_blacklisted(self, stream: Stream): + """Check if a stream is blacklisted for this item.""" + session = object_session(self) + if session: + session.refresh(self, attribute_names=['blacklisted_streams']) + return stream in self.blacklisted_streams + + def blacklist_stream(self, stream: Stream): + value = blacklist_stream(self, stream) + if value: + logger.debug(f"Blacklisted stream {stream.infohash} for {self.parent.log_string}") + return value + + def reset(self, soft_reset: bool = False): + self.type = "profiledata" + self.scraped_at = EPOCH + self.scraped_times = 0 + self.streams = [] + self.blacklisted_streams = [] + if not soft_reset: + self.active_stream = None + self.download_path = None + self.symlink_path = None + self.symlinked_times = 0 + self.subtitles = [] class MediaItem(db.Model): - """MediaItem class""" - __tablename__ = "MediaItem" - _id: Mapped[int] = mapped_column(primary_key=True) - item_id: Mapped[str] = mapped_column(sqlalchemy.String, nullable=False) - number: Mapped[Optional[int]] = mapped_column(sqlalchemy.Integer, nullable=True) - type: Mapped[str] = mapped_column(sqlalchemy.String, nullable=False) - requested_at: Mapped[Optional[datetime]] = mapped_column(sqlalchemy.DateTime, default=datetime.now()) - requested_by: Mapped[Optional[str]] = mapped_column(sqlalchemy.String, nullable=True) - requested_id: Mapped[Optional[int]] = mapped_column(sqlalchemy.Integer, nullable=True) - indexed_at: Mapped[Optional[datetime]] = mapped_column(sqlalchemy.DateTime, nullable=True) - scraped_at: Mapped[Optional[datetime]] = mapped_column(sqlalchemy.DateTime, nullable=True) - scraped_times: Mapped[Optional[int]] = mapped_column(sqlalchemy.Integer, default=0) - active_stream: Mapped[Optional[dict]] = mapped_column(sqlalchemy.JSON, nullable=True) - streams: Mapped[list[Stream]] = relationship(secondary="StreamRelation", back_populates="parents", lazy="select", cascade="all") - blacklisted_streams: Mapped[list[Stream]] = relationship(secondary="StreamBlacklistRelation", back_populates="blacklisted_parents", lazy="select", cascade="all") - symlinked: Mapped[Optional[bool]] = 
mapped_column(sqlalchemy.Boolean, default=False) - symlinked_at: Mapped[Optional[datetime]] = mapped_column(sqlalchemy.DateTime, nullable=True) - symlinked_times: Mapped[Optional[int]] = mapped_column(sqlalchemy.Integer, default=0) - symlink_path: Mapped[Optional[str]] = mapped_column(sqlalchemy.String, nullable=True) - file: Mapped[Optional[str]] = mapped_column(sqlalchemy.String, nullable=True) - folder: Mapped[Optional[str]] = mapped_column(sqlalchemy.String, nullable=True) - alternative_folder: Mapped[Optional[str]] = mapped_column(sqlalchemy.String, nullable=True) - aliases: Mapped[Optional[dict]] = mapped_column(sqlalchemy.JSON, default={}) - is_anime: Mapped[Optional[bool]] = mapped_column(sqlalchemy.Boolean, default=False) - title: Mapped[Optional[str]] = mapped_column(sqlalchemy.String, nullable=True) - imdb_id: Mapped[Optional[str]] = mapped_column(sqlalchemy.String, nullable=True) - tvdb_id: Mapped[Optional[str]] = mapped_column(sqlalchemy.String, nullable=True) - tmdb_id: Mapped[Optional[str]] = mapped_column(sqlalchemy.String, nullable=True) - network: Mapped[Optional[str]] = mapped_column(sqlalchemy.String, nullable=True) - country: Mapped[Optional[str]] = mapped_column(sqlalchemy.String, nullable=True) - language: Mapped[Optional[str]] = mapped_column(sqlalchemy.String, nullable=True) - aired_at: Mapped[Optional[datetime]] = mapped_column(sqlalchemy.DateTime, nullable=True) - year: Mapped[Optional[int]] = mapped_column(sqlalchemy.Integer, nullable=True) - genres: Mapped[Optional[List[str]]] = mapped_column(sqlalchemy.JSON, nullable=True) - key: Mapped[Optional[str]] = mapped_column(sqlalchemy.String, nullable=True) - guid: Mapped[Optional[str]] = mapped_column(sqlalchemy.String, nullable=True) - update_folder: Mapped[Optional[str]] = mapped_column(sqlalchemy.String, nullable=True) - overseerr_id: Mapped[Optional[int]] = mapped_column(sqlalchemy.Integer, nullable=True) - last_state: Mapped[Optional[States]] = mapped_column(sqlalchemy.Enum(States), default=States.Unknown) - subtitles: Mapped[list[Subtitle]] = relationship(Subtitle, back_populates="parent", lazy="joined", cascade="all, delete-orphan") + __tablename__ = 'mediaitem' + + id: Mapped[int] = mapped_column(sqlalchemy.Integer, primary_key=True) + type = Column(String) + last_state = Column(sqlalchemy.Enum(States)) + + title = Column(String) + year = Column(Integer) + genres = Column(JSONB) + language = Column(String) + + ids = Column(JSONB, default={"imdb": "", "tvdb": "", "tmdb": ""}) + network = Column(String) + country = Column(String) + aired_at = Column(DateTime) + + requested_at = Column(DateTime) + requested_by = Column(String) + requested_id = Column(Integer) + indexed_at = Column(DateTime) + + aliases = Column(JSONB) + is_anime = Column(Boolean) + + overseerr_id = Column(Integer) + + profiles: Mapped[List["ProfileData"]] = relationship("ProfileData", back_populates="parent") __mapper_args__ = { "polymorphic_identity": "mediaitem", @@ -68,13 +183,13 @@ class MediaItem(db.Model): } __table_args__ = ( - Index('ix_mediaitem_item_id', 'item_id'), + Index('ix_mediaitem_id', 'id'), Index('ix_mediaitem_type', 'type'), Index('ix_mediaitem_requested_by', 'requested_by'), Index('ix_mediaitem_title', 'title'), - Index('ix_mediaitem_imdb_id', 'imdb_id'), - Index('ix_mediaitem_tvdb_id', 'tvdb_id'), - Index('ix_mediaitem_tmdb_id', 'tmdb_id'), + Index('ix_mediaitem_ids_imdb_id', func.cast(func.jsonb_extract_path_text(ids, 'imdb'), sqlalchemy.String)), + Index('ix_mediaitem_ids_tvdb_id', 
func.cast(func.jsonb_extract_path_text(ids, 'tvdb'), sqlalchemy.String)), + Index('ix_mediaitem_ids_tmdb_id', func.cast(func.jsonb_extract_path_text(ids, 'tmdb'), sqlalchemy.String)), Index('ix_mediaitem_network', 'network'), Index('ix_mediaitem_country', 'country'), Index('ix_mediaitem_language', 'language'), @@ -88,75 +203,43 @@ def __init__(self, item: dict | None) -> None: if item is None: return self.requested_at = item.get("requested_at", datetime.now()) - self.requested_by = item.get("requested_by") - self.requested_id = item.get("requested_id") - + self.requested_by = item.get("requested_by", "unknown") + self.requested_id = item.get("requested_id", None) self.indexed_at = None - - self.scraped_at = None - self.scraped_times = 0 - self.active_stream = item.get("active_stream", {}) - self.streams: List[Stream] = [] - self.blacklisted_streams: List[Stream] = [] - - self.symlinked = False - self.symlinked_at = None - self.symlinked_times = 0 - - self.file = None - self.folder = None self.is_anime = item.get("is_anime", False) - # Media related self.title = item.get("title") - self.imdb_id = item.get("imdb_id") - if self.imdb_id: - self.imdb_link = f"https://www.imdb.com/title/{self.imdb_id}/" - if not hasattr(self, "item_id"): - self.item_id = self.imdb_id - self.tvdb_id = item.get("tvdb_id") - self.tmdb_id = item.get("tmdb_id") - self.network = item.get("network") - self.country = item.get("country") - self.language = item.get("language") - self.aired_at = item.get("aired_at") - self.year = item.get("year") + self.ids = { "imdb_id": item.get("imdb_id"), + "tvdb_id": item.get("tvdb_id", ""), + "tmdb_id": item.get("tmdb_id", "") } + + self.network = item.get("network", "") + self.country = item.get("country", "") + self.language = item.get("language", "") + self.aired_at = item.get("aired_at", EPOCH) + self.year = item.get("year", 1970) self.genres = item.get("genres", []) self.aliases = item.get("aliases", {}) - # Plex related - self.key = item.get("key") - self.guid = item.get("guid") - self.update_folder = item.get("update_folder") - - # Overseerr related self.overseerr_id = item.get("overseerr_id") - #Post processing - self.subtitles = item.get("subtitles", []) + self.profiles: list[ProfileData] = [] + with db.Session() as session: + db_profiles = session.query(Profile).all() + for profile in db_profiles: + data = ProfileData(profile) + self.profiles.append(data) def store_state(self) -> None: - if self.last_state and self.last_state != self._determine_state(): + _state = self._determine_state() + if self.last_state and self.last_state != _state: ws_manager.send_item_update(json.dumps(self.to_dict())) - self.last_state = self._determine_state() - - def is_stream_blacklisted(self, stream: Stream): - """Check if a stream is blacklisted for this item.""" - session = object_session(self) - if session: - session.refresh(self, attribute_names=['blacklisted_streams']) - return stream in self.blacklisted_streams - - def blacklist_stream(self, stream: Stream): - value = blacklist_stream(self, stream) - if value: - logger.debug(f"Blacklisted stream {stream.infohash} for {self.log_string}") - return value + self.last_state = _state @property def is_released(self) -> bool: """Check if an item has been released.""" - if self.aired_at and self.aired_at <= datetime.now(): + if self.aired_at != EPOCH and self.aired_at <= datetime.now(): return True return False @@ -165,131 +248,41 @@ def state(self): return self._determine_state() def _determine_state(self): - if self.key or self.update_folder == 
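The three expression indexes above cast `jsonb_extract_path_text(ids, 'imdb')` (equivalently `ids ->> 'imdb'`) to text. Postgres only consults such an index when a query repeats the indexed expression, and the accessors elsewhere in this patch read the `'imdb_id'` key, which an index built over `'imdb'` cannot serve. A sketch of a matching index/query pair, keeping the key name the index definitions use:

```python
import sqlalchemy
from sqlalchemy import func, select

# The query must repeat the indexed expression verbatim for the planner
# to use the expression index.
indexed_expr = func.cast(
    func.jsonb_extract_path_text(MediaItem.ids, "imdb"), sqlalchemy.String
)
stmt = select(MediaItem).where(indexed_expr == "tt0133093")
```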
"updated": + if all(profile.last_state == States.Completed for profile in self.profiles): return States.Completed - elif self.symlinked: - return States.Symlinked - elif self.file and self.folder: - return States.Downloaded - elif self.is_scraped(): - return States.Scraped - elif self.title and self.is_released: - return States.Indexed - elif self.title: + elif any(profile.last_state == States.Completed for profile in self.profiles) and any(profile.last_state != States.Completed for profile in self.profiles): + return States.PartiallyCompleted + elif not self.is_released: return States.Unreleased - elif self.imdb_id and self.requested_by: + else: return States.Requested - return States.Unknown def copy_other_media_attr(self, other): """Copy attributes from another media item.""" - self.title = getattr(other, "title", None) - self.tvdb_id = getattr(other, "tvdb_id", None) - self.tmdb_id = getattr(other, "tmdb_id", None) - self.network = getattr(other, "network", None) - self.country = getattr(other, "country", None) - self.language = getattr(other, "language", None) - self.aired_at = getattr(other, "aired_at", None) - self.genres = getattr(other, "genres", []) - self.is_anime = getattr(other, "is_anime", False) - self.overseerr_id = getattr(other, "overseerr_id", None) - - def is_scraped(self): - session = object_session(self) - if session: - session.refresh(self, attribute_names=['blacklisted_streams']) # Prom: Ensure these reflect the state of whats in the db. - return (len(self.streams) > 0 - and any(not stream in self.blacklisted_streams for stream in self.streams)) + self.title = getattr(other, "title") + self.ids = getattr(other, "ids") + self.network = getattr(other, "network") + self.country = getattr(other, "country") + self.language = getattr(other, "language") + self.aired_at = getattr(other, "aired_at") + self.genres = getattr(other, "genres") + self.is_anime = getattr(other, "is_anime") + self.overseerr_id = getattr(other, "overseerr_id") def to_dict(self): """Convert item to dictionary (API response)""" - return { - "id": str(self._id), - "title": self.title, - "type": self.__class__.__name__, - "imdb_id": self.imdb_id if hasattr(self, "imdb_id") else None, - "tvdb_id": self.tvdb_id if hasattr(self, "tvdb_id") else None, - "tmdb_id": self.tmdb_id if hasattr(self, "tmdb_id") else None, - "state": self.last_state.name, - "imdb_link": self.imdb_link if hasattr(self, "imdb_link") else None, - "aired_at": str(self.aired_at), - "genres": self.genres if hasattr(self, "genres") else None, - "is_anime": self.is_anime if hasattr(self, "is_anime") else False, - "guid": self.guid, - "requested_at": str(self.requested_at), - "requested_by": self.requested_by, - "scraped_at": str(self.scraped_at), - "scraped_times": self.scraped_times, - } - - def to_extended_dict(self, abbreviated_children=False): - """Convert item to extended dictionary (API response)""" - dict = self.to_dict() - match self: - case Show(): - dict["seasons"] = ( - [season.to_extended_dict() for season in self.seasons] - if not abbreviated_children - else self.represent_children - ) - case Season(): - dict["episodes"] = ( - [episode.to_extended_dict() for episode in self.episodes] - if not abbreviated_children - else self.represent_children - ) - dict["language"] = self.language if hasattr(self, "language") else None - dict["country"] = self.country if hasattr(self, "country") else None - dict["network"] = self.network if hasattr(self, "network") else None - dict["active_stream"] = ( - self.active_stream if hasattr(self, 
"active_stream") else None - ) - dict["streams"] = getattr(self, "streams", []) - dict["blacklisted_streams"] = getattr(self, "blacklisted_streams", []) - dict["number"] = self.number if hasattr(self, "number") else None - dict["symlinked"] = self.symlinked if hasattr(self, "symlinked") else None - dict["symlinked_at"] = ( - self.symlinked_at if hasattr(self, "symlinked_at") else None - ) - dict["symlinked_times"] = ( - self.symlinked_times if hasattr(self, "symlinked_times") else None - ) - dict["is_anime"] = self.is_anime if hasattr(self, "is_anime") else None - dict["update_folder"] = ( - self.update_folder if hasattr(self, "update_folder") else None - ) - dict["file"] = self.file if hasattr(self, "file") else None - dict["folder"] = self.folder if hasattr(self, "folder") else None - dict["symlink_path"] = self.symlink_path if hasattr(self, "symlink_path") else None - dict["subtitles"] = getattr(self, "subtitles", []) - return dict - - def __iter__(self): - for attr, _ in vars(self).items(): - yield attr + return json.dumps(self.__dict__) def __eq__(self, other): - if type(other) == type(self): - return self._id == other._id - return False + return type(self) == type(other) and self.id == other.id - def copy(self, other): - if other is None: - return None - self._id = getattr(other, "_id", None) - self.imdb_id = getattr(other, "imdb_id", None) + def copy(self, other: "MediaItem"): + self.id = getattr(other, "id", None) + self.ids = getattr(other, "ids", None) if hasattr(self, "number"): self.number = getattr(other, "number", None) return self - def get(self, key, default=None): - """Get item attribute""" - return getattr(self, key, default) - - def set(self, key, value): - """Set item attribute""" - _set_nested_attr(self, key, value) - def get_top_title(self) -> str: """Get the top title of the item.""" if self.type == "season": @@ -302,10 +295,10 @@ def get_top_title(self) -> str: def get_top_imdb_id(self) -> str: """Get the imdb_id of the item at the top of the hierarchy.""" if self.type == "season": - return self.parent.imdb_id + return self.parent.ids["imdb_id"] elif self.type == "episode": - return self.parent.parent.imdb_id - return self.imdb_id + return self.parent.parent.ids["imdb_id"] + return self.ids["imdb_id"] def get_aliases(self) -> dict: """Get the aliases of the item.""" @@ -316,8 +309,6 @@ def get_aliases(self) -> dict: else: return self.aliases - def __hash__(self): - return hash(self.item_id) def reset(self, soft_reset: bool = False): """Reset item attributes.""" @@ -334,59 +325,25 @@ def reset(self, soft_reset: bool = False): def _reset(self, soft_reset): """Reset item attributes for rescraping.""" - if self.symlink_path: - if Path(self.symlink_path).exists(): - Path(self.symlink_path).unlink() - self.set("symlink_path", None) - - try: - for subtitle in self.subtitles: - subtitle.remove() - except Exception as e: - logger.warning(f"Failed to remove subtitles for {self.log_string}: {str(e)}") - - self.set("file", None) - self.set("folder", None) - self.set("alternative_folder", None) - - if not self.active_stream: - self.active_stream = {} - if not soft_reset: - if self.active_stream.get("infohash", False): - reset_streams(self, self.active_stream["infohash"]) - else: - if self.active_stream.get("infohash", False): - stream = next((stream for stream in self.streams if stream.infohash == self.active_stream["infohash"]), None) - if stream: - self.blacklist_stream(stream) - - self.set("active_stream", {}) - self.set("symlinked", False) - self.set("symlinked_at", None) - 
self.set("update_folder", None) - self.set("scraped_at", None) - - self.set("symlinked_times", 0) - self.set("scraped_times", 0) + for profile in self.profiles: + profile.reset(soft_reset) logger.debug(f"Item {self.log_string} reset for rescraping") @property def log_string(self): - return self.title or self.imdb_id + return self.title or self.ids["imdb_id"] @property def collection(self): - return self.parent.collection if self.parent else self.item_id - + return self.parent.collection if self.parent else self.ids["imdb_id"] class Movie(MediaItem): - """Movie class""" - __tablename__ = "Movie" - _id: Mapped[int] = mapped_column(sqlalchemy.ForeignKey("MediaItem._id"), primary_key=True) + __tablename__ = 'movie' + id = Column(Integer, ForeignKey('mediaitem.id'), primary_key=True) + __mapper_args__ = { "polymorphic_identity": "movie", - "polymorphic_load": "inline", } def copy(self, other): @@ -395,42 +352,26 @@ def copy(self, other): def __init__(self, item): self.type = "movie" - self.file = item.get("file", None) super().__init__(item) - self.item_id = self.imdb_id def __repr__(self): - return f"Movie:{self.log_string}:{self.state.name}" - - def __hash__(self): - return super().__hash__() + return f"Movie:{self.log_string}:{self.last_state.name}" class Show(MediaItem): - """Show class""" - __tablename__ = "Show" - _id: Mapped[int] = mapped_column(sqlalchemy.ForeignKey("MediaItem._id"), primary_key=True) + __tablename__ = 'show' + id = Column(Integer, ForeignKey('mediaitem.id'), primary_key=True) seasons: Mapped[List["Season"]] = relationship(back_populates="parent", foreign_keys="Season.parent_id", lazy="joined", cascade="all, delete-orphan", order_by="Season.number") __mapper_args__ = { "polymorphic_identity": "show", - "polymorphic_load": "inline", } def __init__(self, item): super().__init__(item) self.type = "show" - self.locations = item.get("locations", []) self.seasons: list[Season] = item.get("seasons", []) - self.item_id = item.get("imdb_id") self.propagate_attributes_to_childs() - def get_season_index_by_id(self, item_id): - """Find the index of an season by its item_id.""" - for i, season in enumerate(self.seasons): - if season.item_id == item_id: - return i - return None - def _determine_state(self): if all(season.state == States.Completed for season in self.seasons): return States.Completed @@ -441,33 +382,17 @@ def _determine_state(self): for season in self.seasons ): return States.PartiallyCompleted - if all(season.state == States.Symlinked for season in self.seasons): - return States.Symlinked - if all(season.state == States.Downloaded for season in self.seasons): - return States.Downloaded - if self.is_scraped(): - return States.Scraped - if any(season.state == States.Indexed for season in self.seasons): - return States.Indexed - - if all(not season.is_released for season in self.seasons): + if not self.is_released or all(not season.is_released for season in self.seasons): return States.Unreleased - if any(season.state == States.Requested for season in self.seasons): - return States.Requested - return States.Unknown + return States.Requested def store_state(self) -> None: for season in self.seasons: season.store_state() - if self.last_state and self.last_state != self._determine_state(): - ws_manager.send_item_update(json.dumps(self.to_dict())) - self.last_state = self._determine_state() + super().store_state() def __repr__(self): - return f"Show:{self.log_string}:{self.state.name}" - - def __hash__(self): - return super().__hash__() + return 
f"Show:{self.log_string}:{self.last_state.name}" def copy(self, other): super(Show, self).copy(other) @@ -519,68 +444,51 @@ def propagate(target, source): class Season(MediaItem): - """Season class""" - __tablename__ = "Season" - _id: Mapped[int] = mapped_column(sqlalchemy.ForeignKey("MediaItem._id"), primary_key=True) - parent_id: Mapped[int] = mapped_column(sqlalchemy.ForeignKey("Show._id"), use_existing_column=True) + __tablename__ = "season" + id: Mapped[int] = mapped_column(sqlalchemy.ForeignKey("mediaitem.id"), primary_key=True) + number: Mapped[Optional[int]] = mapped_column(sqlalchemy.Integer, nullable=True) + parent_id: Mapped[int] = mapped_column(sqlalchemy.ForeignKey("show.id"), use_existing_column=True) parent: Mapped["Show"] = relationship(lazy=False, back_populates="seasons", foreign_keys="Season.parent_id") episodes: Mapped[List["Episode"]] = relationship(back_populates="parent", foreign_keys="Episode.parent_id", lazy="joined", cascade="all, delete-orphan", order_by="Episode.number") + __mapper_args__ = { "polymorphic_identity": "season", - "polymorphic_load": "inline", } def store_state(self) -> None: for episode in self.episodes: episode.store_state() - if self.last_state and self.last_state != self._determine_state(): - ws_manager.send_item_update(json.dumps(self.to_dict())) - self.last_state = self._determine_state() + super().store_state() def __init__(self, item): self.type = "season" self.number = item.get("number", None) self.episodes: list[Episode] = item.get("episodes", []) - self.item_id = self.number self.parent = item.get("parent", None) super().__init__(item) - if self.parent and isinstance(self.parent, Show): - self.is_anime = self.parent.is_anime + if self.parent: + self.parent.is_anime = self.is_anime def _determine_state(self): - if len(self.episodes) > 0: - if all(episode.state == States.Completed for episode in self.episodes): - return States.Completed - if any(episode.state == States.Unreleased for episode in self.episodes): - if any(episode.state != States.Unreleased for episode in self.episodes): - return States.Ongoing - if any(episode.state == States.Completed for episode in self.episodes): - return States.PartiallyCompleted - if all(episode.state == States.Symlinked for episode in self.episodes): - return States.Symlinked - if all(episode.file and episode.folder for episode in self.episodes): - return States.Downloaded - if self.is_scraped(): - return States.Scraped - if any(episode.state == States.Indexed for episode in self.episodes): - return States.Indexed - if any(episode.state == States.Unreleased for episode in self.episodes): - return States.Unreleased - if any(episode.state == States.Requested for episode in self.episodes): - return States.Requested - return States.Unknown - else: + if all(episode.state == States.Completed for episode in self.episodes): + return States.Completed + if any(episode.state in [States.Ongoing, States.Unreleased] for episode in self.episodes): + return States.Ongoing + if any( + episode.state in (States.Completed, States.PartiallyCompleted) + for episode in self.episodes + ): + return States.PartiallyCompleted + if not self.is_released or all(not episode.is_released for episode in self.episodes): return States.Unreleased + return States.Requested @property def is_released(self) -> bool: - return any(episode.is_released for episode in self.episodes) + return len(self.episodes) > 0 and any(episode.is_released for episode in self.episodes) def __repr__(self): - return f"Season:{self.number}:{self.state.name}" - - def 
__hash__(self): - return super().__hash__() + return f"Season:{self.number}:{self.last_state.name}" def copy(self, other, copy_parent=True): super(Season, self).copy(other) @@ -598,16 +506,6 @@ def fill_in_missing_children(self, other: Self): if e.number not in existing_episodes: self.add_episode(e) - def get_episode_index_by_id(self, item_id): - """Find the index of an episode by its item_id.""" - for i, episode in enumerate(self.episodes): - if episode.item_id == item_id: - return i - return None - - def represent_children(self): - return [e.log_string for e in self.episodes] - def add_episode(self, episode): """Add episode to season""" if episode.number in [e.number for e in self.episodes]: @@ -627,31 +525,25 @@ def get_top_title(self) -> str: class Episode(MediaItem): - """Episode class""" - __tablename__ = "Episode" - _id: Mapped[int] = mapped_column(sqlalchemy.ForeignKey("MediaItem._id"), primary_key=True) - parent_id: Mapped[int] = mapped_column(sqlalchemy.ForeignKey("Season._id"), use_existing_column=True) + __tablename__ = 'episode' + id = Column(Integer, ForeignKey('mediaitem.id'), primary_key=True) + number = Column(Integer) + parent_id = Column(Integer, ForeignKey('season.id')) parent: Mapped["Season"] = relationship(back_populates="episodes", foreign_keys="Episode.parent_id", lazy="joined") __mapper_args__ = { "polymorphic_identity": "episode", - "polymorphic_load": "inline", } def __init__(self, item): self.type = "episode" self.number = item.get("number", None) - self.file = item.get("file", None) - self.item_id = self.number# , parent_id=item.get("parent_id")) super().__init__(item) - if self.parent and isinstance(self.parent, Season): - self.is_anime = self.parent.parent.is_anime + if self.parent: + self.parent.is_anime = self.is_anime def __repr__(self): - return f"Episode:{self.number}:{self.state.name}" - - def __hash__(self): - return super().__hash__() + return f"Episode:{self.number}:{self.last_state.name}" def copy(self, other, copy_parent=True): super(Episode, self).copy(other) @@ -659,53 +551,9 @@ def copy(self, other, copy_parent=True): self.parent = Season(item={}).copy(other.parent) return self - def get_file_episodes(self) -> List[int]: - if not self.file or not isinstance(self.file, str): - raise ValueError("The file attribute must be a non-empty string.") - # return list of episodes - return parse(self.file).episodes - @property def log_string(self): return f"{self.parent.log_string}E{self.number:02}" def get_top_title(self) -> str: - return self.parent.parent.title - - def get_top_year(self) -> Optional[int]: - return self.parent.parent.year - - def get_season_year(self) -> Optional[int]: - return self.parent.year - - -def _set_nested_attr(obj, key, value): - if "." 
in key: - parts = key.split(".", 1) - current_key, rest_of_keys = parts[0], parts[1] - - if not hasattr(obj, current_key): - raise AttributeError(f"Object does not have the attribute '{current_key}'.") - - current_obj = getattr(obj, current_key) - _set_nested_attr(current_obj, rest_of_keys, value) - elif isinstance(obj, dict): - obj[key] = value - else: - setattr(obj, key, value) - - -def copy_item(item): - """Copy an item""" - if isinstance(item, Movie): - return Movie(item={}).copy(item) - elif isinstance(item, Show): - return Show(item={}).copy(item) - elif isinstance(item, Season): - return Season(item={}).copy(item) - elif isinstance(item, Episode): - return Episode(item={}).copy(item) - elif isinstance(item, MediaItem): - return MediaItem(item={}).copy(item) - else: - raise ValueError(f"Cannot copy item of type {type(item)}") \ No newline at end of file + return self.parent.parent.title \ No newline at end of file diff --git a/src/program/media/stream.py b/src/program/media/stream.py index 8a4d5aad..26124981 100644 --- a/src/program/media/stream.py +++ b/src/program/media/stream.py @@ -9,28 +9,27 @@ from utils.logger import logger if TYPE_CHECKING: - from program.media.item import MediaItem - from program.media.state import States + from program.media.item import ProfileData class StreamRelation(db.Model): - __tablename__ = "StreamRelation" + __tablename__ = "streamrelation" - _id: Mapped[int] = mapped_column(sqlalchemy.Integer, primary_key=True) - parent_id: Mapped[int] = mapped_column(sqlalchemy.Integer, sqlalchemy.ForeignKey("MediaItem._id", ondelete="CASCADE")) - child_id: Mapped[int] = mapped_column(sqlalchemy.Integer, sqlalchemy.ForeignKey("Stream._id", ondelete="CASCADE")) + id: Mapped[int] = mapped_column(sqlalchemy.Integer, primary_key=True) + parent_id: Mapped[int] = mapped_column(sqlalchemy.Integer, sqlalchemy.ForeignKey("profiledata.id", ondelete="CASCADE")) + child_id: Mapped[int] = mapped_column(sqlalchemy.Integer, sqlalchemy.ForeignKey("stream.id", ondelete="CASCADE")) __table_args__ = ( Index('ix_streamrelation_parent_id', 'parent_id'), Index('ix_streamrelation_child_id', 'child_id'), ) - + class StreamBlacklistRelation(db.Model): - __tablename__ = "StreamBlacklistRelation" + __tablename__ = "streamblacklistrelation" - _id: Mapped[int] = mapped_column(sqlalchemy.Integer, primary_key=True) - media_item_id: Mapped[int] = mapped_column(sqlalchemy.Integer, sqlalchemy.ForeignKey("MediaItem._id", ondelete="CASCADE")) - stream_id: Mapped[int] = mapped_column(sqlalchemy.Integer, sqlalchemy.ForeignKey("Stream._id", ondelete="CASCADE")) + id: Mapped[int] = mapped_column(sqlalchemy.Integer, primary_key=True) + media_item_id: Mapped[int] = mapped_column(sqlalchemy.Integer, sqlalchemy.ForeignKey("profiledata.id", ondelete="CASCADE")) + stream_id: Mapped[int] = mapped_column(sqlalchemy.Integer, sqlalchemy.ForeignKey("stream.id", ondelete="CASCADE")) __table_args__ = ( Index('ix_streamblacklistrelation_media_item_id', 'media_item_id'), @@ -38,17 +37,17 @@ class StreamBlacklistRelation(db.Model): ) class Stream(db.Model): - __tablename__ = "Stream" + __tablename__ = "stream" - _id: Mapped[int] = mapped_column(sqlalchemy.Integer, primary_key=True) + id: Mapped[int] = mapped_column(sqlalchemy.Integer, primary_key=True) infohash: Mapped[str] = mapped_column(sqlalchemy.String, nullable=False) raw_title: Mapped[str] = mapped_column(sqlalchemy.String, nullable=False) parsed_title: Mapped[str] = mapped_column(sqlalchemy.String, nullable=False) rank: Mapped[int] = 
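The item.py hunks above replace the old string `item_id`/`imdb_id` identity with an integer `id` primary key plus an `ids` mapping, and `_reset` now only fans out to the item's profiles. A minimal sketch of the assumed shapes, inferred from usage in this patch rather than shown in it (the `ids` keys beyond "imdb_id" and the `profiles` relationship are assumptions):

    # Hypothetical sketch; only "imdb_id" is actually referenced in this patch.
    show.ids = {"imdb_id": "tt0944947"}
    episode = show.seasons[0].episodes[0]
    # get_top_imdb_id() walks episode -> season -> show:
    assert episode.get_top_imdb_id() == show.ids["imdb_id"]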
diff --git a/src/program/media/stream.py b/src/program/media/stream.py
index 8a4d5aad..26124981 100644
--- a/src/program/media/stream.py
+++ b/src/program/media/stream.py
@@ -9,28 +9,27 @@
 from utils.logger import logger

 if TYPE_CHECKING:
-    from program.media.item import MediaItem
-    from program.media.state import States
+    from program.media.item import ProfileData

 class StreamRelation(db.Model):
-    __tablename__ = "StreamRelation"
+    __tablename__ = "streamrelation"

-    _id: Mapped[int] = mapped_column(sqlalchemy.Integer, primary_key=True)
-    parent_id: Mapped[int] = mapped_column(sqlalchemy.Integer, sqlalchemy.ForeignKey("MediaItem._id", ondelete="CASCADE"))
-    child_id: Mapped[int] = mapped_column(sqlalchemy.Integer, sqlalchemy.ForeignKey("Stream._id", ondelete="CASCADE"))
+    id: Mapped[int] = mapped_column(sqlalchemy.Integer, primary_key=True)
+    parent_id: Mapped[int] = mapped_column(sqlalchemy.Integer, sqlalchemy.ForeignKey("profiledata.id", ondelete="CASCADE"))
+    child_id: Mapped[int] = mapped_column(sqlalchemy.Integer, sqlalchemy.ForeignKey("stream.id", ondelete="CASCADE"))

     __table_args__ = (
         Index('ix_streamrelation_parent_id', 'parent_id'),
         Index('ix_streamrelation_child_id', 'child_id'),
     )
-
+
 class StreamBlacklistRelation(db.Model):
-    __tablename__ = "StreamBlacklistRelation"
+    __tablename__ = "streamblacklistrelation"

-    _id: Mapped[int] = mapped_column(sqlalchemy.Integer, primary_key=True)
-    media_item_id: Mapped[int] = mapped_column(sqlalchemy.Integer, sqlalchemy.ForeignKey("MediaItem._id", ondelete="CASCADE"))
-    stream_id: Mapped[int] = mapped_column(sqlalchemy.Integer, sqlalchemy.ForeignKey("Stream._id", ondelete="CASCADE"))
+    id: Mapped[int] = mapped_column(sqlalchemy.Integer, primary_key=True)
+    media_item_id: Mapped[int] = mapped_column(sqlalchemy.Integer, sqlalchemy.ForeignKey("profiledata.id", ondelete="CASCADE"))
+    stream_id: Mapped[int] = mapped_column(sqlalchemy.Integer, sqlalchemy.ForeignKey("stream.id", ondelete="CASCADE"))

     __table_args__ = (
         Index('ix_streamblacklistrelation_media_item_id', 'media_item_id'),
@@ -38,17 +37,17 @@ class StreamBlacklistRelation(db.Model):
     )

 class Stream(db.Model):
-    __tablename__ = "Stream"
+    __tablename__ = "stream"

-    _id: Mapped[int] = mapped_column(sqlalchemy.Integer, primary_key=True)
+    id: Mapped[int] = mapped_column(sqlalchemy.Integer, primary_key=True)
     infohash: Mapped[str] = mapped_column(sqlalchemy.String, nullable=False)
     raw_title: Mapped[str] = mapped_column(sqlalchemy.String, nullable=False)
     parsed_title: Mapped[str] = mapped_column(sqlalchemy.String, nullable=False)
     rank: Mapped[int] = mapped_column(sqlalchemy.Integer, nullable=False)
     lev_ratio: Mapped[float] = mapped_column(sqlalchemy.Float, nullable=False)

-    parents: Mapped[list["MediaItem"]] = relationship(secondary="StreamRelation", back_populates="streams")
-    blacklisted_parents: Mapped[list["MediaItem"]] = relationship(secondary="StreamBlacklistRelation", back_populates="blacklisted_streams")
+    parents: Mapped[list["ProfileData"]] = relationship(secondary="streamrelation", back_populates="streams")
+    blacklisted_parents: Mapped[list["ProfileData"]] = relationship(secondary="streamblacklistrelation", back_populates="blacklisted_streams")

     __table_args__ = (
         Index('ix_stream_infohash', 'infohash'),
@@ -66,6 +65,6 @@ def __init__(self, torrent: Torrent):

     def __hash__(self):
         return self.infohash
-
+
     def __eq__(self, other):
         return isinstance(other, Stream) and self.infohash == other.infohash
\ No newline at end of file
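`Stream` keys identity on `infohash` alone, so identical hashes from different scrapers collapse when deduplicated. Python expects `__hash__` to return an `int`, so the conventional spelling of that contract looks like this standalone sketch (an illustration, not the ORM model above):

    class StreamKey:
        def __init__(self, infohash: str):
            self.infohash = infohash

        def __hash__(self) -> int:
            return hash(self.infohash)  # hash of the string, an int

        def __eq__(self, other) -> bool:
            return isinstance(other, StreamKey) and self.infohash == other.infohash

    # Two results carrying the same infohash collapse to one entry:
    assert len({StreamKey("abc123"), StreamKey("abc123")}) == 1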
(Symlinks: {total_symlinks})") - self.executors = [] self.scheduler = BackgroundScheduler() + self.initialize_db() self._schedule_services() - self._schedule_functions() + # self._schedule_functions() super().start() self.scheduler.start() @@ -172,7 +157,7 @@ def _retry_library(self) -> None: count = 0 with db.Session() as session: count = session.execute( - select(func.count(MediaItem._id)) + select(func.count(MediaItem.id)) .where(MediaItem.last_state.not_in([States.Completed, States.Unreleased])) .where(MediaItem.type.in_(["movie", "show"])) ).scalar_one() @@ -300,14 +285,14 @@ def run(self): with db.Session() as session: existing_item: MediaItem | None = DB._get_item_from_db(session, event.item) - processed_item, next_service, items_to_submit = process_event( + processed_item, items_to_submit = process_event( existing_item, event.emitted_by, existing_item if existing_item is not None else event.item ) self.em.remove_item_from_running(event.item) if items_to_submit: - for item_to_submit in items_to_submit: + for item_to_submit, next_service in items_to_submit: if not next_service: self.em.add_event_to_queue(Event("StateTransition", item_to_submit)) else: @@ -330,6 +315,13 @@ def stop(self): self.scheduler.shutdown(wait=False) logger.log("PROGRAM", "Riven has been stopped.") + def initialize_db(self): + with db.Session() as session: + existing = session.query(Profile).filter(Profile.name == settings_manager.settings.ranking.profile).first() + if not existing: + session.add(Profile(settings_manager.settings.ranking)) + session.commit() + def _enhance_item(self, item: MediaItem) -> MediaItem | None: try: enhanced_item = next(self.services[TraktIndexer].run(item, log_msg=False)) @@ -342,7 +334,7 @@ def _init_db_from_symlinks(self): """Initialize the database from symlinks.""" start_time = datetime.now() with db.Session() as session: - res = session.execute(select(func.count(MediaItem._id))).scalar_one() + res = session.execute(select(func.count(MediaItem.id))).scalar_one() added = [] errors = [] if res == 0: @@ -368,7 +360,7 @@ def _init_db_from_symlinks(self): added.append(enhanced_item.item_id) enhanced_item.store_state() session.add(enhanced_item) - log_message = f"Indexed IMDb Id: {enhanced_item.imdb_id} as {enhanced_item.type.title()}: {enhanced_item.log_string}" + log_message = f"Indexed IMDb Id: {enhanced_item.ids['imdb_id']} as {enhanced_item.type.title()}: {enhanced_item.log_string}" except Exception as e: logger.exception(f"Error processing {item.log_string}: {e}") finally: diff --git a/src/program/scrapers/__init__.py b/src/program/scrapers/__init__.py index 771eb1f7..8ffe54c6 100644 --- a/src/program/scrapers/__init__.py +++ b/src/program/scrapers/__init__.py @@ -1,9 +1,8 @@ import threading from datetime import datetime -from typing import Dict, Generator, List, Union +from typing import Dict, Generator, List -from program.media.item import Episode, MediaItem, Movie, Season, Show -from program.media.state import States +from program.media.item import ProfileData from program.media.stream import Stream from program.scrapers.annatar import Annatar from program.scrapers.comet import Comet @@ -44,43 +43,42 @@ def __init__(self): def validate(self): return any(service.initialized for service in self.services.values()) - def run(self, item: MediaItem) -> Generator[MediaItem, None, None]: + def run(self, profile: ProfileData) -> Generator[ProfileData, None, None]: """Scrape an item.""" - if self.can_we_scrape(item): - sorted_streams = self.scrape(item) - for stream in 
sorted_streams.values(): - if stream not in item.streams: - item.streams.append(stream) - item.set("scraped_at", datetime.now()) - item.set("scraped_times", item.scraped_times + 1) + sorted_streams = self.scrape(profile) + for stream in sorted_streams.values(): + if stream not in profile.streams: + profile.streams.append(stream) + profile.scraped_at = datetime.now() + profile.scraped_times= profile.scraped_times + 1 - if not item.get("streams", []): - logger.log("NOT_FOUND", f"Scraping returned no good results for {item.log_string}") + if not profile.streams: + logger.log("NOT_FOUND", f"Scraping returned no good results for {profile.log_string}") - yield item + yield profile - def scrape(self, item: MediaItem, log = True) -> Dict[str, Stream]: + def scrape(self, profile: ProfileData, log = True) -> Dict[str, Stream]: """Scrape an item.""" threads: List[threading.Thread] = [] results: Dict[str, str] = {} total_results = 0 results_lock = threading.RLock() - def run_service(service, item,): + def run_service(service, profile): nonlocal total_results - service_results = service.run(item) - + service_results = service.run(profile) + if not isinstance(service_results, dict): logger.error(f"Service {service.__class__.__name__} returned invalid results: {service_results}") return - + with results_lock: results.update(service_results) total_results += len(service_results) - for service_name, service in self.services.items(): + for service_name, service in self.services.profiles(): if service.initialized: - thread = threading.Thread(target=run_service, args=(service, item), name=service_name.__name__) + thread = threading.Thread(target=run_service, args=(service, profile), name=service_name.__name__) threads.append(thread) thread.start() @@ -88,48 +86,48 @@ def run_service(service, item,): thread.join() if total_results != len(results): - logger.debug(f"Scraped {item.log_string} with {total_results} results, removed {total_results - len(results)} duplicate hashes") + logger.debug(f"Scraped {profile.log_string} with {total_results} results, removed {total_results - len(results)} duplicate hashes") - sorted_streams: Dict[str, Stream] = _parse_results(item, results, log) + sorted_streams: Dict[str, Stream] = _parse_results(profile, results, log) if sorted_streams and (log and settings_manager.settings.debug): - item_type = item.type.title() + item_type = profile.parent.type.title() top_results = list(sorted_streams.values())[:10] for sorted_tor in top_results: item_info = f"[{item_type}]" - if item.type == "season": - item_info = f"[{item_type} {item.number}]" - elif item.type == "episode": - item_info = f"[{item_type} {item.parent.number}:{item.number}]" + if profile.parent.type == "season": + item_info = f"[{item_type} {profile.parent.number}]" + elif profile.parent.type == "episode": + item_info = f"[{item_type} {profile.parent.parent.number}:{profile.parent.number}]" logger.debug(f"{item_info} Parsed '{sorted_tor.parsed_title}' with rank {sorted_tor.rank} ({sorted_tor.infohash}): '{sorted_tor.raw_title}'") return sorted_streams @classmethod - def can_we_scrape(cls, item: MediaItem) -> bool: - """Check if we can scrape an item.""" - if not item.is_released: - logger.debug(f"Cannot scrape {item.log_string}: Item is not released") + def can_we_scrape(cls, profile: ProfileData) -> bool: + """Check if we can scrape an profile.""" + if not profile.is_released: + logger.debug(f"Cannot scrape {profile.parent.log_string}: item is not released") return False - if not cls.should_submit(item): - 
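The `run()` hunk above changes the `process_event` contract: the single per-batch `next_service` is gone, and each submitted item now carries its own next service, with `None` meaning "re-queue as a plain StateTransition". A sketch of the assumed return shape (illustrative, not the actual implementation):

    # Hypothetical: items_to_submit pairs each item with its next service.
    def process_event(existing_item, emitted_by, item):
        items_to_submit = [
            (item, Scraping),  # hand this item to the Scraping service next
            (item, None),      # None -> Event("StateTransition", item)
        ]
        return item, items_to_submit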
logger.debug(f"Cannot scrape {item.log_string}: Item has been scraped recently, backing off") + if not cls.should_submit(profile): + logger.debug(f"Cannot scrape {profile.parent.log_string}: profile has been scraped recently, backing off") return False return True @staticmethod - def should_submit(item: MediaItem) -> bool: + def should_submit(profile: ProfileData) -> bool: """Check if an item should be submitted for scraping.""" settings = settings_manager.settings.scraping scrape_time = 5 * 60 # 5 minutes by default - if item.scraped_times >= 2 and item.scraped_times <= 5: + if profile.scraped_times >= 2 and profile.scraped_times <= 5: scrape_time = settings.after_2 * 60 * 60 - elif item.scraped_times > 5 and item.scraped_times <= 10: + elif profile.scraped_times > 5 and profile.scraped_times <= 10: scrape_time = settings.after_5 * 60 * 60 - elif item.scraped_times > 10: + elif profile.scraped_times > 10: scrape_time = settings.after_10 * 60 * 60 return ( - not item.scraped_at - or (datetime.now() - item.scraped_at).total_seconds() > scrape_time + not profile.scraped_at + or (datetime.now() - profile.scraped_at).total_seconds() > scrape_time ) \ No newline at end of file diff --git a/src/program/scrapers/annatar.py b/src/program/scrapers/annatar.py index 92ce090c..a3492320 100644 --- a/src/program/scrapers/annatar.py +++ b/src/program/scrapers/annatar.py @@ -4,7 +4,7 @@ from requests import ConnectTimeout, ReadTimeout from requests.exceptions import RequestException -from program.media.item import MediaItem +from program.media.item import ProfileData from program.settings.manager import settings_manager from program.scrapers.shared import _get_stremio_identifier from utils.logger import logger @@ -55,21 +55,21 @@ def validate(self) -> bool: logger.error(f"Annatar failed to initialize: {e}") return False - def run(self, item: MediaItem) -> Dict[str, str]: + def run(self, profile: ProfileData) -> Dict[str, str]: """Scrape the Annatar site for the given media items and update the object with scraped streams""" try: - return self.scrape(item) + return self.scrape(profile) except RateLimitExceeded: if self.second_limiter: self.second_limiter.limit_hit() except ConnectTimeout: - logger.debug(f"Annatar connection timeout for item: {item.log_string}") + logger.debug(f"Annatar connection timeout for item: {profile.log_string}") except ReadTimeout: - logger.debug(f"Annatar read timeout for item: {item.log_string}") + logger.debug(f"Annatar read timeout for item: {profile.log_string}") except RequestException as e: if e.response.status_code == 525: - logger.error(f"Annatar SSL handshake failed for item: {item.log_string}") + logger.error(f"Annatar SSL handshake failed for item: {profile.log_string}") elif e.response.status_code == 429: if self.second_limiter: self.second_limiter.limit_hit() @@ -79,18 +79,9 @@ def run(self, item: MediaItem) -> Dict[str, str]: logger.error(f"Annatar failed to scrape item with error: {e}", exc_info=True) return {} - def scrape(self, item: MediaItem) -> Dict[str, str]: - """Scrape the given media item""" - data, stream_count = self.api_scrape(item) - if data: - logger.log("SCRAPER", f"Found {len(data)} streams out of {stream_count} for {item.log_string}") - else: - logger.log("NOT_FOUND", f"No streams found for {item.log_string}") - return data - - def api_scrape(self, item: MediaItem) -> tuple[Dict[str, str], int]: + def scrape(self, profile: ProfileData) -> Dict[str, str]: """Wrapper for `Annatar` scrape method""" - identifier, scrape_type, imdb_id = 
diff --git a/src/program/scrapers/annatar.py b/src/program/scrapers/annatar.py
index 92ce090c..a3492320 100644
--- a/src/program/scrapers/annatar.py
+++ b/src/program/scrapers/annatar.py
@@ -4,7 +4,7 @@
 from requests import ConnectTimeout, ReadTimeout
 from requests.exceptions import RequestException

-from program.media.item import MediaItem
+from program.media.item import ProfileData
 from program.settings.manager import settings_manager
 from program.scrapers.shared import _get_stremio_identifier
 from utils.logger import logger
@@ -55,21 +55,21 @@ def validate(self) -> bool:
             logger.error(f"Annatar failed to initialize: {e}")
             return False

-    def run(self, item: MediaItem) -> Dict[str, str]:
+    def run(self, profile: ProfileData) -> Dict[str, str]:
         """Scrape the Annatar site for the given media items and update the object with scraped streams"""
         try:
-            return self.scrape(item)
+            return self.scrape(profile)
         except RateLimitExceeded:
             if self.second_limiter:
                 self.second_limiter.limit_hit()
         except ConnectTimeout:
-            logger.debug(f"Annatar connection timeout for item: {item.log_string}")
+            logger.debug(f"Annatar connection timeout for item: {profile.log_string}")
         except ReadTimeout:
-            logger.debug(f"Annatar read timeout for item: {item.log_string}")
+            logger.debug(f"Annatar read timeout for item: {profile.log_string}")
         except RequestException as e:
             if e.response.status_code == 525:
-                logger.error(f"Annatar SSL handshake failed for item: {item.log_string}")
+                logger.error(f"Annatar SSL handshake failed for item: {profile.log_string}")
             elif e.response.status_code == 429:
                 if self.second_limiter:
                     self.second_limiter.limit_hit()
@@ -79,18 +79,9 @@ def run(self, item: MediaItem) -> Dict[str, str]:
             logger.error(f"Annatar failed to scrape item with error: {e}", exc_info=True)
         return {}

-    def scrape(self, item: MediaItem) -> Dict[str, str]:
-        """Scrape the given media item"""
-        data, stream_count = self.api_scrape(item)
-        if data:
-            logger.log("SCRAPER", f"Found {len(data)} streams out of {stream_count} for {item.log_string}")
-        else:
-            logger.log("NOT_FOUND", f"No streams found for {item.log_string}")
-        return data
-
-    def api_scrape(self, item: MediaItem) -> tuple[Dict[str, str], int]:
+    def scrape(self, profile: ProfileData) -> Dict[str, str]:
         """Wrapper for `Annatar` scrape method"""
-        identifier, scrape_type, imdb_id = _get_stremio_identifier(item)
+        identifier, scrape_type, imdb_id = _get_stremio_identifier(profile)

         if identifier is not None:
             url = f"{self.settings.url}/search/imdb/{scrape_type}/{imdb_id}?{identifier}&{self.query_limits}"
@@ -104,13 +95,17 @@ def api_scrape(self, item: MediaItem) -> tuple[Dict[str, str], int]:
         response = get(url, timeout=self.timeout)
         if not response.is_ok or not response.data.media:
-            return {}, 0
+            return {}

         torrents: Dict[str, str] = {}
         for stream in response.data.media:
             if not stream.hash:
                 continue
             torrents[stream.hash] = stream.title

-        return torrents, len(response.data.media)
\ No newline at end of file
+        if torrents:
+            logger.log("SCRAPER", f"Found {len(torrents)} streams for {profile.log_string}")
+        else:
+            logger.log("NOT_FOUND", f"No streams found for {profile.log_string}")
+
+        return torrents
\ No newline at end of file
diff --git a/src/program/scrapers/comet.py b/src/program/scrapers/comet.py
index a28dd148..2e4e4a06 100644
--- a/src/program/scrapers/comet.py
+++ b/src/program/scrapers/comet.py
@@ -7,7 +7,7 @@
 from requests import ConnectTimeout, ReadTimeout
 from requests.exceptions import RequestException

-from program.media.item import MediaItem, Show
+from program.media.item import ProfileData
 from program.settings.manager import settings_manager
 from program.scrapers.shared import _get_stremio_identifier
 from utils.logger import logger
@@ -58,35 +58,29 @@ def validate(self) -> bool:
             logger.error(f"Comet failed to initialize: {e}", )
         return False

-    def run(self, item: MediaItem) -> Dict[str, str]:
+    def run(self, profile: ProfileData) -> Dict[str, str]:
         """Scrape the comet site for the given media items and update the object with scraped streams"""
-        if not item or isinstance(item, Show):
-            return {}
-
         try:
-            return self.scrape(item)
+            return self.scrape(profile)
         except RateLimitExceeded:
             if self.hour_limiter:
                 self.hour_limiter.limit_hit()
             else:
-                logger.warning(f"Comet ratelimit exceeded for item: {item.log_string}")
+                logger.warning(f"Comet ratelimit exceeded for item: {profile.log_string}")
         except ConnectTimeout:
-            logger.warning(f"Comet connection timeout for item: {item.log_string}")
+            logger.warning(f"Comet connection timeout for item: {profile.log_string}")
         except ReadTimeout:
-            logger.warning(f"Comet read timeout for item: {item.log_string}")
+            logger.warning(f"Comet read timeout for item: {profile.log_string}")
         except RequestException as e:
             logger.error(f"Comet request exception: {str(e)}")
         except Exception as e:
             logger.error(f"Comet exception thrown: {str(e)}")
         return {}

-    def scrape(self, item: MediaItem) -> tuple[Dict[str, str], int]:
+    def scrape(self, profile: ProfileData) -> Dict[str, str]:
         """Wrapper for `Comet` scrape method"""
-        identifier, scrape_type, imdb_id = _get_stremio_identifier(item)
-        if not imdb_id:
-            return {}
-
+        identifier, scrape_type, imdb_id = _get_stremio_identifier(profile)
         url = f"{self.settings.url}/{self.encoded_string}/stream/{scrape_type}/{imdb_id}{identifier or ''}.json"

         if self.second_limiter:
@@ -115,8 +109,8 @@ def scrape(self, item: MediaItem) -> tuple[Dict[str, str], int]:
             torrents[infohash] = title

         if torrents:
-            logger.log("SCRAPER", f"Found {len(torrents)} streams for {item.log_string}")
+            logger.log("SCRAPER", f"Found {len(torrents)} streams for {profile.log_string}")
         else:
-            logger.log("NOT_FOUND", f"No streams found for {item.log_string}")
+            logger.log("NOT_FOUND", f"No streams found for {profile.log_string}")

         return torrents
diff --git a/src/program/scrapers/jackett.py b/src/program/scrapers/jackett.py
index eb80bbbf..d0d5b27d 100644
--- a/src/program/scrapers/jackett.py
+++ b/src/program/scrapers/jackett.py
@@ -10,7 +10,7 @@
 from pydantic import BaseModel
 from requests import HTTPError, ReadTimeout, RequestException, Timeout

-from program.media.item import Episode, MediaItem, Movie, Season, Show
+from program.media.item import ProfileData
 from program.settings.manager import settings_manager
 from utils.logger import logger
 from utils.ratelimiter import RateLimiter, RateLimitExceeded
@@ -74,11 +74,11 @@ def validate(self) -> bool:
             logger.warning("Jackett is not configured and will not be used.")
         return False

-    def run(self, item: MediaItem) -> Generator[MediaItem, None, None]:
+    def run(self, profile: ProfileData) -> Dict[str, str]:
         """Scrape the Jackett site for the given media items and update the object with scraped streams"""
         try:
-            return self.scrape(item)
+            return self.scrape(profile)
         except RateLimitExceeded:
             if self.second_limiter:
                 self.second_limiter.limit_hit()
@@ -88,20 +88,20 @@ def run(self, item: MediaItem) -> Generator[MediaItem, None, None]:
             logger.error(f"Jackett failed to scrape item with error: {e}")
         return {}

-    def scrape(self, item: MediaItem) -> Dict[str, str]:
+    def scrape(self, profile: ProfileData) -> Dict[str, str]:
         """Scrape the given media item"""
-        data, stream_count = self.api_scrape(item)
+        data, stream_count = self.api_scrape(profile)
         if data:
-            logger.log("SCRAPER", f"Found {len(data)} streams out of {stream_count} for {item.log_string}")
+            logger.log("SCRAPER", f"Found {len(data)} streams out of {stream_count} for {profile.log_string}")
         else:
-            logger.log("NOT_FOUND", f"No streams found for {item.log_string}")
+            logger.log("NOT_FOUND", f"No streams found for {profile.log_string}")
         return data

-    def api_scrape(self, item: MediaItem) -> tuple[Dict[str, str], int]:
+    def api_scrape(self, profile: ProfileData) -> tuple[Dict[str, str], int]:
         """Wrapper for `Jackett` scrape method"""
         results_queue = queue.Queue()
         threads = [
-            threading.Thread(target=self._thread_target, args=(item, indexer, results_queue))
+            threading.Thread(target=self._thread_target, args=(profile, indexer, results_queue))
             for indexer in self.indexers
         ]
@@ -113,26 +113,26 @@ def api_scrape(self, item: MediaItem) -> tuple[Dict[str, str], int]:
         results = self._collect_results(results_queue)
         return self._process_results(results)

-    def _thread_target(self, item: MediaItem, indexer: JackettIndexer, results_queue: queue.Queue):
+    def _thread_target(self, profile: ProfileData, indexer: JackettIndexer, results_queue: queue.Queue):
         """Thread target for searching indexers"""
         try:
             start_time = time.perf_counter()
-            result = self._search_indexer(item, indexer)
+            result = self._search_indexer(profile, indexer)
             search_duration = time.perf_counter() - start_time
         except TypeError as e:
-            logger.error(f"Invalid Type for {item.log_string}: {e}")
+            logger.error(f"Invalid Type for {profile.log_string}: {e}")
             result = []
             search_duration = 0

-        item_title = item.log_string
+        item_title = profile.log_string
         logger.debug(f"Scraped {item_title} from {indexer.title} in {search_duration:.2f} seconds with {len(result)} results")
         results_queue.put(result)

-    def _search_indexer(self, item: MediaItem, indexer: JackettIndexer) -> List[Tuple[str, str]]:
+    def _search_indexer(self, profile: ProfileData, indexer: JackettIndexer) -> List[Tuple[str, str]]:
         """Search for the given item on the given indexer"""
-        if isinstance(item, Movie):
-            return self._search_movie_indexer(item, indexer)
-        elif isinstance(item, (Show, Season, Episode)):
-            return self._search_series_indexer(item, indexer)
+        if profile.parent.type == "movie":
+            return self._search_movie_indexer(profile, indexer)
+        elif profile.parent.type in ["show", "season", "episode"]:
+            return self._search_series_indexer(profile, indexer)
         else:
             raise TypeError("Only Movie and Series is allowed!")
@@ -153,7 +153,7 @@ def _process_results(self, results: List[Tuple[str, str]]) -> Tuple[Dict[str, st
             torrents[result[1]] = result[0]
         return torrents, len(results)

-    def _search_movie_indexer(self, item: MediaItem, indexer: JackettIndexer) -> List[Tuple[str, str]]:
+    def _search_movie_indexer(self, profile: ProfileData, indexer: JackettIndexer) -> List[Tuple[str, str]]:
         """Search for movies on the given indexer"""
         if indexer.movie_search_capabilities is None:
             return []
@@ -161,24 +161,24 @@ def _search_movie_indexer(self, item: MediaItem, indexer: JackettIndexer) -> Lis
             "apikey": self.api_key,
             "t": "movie",
             "cat": "2000",
-            "q": item.title,
+            "q": profile.parent.title,
         }
         if indexer.movie_search_capabilities and "year" in indexer.movie_search_capabilities:
-            if hasattr(item.aired_at, "year") and item.aired_at.year: params["year"] = item.aired_at.year
+            if hasattr(profile.parent.aired_at, "year") and profile.parent.aired_at.year: params["year"] = profile.parent.aired_at.year
         if indexer.movie_search_capabilities and "imdbid" in indexer.movie_search_capabilities:
-            params["imdbid"] = item.imdb_id
+            params["imdbid"] = profile.parent.ids["imdb_id"]

         url = f"{self.settings.url}/api/v2.0/indexers/{indexer.id}/results/torznab/api"
         return self._fetch_results(url, params, indexer.title, "movie")

-    def _search_series_indexer(self, item: MediaItem, indexer: JackettIndexer) -> List[Tuple[str, str]]:
+    def _search_series_indexer(self, profile: ProfileData, indexer: JackettIndexer) -> List[Tuple[str, str]]:
         """Search for series on the given indexer"""
         if indexer.tv_search_capabilities is None:
             return []
-        q, season, ep = self._get_series_search_params(item)
+        q, season, ep = self._get_series_search_params(profile)
         if not q:
-            logger.debug(f"No search query found for {item.log_string}")
+            logger.debug(f"No search query found for {profile.log_string}")
             return []

         params = {
@@ -190,19 +190,20 @@ def _search_series_indexer(self, item: MediaItem, indexer: JackettIndexer) -> Li
         if ep and indexer.tv_search_capabilities and "ep" in indexer.tv_search_capabilities: params["ep"] = ep
         if season and indexer.tv_search_capabilities and "season" in indexer.tv_search_capabilities: params["season"] = season
         if indexer.tv_search_capabilities and "imdbid" in indexer.tv_search_capabilities:
-            params["imdbid"] = item.imdb_id if isinstance(item, (Episode, Show)) else item.parent.imdb_id
+            params["imdbid"] = profile.parent.get_top_imdb_id()

         url = f"{self.settings.url}/api/v2.0/indexers/{indexer.id}/results/torznab/api"
         return self._fetch_results(url, params, indexer.title, "series")

-    def _get_series_search_params(self, item: MediaItem) -> Tuple[str, int, Optional[int]]:
+    def _get_series_search_params(self, profile: ProfileData) -> Tuple[str, int, Optional[int]]:
         """Get search parameters for series"""
-        if isinstance(item, Show):
-            return item.get_top_title(), None, None
-        elif isinstance(item, Season):
-            return item.get_top_title(), item.number, None
-        elif isinstance(item, Episode):
-            return item.get_top_title(), item.parent.number, item.number
+        title = profile.parent.get_top_title()
+        if profile.parent.type == "show":
+            return title, None, None
+        elif profile.parent.type == "season":
+            return title, profile.parent.number, None
+        elif profile.parent.type == "episode":
+            return title, profile.parent.parent.number, profile.parent.number
         return "", 0, None

     def _get_indexers(self) -> List[JackettIndexer]:
initialize: {e}") return False - def run(self, item: MediaItem) -> Dict[str, str]: - """Scrape the mediafusion site for the given media items - and update the object with scraped streams""" - if not item: - return {} - + def run(self, profile: ProfileData) -> Dict[str, str]: + """Scrape `Mediafusion` for the given profile""" try: - return self.scrape(item) + return self.scrape(profile) except RateLimitExceeded: if self.second_limiter: self.second_limiter.limit_hit() else: - logger.warning(f"Mediafusion ratelimit exceeded for item: {item.log_string}") + logger.warning(f"Mediafusion ratelimit exceeded for item: {profile.log_string}") except ConnectTimeout: - logger.warning(f"Mediafusion connection timeout for item: {item.log_string}") + logger.warning(f"Mediafusion connection timeout for item: {profile.log_string}") except ReadTimeout: - logger.warning(f"Mediafusion read timeout for item: {item.log_string}") + logger.warning(f"Mediafusion read timeout for item: {profile.log_string}") except RequestException as e: logger.error(f"Mediafusion request exception: {e}") except Exception as e: logger.error(f"Mediafusion exception thrown: {e}") return {} - def scrape(self, item: MediaItem) -> tuple[Dict[str, str], int]: + def scrape(self, profile: ProfileData) -> Dict[str, str]: """Wrapper for `Mediafusion` scrape method""" - identifier, scrape_type, imdb_id = _get_stremio_identifier(item) + identifier, scrape_type, imdb_id = _get_stremio_identifier(profile) url = f"{self.settings.url}/{self.encrypted_string}/stream/{scrape_type}/{imdb_id}" if identifier: @@ -144,8 +140,8 @@ def scrape(self, item: MediaItem) -> tuple[Dict[str, str], int]: torrents[info_hash] = raw_title if torrents: - logger.log("SCRAPER", f"Found {len(torrents)} streams for {item.log_string}") + logger.log("SCRAPER", f"Found {len(torrents)} streams for {profile.log_string}") else: - logger.log("NOT_FOUND", f"No streams found for {item.log_string}") + logger.log("NOT_FOUND", f"No streams found for {profile.log_string}") return torrents \ No newline at end of file diff --git a/src/program/scrapers/orionoid.py b/src/program/scrapers/orionoid.py index fbd91118..79d5caeb 100644 --- a/src/program/scrapers/orionoid.py +++ b/src/program/scrapers/orionoid.py @@ -1,7 +1,7 @@ """ Orionoid scraper module """ from typing import Dict -from program.media.item import MediaItem +from program.media.item import MediaItem, ProfileData from program.settings.manager import settings_manager from utils.logger import logger from utils.ratelimiter import RateLimiter, RateLimitExceeded @@ -87,11 +87,8 @@ def check_limit(self) -> bool: logger.error(f"Orionoid failed to check limit: {e}") return False - def run(self, item: MediaItem) -> Dict[str, str]: + def run(self, profile: ProfileData) -> Dict[str, str]: """Scrape the orionoid site for the given media items and update the object with scraped streams.""" - if not item: - return {} - if not self.is_unlimited: limit_hit = self.check_limit() if limit_hit: @@ -99,17 +96,17 @@ def run(self, item: MediaItem) -> Dict[str, str]: return {} try: - return self.scrape(item) + return self.scrape(profile) except RateLimitExceeded: self.rate_limiter.limit_hit() except Exception as e: - logger.opt(exception=True).error(f"Orionoid exception for item: {item.log_string} - Exception: {e}") + logger.opt(exception=True).error(f"Orionoid exception for item: {profile.log_string} - Exception: {e}") return {} - def _build_query_params(self, item: MediaItem) -> dict: + def _build_query_params(self, profile: ProfileData) -> dict: 
"""Construct the query parameters for the Orionoid API based on the media item.""" - media_type = "movie" if item.type == "movie" else "show" - imdbid: str = item.get_top_imdb_id() + media_type = "movie" if profile.parent.type == "movie" else "show" + imdbid: str = profile.parent.get_top_imdb_id() if not imdbid: raise ValueError("IMDB ID is missing for the media item") @@ -124,11 +121,11 @@ def _build_query_params(self, item: MediaItem) -> dict: "protocoltorrent": "magnet" } - if item.type == "season": - params["numberseason"] = item.number - elif item.type == "episode": - params["numberseason"] = item.parent.number - params["numberepisode"] = item.number + if profile.parent.type == "season": + params["numberseason"] = profile.parent.number + elif profile.parent.type == "episode": + params["numberseason"] = profile.parent.parent.number + params["numberepisode"] = profile.parent.number if self.settings.cached_results_only: params["access"] = "realdebridtorrent" @@ -140,9 +137,9 @@ def _build_query_params(self, item: MediaItem) -> dict: return params - def scrape(self, item: MediaItem) -> Dict[str, str]: + def scrape(self, profile: ProfileData) -> Dict[str, str]: """Wrapper for `Orionoid` scrape method""" - params = self._build_query_params(item) + params = self._build_query_params(profile) response = get(self.base_url, params=params, timeout=self.timeout, specific_rate_limiter=self.rate_limiter) if not response.is_ok or not hasattr(response.data, "data"): return {} @@ -154,8 +151,8 @@ def scrape(self, item: MediaItem) -> Dict[str, str]: torrents[stream.file.hash] = stream.file.name if torrents: - logger.log("SCRAPER", f"Found {len(torrents)} streams for {item.log_string}") + logger.log("SCRAPER", f"Found {len(torrents)} streams for {profile.log_string}") else: - logger.log("NOT_FOUND", f"No streams found for {item.log_string}") + logger.log("NOT_FOUND", f"No streams found for {profile.log_string}") return torrents \ No newline at end of file diff --git a/src/program/scrapers/prowlarr.py b/src/program/scrapers/prowlarr.py index f3ce0be7..525dbf63 100644 --- a/src/program/scrapers/prowlarr.py +++ b/src/program/scrapers/prowlarr.py @@ -11,7 +11,7 @@ from pydantic import BaseModel from requests import HTTPError, ReadTimeout, RequestException, Timeout -from program.media.item import Episode, MediaItem, Movie, Season, Show +from program.media.item import ProfileData from program.settings.manager import settings_manager from utils.logger import logger from utils.ratelimiter import RateLimiter, RateLimitExceeded @@ -75,39 +75,39 @@ def validate(self) -> bool: logger.warning("Prowlarr is not configured and will not be used.") return False - def run(self, item: MediaItem) -> Dict[str, str]: + def run(self, profile: ProfileData) -> Dict[str, str]: """Scrape the Prowlarr site for the given media items and update the object with scraped streams""" - if not item: + if not profile: return {} try: - return self.scrape(item) + return self.scrape(profile) except RateLimitExceeded: if self.second_limiter: self.second_limiter.limit_hit() else: - logger.warning(f"Prowlarr ratelimit exceeded for item: {item.log_string}") + logger.warning(f"Prowlarr ratelimit exceeded for item: {profile.parent.log_string}") except RequestException as e: logger.error(f"Prowlarr request exception: {e}") except Exception as e: logger.error(f"Prowlarr failed to scrape item with error: {e}") return {} - def scrape(self, item: MediaItem) -> Dict[str, str]: + def scrape(self, profile: ProfileData) -> Dict[str, str]: """Scrape the given 
media item""" - data, stream_count = self.api_scrape(item) - if data: - logger.log("SCRAPER", f"Found {len(data)} streams out of {stream_count} for {item.log_string}") + profile, stream_count = self.api_scrape(profile) + if profile: + logger.log("SCRAPER", f"Found {len(profile)} streams out of {stream_count} for {profile.parent.log_string}") else: - logger.log("NOT_FOUND", f"No streams found for {item.log_string}") - return data + logger.log("NOT_FOUND", f"No streams found for {profile.parent.log_string}") + return profile - def api_scrape(self, item: MediaItem) -> tuple[Dict[str, str], int]: + def api_scrape(self, profile: ProfileData) -> tuple[Dict[str, str], int]: """Wrapper for `Prowlarr` scrape method""" results_queue = queue.Queue() threads = [ - threading.Thread(target=self._thread_target, args=(item, indexer, results_queue)) + threading.Thread(target=self._thread_target, args=(profile, indexer, results_queue)) for indexer in self.indexers ] @@ -119,25 +119,25 @@ def api_scrape(self, item: MediaItem) -> tuple[Dict[str, str], int]: results = self._collect_results(results_queue) return self._process_results(results) - def _thread_target(self, item: MediaItem, indexer: ProwlarrIndexer, results_queue: queue.Queue): + def _thread_target(self, profile: ProfileData, indexer: ProwlarrIndexer, results_queue: queue.Queue): try: start_time = time.perf_counter() - result = self._search_indexer(item, indexer) + result = self._search_indexer(profile, indexer) search_duration = time.perf_counter() - start_time except TypeError as e: - logger.error(f"Invalid Type for {item.log_string}: {e}") + logger.error(f"Invalid Type for {profile.parent.log_string}: {e}") result = [] search_duration = 0 - item_title = item.log_string # probably not needed, but since its concurrent, it's better to be safe + item_title = profile.parent.log_string # probably not needed, but since its concurrent, it's better to be safe logger.debug(f"Scraped {item_title} from {indexer.title} in {search_duration:.2f} seconds with {len(result)} results") results_queue.put(result) - def _search_indexer(self, item: MediaItem, indexer: ProwlarrIndexer) -> List[Tuple[str, str]]: + def _search_indexer(self, profile: ProfileData, indexer: ProwlarrIndexer) -> List[Tuple[str, str]]: """Search for the given item on the given indexer""" - if isinstance(item, Movie): - return self._search_movie_indexer(item, indexer) - elif isinstance(item, (Show, Season, Episode)): - return self._search_series_indexer(item, indexer) + if profile.parent.type == "movie": + return self._search_movie_indexer(profile, indexer) + elif profile.parent.type in ["show", "season", "episode"]: + return self._search_series_indexer(profile, indexer) else: raise TypeError("Only Movie and Series is allowed!") @@ -161,7 +161,7 @@ def _process_results(self, results: List[Tuple[str, str]]) -> Tuple[Dict[str, st return torrents, len(results) - def _search_movie_indexer(self, item: MediaItem, indexer: ProwlarrIndexer) -> List[Tuple[str, str]]: + def _search_movie_indexer(self, profile: ProfileData, indexer: ProwlarrIndexer) -> List[Tuple[str, str]]: """Search for movies on the given indexer""" if indexer.movie_search_capabilities is None: return [] @@ -169,23 +169,23 @@ def _search_movie_indexer(self, item: MediaItem, indexer: ProwlarrIndexer) -> Li "apikey": self.api_key, "t": "movie", "cat": "2000", - "q": item.title, + "q": profile.parent.title, } if indexer.movie_search_capabilities and "year" in indexer.movie_search_capabilities: - if hasattr(item.aired_at, "year") and 
item.aired_at.year: params["year"] = item.aired_at.year + if hasattr(profile.parent.aired_at, "year") and profile.parent.aired_at.year: params["year"] = profile.parent.aired_at.year if indexer.movie_search_capabilities and "imdbid" in indexer.movie_search_capabilities: - params["imdbid"] = item.imdb_id + params["imdbid"] = profile.parent.ids["imdb_id"] url = f"{self.settings.url}/api/v1/indexer/{indexer.id}/newznab" return self._fetch_results(url, params, indexer.title, "movie") - def _search_series_indexer(self, item: MediaItem, indexer: ProwlarrIndexer) -> List[Tuple[str, str]]: + def _search_series_indexer(self, profile: ProfileData, indexer: ProwlarrIndexer) -> List[Tuple[str, str]]: """Search for series on the given indexer""" if indexer.tv_search_capabilities is None: return [] - q, season, ep = self._get_series_search_params(item) + q, season, ep = self._get_series_search_params(profile) if not q: - logger.debug(f"No search query found for {item.log_string}") + logger.debug(f"No search query found for {profile.log_string}") return [] params = { @@ -197,19 +197,20 @@ def _search_series_indexer(self, item: MediaItem, indexer: ProwlarrIndexer) -> L if ep and indexer.tv_search_capabilities and "ep" in indexer.tv_search_capabilities: params["ep"] = ep if season and indexer.tv_search_capabilities and "season" in indexer.tv_search_capabilities: params["season"] = season if indexer.tv_search_capabilities and "imdbid" in indexer.tv_search_capabilities: - params["imdbid"] = item.imdb_id if isinstance(item, [Episode, Show]) else item.parent.imdb_id + params["imdbid"] = profile.parent.get_top_imdb_id() url = f"{self.settings.url}/api/v1/indexer/{indexer.id}/newznab" return self._fetch_results(url, params, indexer.title, "series") - def _get_series_search_params(self, item: MediaItem) -> Tuple[str, int, Optional[int]]: + def _get_series_search_params(self, profile: ProfileData) -> Tuple[str, int, Optional[int]]: """Get search parameters for series""" - if isinstance(item, Show): - return item.get_top_title(), None, None - elif isinstance(item, Season): - return item.get_top_title(), item.number, None - elif isinstance(item, Episode): - return item.get_top_title(), item.parent.number, item.number + title = profile.parent.get_top_title() + if profile.parent.type == "show": + return title, None, None + elif profile.parent.type == "season": + return title, profile.parent.number, None + elif profile.parent.type == "episode": + return title, profile.parent.parent.number, profile.parent.number return "", 0, None def _get_indexers(self) -> List[ProwlarrIndexer]: diff --git a/src/program/scrapers/shared.py b/src/program/scrapers/shared.py index ef98bb75..7a2ec802 100644 --- a/src/program/scrapers/shared.py +++ b/src/program/scrapers/shared.py @@ -4,7 +4,7 @@ from RTN import RTN, ParsedData, Torrent, sort_torrents from RTN.exceptions import GarbageTorrent -from program.media.item import Episode, MediaItem, Movie, Season, Show +from program.media.item import ProfileData from program.media.state import States from program.media.stream import Stream from program.settings.manager import settings_manager @@ -17,31 +17,32 @@ rtn = RTN(settings_model, ranking_model) -def _get_stremio_identifier(item: MediaItem) -> tuple[str | None, str, str]: +def _get_stremio_identifier(profile: ProfileData) -> tuple[str | None, str, str]: """Get the stremio identifier for a media item based on its type.""" - if isinstance(item, Show): - identifier, scrape_type, imdb_id = ":1:1", "series", item.imdb_id - elif isinstance(item, 
Season): - identifier, scrape_type, imdb_id = f":{item.number}:1", "series", item.parent.imdb_id - elif isinstance(item, Episode): - identifier, scrape_type, imdb_id = f":{item.parent.number}:{item.number}", "series", item.parent.parent.imdb_id - elif isinstance(item, Movie): - identifier, scrape_type, imdb_id = None, "movie", item.imdb_id + imdb_id = profile.parent.get_top_imdb_id() + if profile.parent.type == "show": + identifier, scrape_type, imdb_id = ":1:1", "series", imdb_id + elif profile.parent.type == "season": + identifier, scrape_type, imdb_id = f":{profile.parent.number}:1", "series", imdb_id + elif profile.parent.type == "episode": + identifier, scrape_type, imdb_id = f":{profile.parent.parent.number}:{profile.parent.number}", "series", imdb_id + elif profile.parent.type == "movie": + identifier, scrape_type, imdb_id = None, "movie", imdb_id else: return None, None, None return identifier, scrape_type, imdb_id -def _parse_results(item: MediaItem, results: Dict[str, str], log_msg: bool = True) -> Dict[str, Stream]: +def _parse_results(profile: ProfileData, results: Dict[str, str], log_msg: bool = True) -> Dict[str, Stream]: """Parse the results from the scrapers into Torrent objects.""" torrents: Set[Torrent] = set() processed_infohashes: Set[str] = set() - correct_title: str = item.get_top_title() + correct_title: str = profile.parent.get_top_title() - logger.log("SCRAPER", f"Processing {len(results)} results for {item.log_string}") + logger.log("SCRAPER", f"Processing {len(results)} results for {profile.log_string}") - if item.type in ["show", "season", "episode"]: - needed_seasons: list[int] = _get_needed_seasons(item) + if profile.parent.type in ["show", "season", "episode"]: + needed_seasons: list[int] = _get_needed_seasons(profile.parent) for infohash, raw_title in results.items(): if infohash in processed_infohashes: @@ -52,53 +53,53 @@ def _parse_results(item: MediaItem, results: Dict[str, str], log_msg: bool = Tru raw_title=raw_title, infohash=infohash, correct_title=correct_title, - remove_trash=settings_manager.settings.ranking.options["remove_all_trash"], - aliases=item.get_aliases() if enable_aliases else {} # in some cases we want to disable aliases + remove_trash=profile.profile.model["options"]["remove_all_trash"], + aliases=profile.parent.get_aliases() if enable_aliases else {} # in some cases we want to disable aliases ) - if torrent.data.country and not item.is_anime: - if _get_item_country(item) != torrent.data.country: + if torrent.data.country and not profile.parent.is_anime: + if _get_item_country(profile.parent) != torrent.data.country: if settings_manager.settings.scraping.parse_debug: - logger.debug(f"Skipping torrent for incorrect country with {item.log_string}: {raw_title}") + logger.debug(f"Skipping torrent for incorrect country with {profile.log_string}: {raw_title}") continue - if item.type in ["show", "season", "episode"]: + if profile.parent.type in ["show", "season", "episode"]: if torrent.data.complete: torrents.add(torrent) processed_infohashes.add(infohash) continue - if item.type == "movie": + if profile.parent.type == "movie": # Check if a movie is within a year range of +/- 1 year. 
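Given those branches, `_get_stremio_identifier` yields one of four shapes; expected outputs for each item type (ids illustrative):

    # show    -> (":1:1", "series", "tt0000001")   # probe with S01E01
    # season  -> (":3:1", "series", "tt0000001")   # season 3, episode 1
    # episode -> (":3:7", "series", "tt0000001")   # season 3, episode 7
    # movie   -> (None,   "movie",  "tt0000002")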
# Ex: [2018, 2019, 2020] for a 2019 movie - if _check_item_year(item, torrent.data): + if _check_item_year(profile.parent, torrent.data): torrents.add(torrent) - elif item.type == "show": + elif profile.parent.type == "show": if torrent.data.seasons and not torrent.data.episodes: - # We subtract one because Trakt doesn't always index + # We subtract one because Trakt doesn't always index # shows according to uploaders if len(torrent.data.seasons) >= (len(needed_seasons) - 1): torrents.add(torrent) - elif item.type == "season": + elif profile.parent.type == "season": # If the torrent has the needed seasons and no episodes, we can add it if any(season in torrent.data.seasons for season in needed_seasons) and not torrent.data.episodes: torrents.add(torrent) - elif item.type == "episode": + elif profile.parent.type == "episode": # If the torrent has the season and episode numbers, we can add it if ( - item.number in torrent.data.episodes - and item.parent.number in torrent.data.seasons + profile.parent.number in torrent.data.episodes + and profile.parent.parent.number in torrent.data.seasons ): torrents.add(torrent) # Anime edge cases where no season number is present for single season shows elif ( - len(item.parent.parent.seasons) == 1 + len(profile.parent.parent.parent.seasons) == 1 and not torrent.data.seasons - and item.number in torrent.data.episodes + and profile.parent.number in torrent.data.episodes ): torrents.add(torrent) # If no episodes are present but the needed seasons are, we'll add it @@ -119,11 +120,11 @@ def _parse_results(item: MediaItem, results: Dict[str, str], log_msg: bool = Tru continue except GarbageTorrent as e: if settings_manager.settings.scraping.parse_debug and log_msg: - logger.debug(f"Trashing torrent for {item.log_string}: '{raw_title}'") + logger.debug(f"Trashing torrent for {profile.log_string}: '{raw_title}'") continue if torrents: - logger.log("SCRAPER", f"Processed {len(torrents)} matches for {item.log_string}") + logger.log("SCRAPER", f"Processed {len(torrents)} matches for {profile.log_string}") torrents = sort_torrents(torrents) torrents_dict = {} for torrent in torrents.values(): @@ -134,27 +135,27 @@ def _parse_results(item: MediaItem, results: Dict[str, str], log_msg: bool = Tru # helper functions -def _check_item_year(item: MediaItem, data: ParsedData) -> bool: +def _check_item_year(profile: ProfileData, parsed_data: ParsedData) -> bool: """Check if the year of the torrent is within the range of the item.""" - year_range = [item.aired_at.year - 1, item.aired_at.year, item.aired_at.year + 1] - if item.type == "movie" and data.year: - return data.year in year_range + year_range = [profile.parent.aired_at.year - 1, profile.parent.aired_at.year, profile.parent.aired_at.year + 1] + if profile.parent.type == "movie" and parsed_data.year: + return parsed_data.year in year_range return False -def _get_item_country(item: MediaItem) -> str: +def _get_item_country(profile: ProfileData) -> str: """Get the country code for a country.""" - if item.type == "season": - return item.parent.country.upper() - elif item.type == "episode": - return item.parent.parent.country.upper() - return item.country.upper() + if profile.parent.type == "season": + return profile.parent.country.upper() + elif profile.parent.type == "episode": + return profile.parent.parent.country.upper() + return profile.parent.country.upper() -def _get_needed_seasons(item: Union[Show, Season, Episode]) -> list[int]: +def _get_needed_seasons(profile: ProfileData) -> list[int]: """Get the seasons that 
are needed for the item.""" - if item.type == "show": - return [season.number for season in item.seasons if season.last_state != States.Completed] - elif item.type == "season": - return [season.number for season in item.parent.seasons if season.last_state != States.Completed] - elif item.type == "episode": - return [season.number for season in item.parent.parent.seasons if season.last_state != States.Completed] + if profile.parent.type == "show": + return [season.number for season in profile.parent.seasons if season.last_state != States.Completed] + elif profile.parent.type == "season": + return [season.number for season in profile.parent.seasons if season.last_state != States.Completed] + elif profile.parent.type == "episode": + return [season.number for season in profile.parent.parent.seasons if season.last_state != States.Completed] return [] diff --git a/src/program/scrapers/torbox.py b/src/program/scrapers/torbox.py index 53e4ecd1..705bec37 100644 --- a/src/program/scrapers/torbox.py +++ b/src/program/scrapers/torbox.py @@ -1,9 +1,9 @@ from typing import Dict from requests import RequestException -from requests.exceptions import ConnectTimeout, ReadTimeout, RetryError +from requests.exceptions import ConnectTimeout -from program.media.item import Episode, MediaItem, Movie, Season, Show +from program.media.item import ProfileData from program.settings.manager import settings_manager from utils.logger import logger from utils.ratelimiter import RateLimiter, RateLimitExceeded @@ -37,37 +37,37 @@ def validate(self) -> bool: logger.exception(f"Error validating TorBox Scraper: {e}") return False - def run(self, item: MediaItem) -> Dict[str, str]: + def run(self, profile: ProfileData) -> Dict[str, str]: """Scrape Torbox with the given media item for streams""" try: - return self.scrape(item) + return self.scrape(profile) except RateLimitExceeded: self.rate_limiter.limit_hit() except ConnectTimeout: - logger.log("NOT_FOUND", f"TorBox is caching request for {item.log_string}, will retry later") + logger.log("NOT_FOUND", f"TorBox is caching request for {profile.log_string}, will retry later") except RequestException as e: if e.response and e.response.status_code == 418: - logger.log("NOT_FOUND", f"TorBox has no metadata for item: {item.log_string}, unable to scrape") + logger.log("NOT_FOUND", f"TorBox has no metadata for item: {profile.log_string}, unable to scrape") elif e.response and e.response.status_code == 500: - logger.log("NOT_FOUND", f"TorBox is caching request for {item.log_string}, will retry later") + logger.log("NOT_FOUND", f"TorBox is caching request for {profile.log_string}, will retry later") except Exception as e: logger.error(f"TorBox exception thrown: {e}") return {} - def _build_query_params(self, item: MediaItem) -> str: + def _build_query_params(self, profile: ProfileData) -> str: """Build the query params for the TorBox API""" - params = [f"imdb:{item.imdb_id}"] - if item.type == "show": + params = [f"imdb:{profile.parent.ids['imdb_id']}"] + if profile.parent.type == "show": params.append(f"season=1") - elif item.type == "season": - params.append(f"season={item.number}") - elif item.type == "episode": - params.append(f"season={item.parent.number}&episode={item.number}") + elif profile.parent.type == "season": + params.append(f"season={profile.parent.number}") + elif profile.parent.type == "episode": + params.append(f"season={profile.parent.parent.number}&episode={profile.parent.number}") return "&".join(params) - def scrape(self, item: MediaItem) -> tuple[Dict[str, str], 
int]: + def scrape(self, profile: ProfileData) -> Dict[str, str]: """Wrapper for `Torbox` scrape method using Torbox API""" - query_params = self._build_query_params(item) + query_params = self._build_query_params(profile) url = f"{self.base_url}/torrents/{query_params}?metadata=false" response = get(url, timeout=self.timeout, specific_rate_limiter=self.rate_limiter) @@ -84,8 +84,8 @@ def scrape(self, item: MediaItem) -> tuple[Dict[str, str], int]: torrents[info_hash] = raw_title if torrents: - logger.log("SCRAPER", f"Found {len(torrents)} streams for {item.log_string}") + logger.log("SCRAPER", f"Found {len(torrents)} streams for {profile.log_string}") else: - logger.log("NOT_FOUND", f"No streams found for {item.log_string}") + logger.log("NOT_FOUND", f"No streams found for {profile.log_string}") return torrents \ No newline at end of file diff --git a/src/program/scrapers/torrentio.py b/src/program/scrapers/torrentio.py index 565b5f47..957c3719 100644 --- a/src/program/scrapers/torrentio.py +++ b/src/program/scrapers/torrentio.py @@ -1,13 +1,10 @@ """ Torrentio scraper module """ from typing import Dict -from requests import ConnectTimeout, ReadTimeout -from requests.exceptions import RequestException - -from program.media.item import MediaItem +from program.media.item import ProfileData +from program.scrapers.shared import _get_stremio_identifier from program.settings.manager import settings_manager from program.settings.models import TorrentioConfig -from program.scrapers.shared import _get_stremio_identifier from utils.logger import logger from utils.ratelimiter import RateLimiter, RateLimitExceeded from utils.request import get, ping @@ -46,19 +43,19 @@ def validate(self) -> bool: return False return True - def run(self, item: MediaItem) -> Dict[str, str]: + def run(self, profile: ProfileData) -> Dict[str, str]: """Scrape Torrentio with the given media item for streams""" try: - return self.scrape(item) + return self.scrape(profile) except RateLimitExceeded: self.rate_limiter.limit_hit() except Exception as e: logger.error(f"Torrentio exception thrown: {str(e)}") return {} - def scrape(self, item: MediaItem) -> tuple[Dict[str, str], int]: + def scrape(self, profile: ProfileData) -> Dict[str, str]: """Wrapper for `Torrentio` scrape method""" - identifier, scrape_type, imdb_id = _get_stremio_identifier(item) + identifier, scrape_type, imdb_id = _get_stremio_identifier(profile) if not imdb_id: return {} @@ -80,8 +77,8 @@ def scrape(self, item: MediaItem) -> tuple[Dict[str, str], int]: torrents[stream.infoHash] = raw_title if torrents: - logger.log("SCRAPER", f"Found {len(torrents)} streams for {item.log_string}") + logger.log("SCRAPER", f"Found {len(torrents)} streams for {profile.log_string}") else: - logger.log("NOT_FOUND", f"No streams found for {item.log_string}") + logger.log("NOT_FOUND", f"No streams found for {profile.log_string}") return torrents \ No newline at end of file diff --git a/src/program/scrapers/zilean.py b/src/program/scrapers/zilean.py index a3d67705..ccd30067 100644 --- a/src/program/scrapers/zilean.py +++ b/src/program/scrapers/zilean.py @@ -2,12 +2,8 @@ from typing import Dict -from requests import ConnectTimeout, ReadTimeout -from requests.exceptions import RequestException - -from program.media.item import Episode, MediaItem, Season, Show +from program.media.item import ProfileData from program.settings.manager import settings_manager -from program.settings.models import AppModel from utils.logger import logger from utils.ratelimiter import RateLimiter, 
RateLimitExceeded
from utils.request import get, ping

@@ -45,34 +41,34 @@ def validate(self) -> bool:
             logger.error(f"Zilean failed to initialize: {e}")
             return False
 
-    def run(self, item: MediaItem) -> Dict[str, str]:
+    def run(self, data: ProfileData) -> Dict[str, str]:
         """Scrape the Zilean site for the given media items and update the object with scraped items"""
         try:
-            return self.scrape(item)
+            return self.scrape(data)
         except RateLimitExceeded:
             self.rate_limiter.limit_hit()
         except Exception as e:
             logger.error(f"Zilean exception thrown: {e}")
         return {}
 
-    def _build_query_params(self, item: MediaItem) -> Dict[str, str]:
+    def _build_query_params(self, data: ProfileData) -> Dict[str, str]:
         """Build the query params for the Zilean API"""
-        params = {"Query": item.get_top_title()}
-        if isinstance(item, MediaItem) and hasattr(item, "year"):
-            params["Year"] = item.year
-        if isinstance(item, Show):
+        params = {"Query": data.parent.get_top_title()}
+        if data.parent.aired_at:
+            params["Year"] = data.parent.aired_at.year
+        if data.parent.type == "show":
             params["Season"] = 1
-        elif isinstance(item, Season):
-            params["Season"] = item.number
-        elif isinstance(item, Episode):
-            params["Season"] = item.parent.number
-            params["Episode"] = item.number
+        elif data.parent.type == "season":
+            params["Season"] = data.parent.number
+        elif data.parent.type == "episode":
+            params["Season"] = data.parent.parent.number
+            params["Episode"] = data.parent.number
         return params
 
-    def scrape(self, item: MediaItem) -> Dict[str, str]:
+    def scrape(self, data: ProfileData) -> Dict[str, str]:
         """Wrapper for `Zilean` scrape method"""
         url = f"{self.settings.url}/dmm/filtered"
-        params = self._build_query_params(item)
+        params = self._build_query_params(data)
 
         response = get(url, params=params, timeout=self.timeout, specific_rate_limiter=self.rate_limiter)
         if not response.is_ok or not response.data:
@@ -85,8 +81,8 @@ def scrape(self, item: MediaItem) -> Dict[str, str]:
             torrents[result.info_hash] = result.raw_title
 
         if torrents:
-            logger.log("SCRAPER", f"Found {len(torrents)} streams for {item.log_string}")
+            logger.log("SCRAPER", f"Found {len(torrents)} streams for {data.log_string}")
         else:
-            logger.log("NOT_FOUND", f"No streams found for {item.log_string}")
+            logger.log("NOT_FOUND", f"No streams found for {data.log_string}")
 
         return torrents
\ No newline at end of file
diff --git a/src/program/settings/models.py b/src/program/settings/models.py
index 0b9f0392..4ef4e5f3 100644
--- a/src/program/settings/models.py
+++ b/src/program/settings/models.py
@@ -351,6 +351,18 @@ class SubliminalConfig(Observable):
         }
     }
 
+class RivenSettingsModel(SettingsModel):
+    def to_dict(self):
+        return {
+            "profile": self.profile,
+            "require": self.require,
+            "resolutions": self.resolutions,
+            "options": self.options,
+            "languages": self.languages,
+            # "custom_ranks": self.custom_ranks
+        }
+
+
 class PostProcessing(Observable):
     subliminal: SubliminalConfig = SubliminalConfig()
 
@@ -366,7 +378,7 @@ class AppModel(Observable):
     downloaders: DownloadersModel = DownloadersModel()
     content: ContentModel = ContentModel()
     scraping: ScraperModel = ScraperModel()
-    ranking: RTNSettingsModel = RTNSettingsModel()
+    ranking: RivenSettingsModel = RivenSettingsModel()
     indexer: IndexerModel = IndexerModel()
     database: DatabaseModel = DatabaseModel()
     notifications: NotificationsModel = NotificationsModel()
diff --git a/src/program/state_transition.py b/src/program/state_transition.py
index eeb6018a..504ee28b 100644
--- a/src/program/state_transition.py
+++ 
b/src/program/state_transition.py
@@ -1,3 +1,4 @@
+from typing import Tuple
 from program.content import Listrr, Mdblist, Overseerr, PlexWatchlist
 from program.content.trakt import TraktContent
 from program.db.db_functions import _imdb_exists_in_db
@@ -5,6 +6,7 @@
 from program.indexers.trakt import TraktIndexer
 from program.libraries import SymlinkLibrary
 from program.media import Episode, MediaItem, Movie, Season, Show, States
+from program.media.item import ProfileData
 from program.post_processing import PostProcessing, notify
 from program.post_processing.subliminal import Subliminal
 from program.scrapers import Scraping
@@ -15,85 +17,94 @@
 from utils.logger import logger
 
 
-def process_event(existing_item: MediaItem | None, emitted_by: Service, item: MediaItem) -> ProcessedEvent:
-    """Process an event and return the updated item, next service and items to submit."""
-    next_service: Service = None
+def process_event(existing_item: MediaItem | None, emitted_by: Service, item: MediaItem | ProfileData) -> ProcessedEvent:
+    """Process an event and return the updated item and the items to submit."""
     updated_item = item
-    no_further_processing: ProcessedEvent = (None, None, [])
+    no_further_processing: ProcessedEvent = (None, [])
     items_to_submit = []
 
-    source_services = (Overseerr, PlexWatchlist, Listrr, Mdblist, SymlinkLibrary, TraktContent)
-    if emitted_by in source_services or item.state in [States.Requested]:
-        next_service = TraktIndexer
-        if _imdb_exists_in_db(item.imdb_id) and item.last_state == States.Completed:
-            logger.debug(f"Item {item.log_string} already exists in the database.")
-            return no_further_processing
-        if isinstance(item, Season):
-            item = item.parent
-            existing_item = existing_item.parent if existing_item else None
-        if existing_item and not TraktIndexer.should_submit(existing_item):
-            return no_further_processing
-        return None, next_service, [item]
+    if isinstance(item, MediaItem):
+        if emitted_by in (Overseerr, PlexWatchlist, Listrr, Mdblist, SymlinkLibrary, TraktContent, "ApiAdd"):
+            if _imdb_exists_in_db(item.ids["imdb_id"]) and item.last_state == States.Completed:
+                logger.debug(f"Item {item.log_string} already exists in the database.")
+                return no_further_processing
+            if isinstance(item, Season):
+                item = item.parent
+                existing_item = existing_item.parent if existing_item else None
+            if existing_item and not TraktIndexer.should_submit(existing_item):
+                return no_further_processing
+            return None, [(item, TraktIndexer)]
 
-    elif item.last_state in [States.PartiallyCompleted, States.Ongoing]:
-        if item.type == "show":
-            for season in item.seasons:
-                if season.last_state not in [States.Completed, States.Unreleased]:
-                    _, _, sub_items = process_event(season, emitted_by, season)
+        if existing_item and not existing_item.indexed_at:
+            if item.type in ("show", "season"):
+                existing_item.fill_in_missing_children(item)
+            existing_item.copy_other_media_attr(item)
+            existing_item.indexed_at = item.indexed_at
+            updated_item = item = existing_item
+        if item.last_state == States.Completed:
+            return item, []
+        else:
+            for profile in item.profiles:
+                if profile.last_state != States.Completed:
+                    _, sub_items = process_event(None, emitted_by, profile)
+                    items_to_submit += sub_items
+
+    elif isinstance(item, ProfileData):
+        profile = item
+        if profile.last_state == States.Requested:
+            if Scraping.should_submit(profile):
+                items_to_submit = [(profile, Scraping)]
+        else:
+            if item.parent.type == "show":
+                _, sub_items = process_event(item.parent, emitted_by, item.parent)
                 items_to_submit += sub_items
-        elif item.type == "season":
-            for episode in 
item.episodes: - if episode.last_state != States.Completed: - _, _, sub_items = process_event(episode, emitted_by, episode) + elif item.parent.type == "season": + _, sub_items = process_event(item.parent, emitted_by, item.parent) items_to_submit += sub_items - elif item.last_state == States.Indexed: - next_service = Scraping - if existing_item: - if not existing_item.indexed_at: - if isinstance(item, (Show, Season)): - existing_item.fill_in_missing_children(item) - existing_item.copy_other_media_attr(item) - existing_item.indexed_at = item.indexed_at - updated_item = item = existing_item - if existing_item.last_state == States.Completed: - return existing_item, None, [] - elif not emitted_by == Scraping and Scraping.can_we_scrape(existing_item): - items_to_submit = [existing_item] - elif item.type == "show": - items_to_submit = [s for s in item.seasons if s.last_state != States.Completed and Scraping.can_we_scrape(s)] - elif item.type == "season": - items_to_submit = [e for e in item.episodes if e.last_state != States.Completed and Scraping.can_we_scrape(e)] + # elif item.last_state == States.Scraped: + # next_service = Downloader + # items_to_submit = [item] - elif item.last_state == States.Scraped: - next_service = Downloader - items_to_submit = [item] + # elif item.last_state == States.Downloaded: + # next_service = Symlinker + # items_to_submit = [item] - elif item.last_state == States.Downloaded: - next_service = Symlinker - items_to_submit = [item] + # elif item.last_state == States.Symlinked: + # next_service = Updater + # items_to_submit = [item] - elif item.last_state == States.Symlinked: - next_service = Updater - items_to_submit = [item] + # elif item.last_state == States.Completed: + # # If a user manually retries an item, lets not notify them again + # if emitted_by not in ["Manual", PostProcessing]: + # notify(item) + # # Avoid multiple post-processing runs + # if not emitted_by == PostProcessing: + # if settings_manager.settings.post_processing.subliminal.enabled: + # next_service = PostProcessing + # if item.type in ["movie", "episode"] and Subliminal.should_submit(item): + # items_to_submit = [item] + # elif item.type == "show": + # items_to_submit = [e for s in item.seasons for e in s.episodes if e.last_state == States.Completed and Subliminal.should_submit(e)] + # elif item.type == "season": + # items_to_submit = [e for e in item.episodes if e.last_state == States.Completed and Subliminal.should_submit(e)] + # if not items_to_submit: + # return no_further_processing + # else: + # return no_further_processing + + return updated_item, items_to_submit - elif item.last_state == States.Completed: - # If a user manually retries an item, lets not notify them again - if emitted_by not in ["Manual", PostProcessing]: - notify(item) - # Avoid multiple post-processing runs - if not emitted_by == PostProcessing: - if settings_manager.settings.post_processing.subliminal.enabled: - next_service = PostProcessing - if item.type in ["movie", "episode"] and Subliminal.should_submit(item): - items_to_submit = [item] - elif item.type == "show": - items_to_submit = [e for s in item.seasons for e in s.episodes if e.last_state == States.Completed and Subliminal.should_submit(e)] - elif item.type == "season": - items_to_submit = [e for e in item.episodes if e.last_state == States.Completed and Subliminal.should_submit(e)] - if not items_to_submit: - return no_further_processing - else: - return no_further_processing - return updated_item, next_service, items_to_submit \ No newline at end of file 
+ # elif item.last_state in [States.PartiallyCompleted, States.Ongoing]: + # if item.type == "show": + # for season in item.seasons: + # if season.last_state not in [States.Completed, States.Unreleased]: + # _, _, sub_items = process_event(season, emitted_by, season) + # items_to_submit += sub_items + # elif item.type == "season": + # for episode in item.episodes: + # if episode.last_state != States.Completed: + # _, _, sub_items = process_event(episode, emitted_by, episode) + # items_to_submit += sub_items \ No newline at end of file diff --git a/src/program/symlink.py b/src/program/symlink.py index 870d34f1..551a0a5e 100644 --- a/src/program/symlink.py +++ b/src/program/symlink.py @@ -208,23 +208,23 @@ def create_folder_path(base_path, *subfolders): return path if isinstance(item, Movie): - movie_folder = f"{item.title.replace('/', '-')} ({item.aired_at.year}) {{imdb-{item.imdb_id}}}" + movie_folder = f"{item.title.replace('/', '-')} ({item.aired_at.year}) {{imdb-{item.ids['imdb_id']}}}" destination_folder = create_folder_path(movie_path, movie_folder) item.set("update_folder", destination_folder) elif isinstance(item, Show): - folder_name_show = f"{item.title.replace('/', '-')} ({item.aired_at.year}) {{imdb-{item.imdb_id}}}" + folder_name_show = f"{item.title.replace('/', '-')} ({item.aired_at.year}) {{imdb-{item.ids['imdb_id']}}}" destination_folder = create_folder_path(show_path, folder_name_show) item.set("update_folder", destination_folder) elif isinstance(item, Season): show = item.parent - folder_name_show = f"{show.title.replace('/', '-')} ({show.aired_at.year}) {{imdb-{show.imdb_id}}}" + folder_name_show = f"{show.title.replace('/', '-')} ({show.aired_at.year}) {{imdb-{show.ids['imdb_id']}}}" show_path = create_folder_path(show_path, folder_name_show) folder_season_name = f"Season {str(item.number).zfill(2)}" destination_folder = create_folder_path(show_path, folder_season_name) item.set("update_folder", destination_folder) elif isinstance(item, Episode): show = item.parent.parent - folder_name_show = f"{show.title.replace('/', '-')} ({show.aired_at.year}) {{imdb-{show.imdb_id}}}" + folder_name_show = f"{show.title.replace('/', '-')} ({show.aired_at.year}) {{imdb-{show.ids['imdb_id']}}}" show_path = create_folder_path(show_path, folder_name_show) season = item.parent folder_season_name = f"Season {str(season.number).zfill(2)}" @@ -237,7 +237,7 @@ def _determine_file_name(self, item: Union[Movie, Episode]) -> str | None: """Determine the filename of the symlink.""" filename = None if isinstance(item, Movie): - filename = f"{item.title} ({item.aired_at.year}) " + "{imdb-" + item.imdb_id + "}" + filename = f"{item.title} ({item.aired_at.year}) " + "{imdb-" + item.ids['imdb_id'] + "}" elif isinstance(item, Season): showname = item.parent.title showyear = item.parent.aired_at.year @@ -264,10 +264,10 @@ def delete_item_symlinks(self, item: "MediaItem") -> bool: item_path = None if isinstance(item, Show): base_path = self.library_path_anime_shows if item.is_anime else self.library_path_shows - item_path = base_path / f"{item.title.replace('/', '-')} ({item.aired_at.year}) {{imdb-{item.imdb_id}}}" + item_path = base_path / f"{item.title.replace('/', '-')} ({item.aired_at.year}) {{imdb-{item.ids['imdb_id']}}}" elif isinstance(item, Movie): base_path = self.library_path_anime_movies if item.is_anime else self.library_path_movies - item_path = base_path / f"{item.title.replace('/', '-')} ({item.aired_at.year}) {{imdb-{item.imdb_id}}}" + item_path = base_path / f"{item.title.replace('/', 
'-')} ({item.aired_at.year}) {{imdb-{item.ids['imdb_id']}}}" return _delete_symlink(item, item_path) def _delete_symlink(item: Union[Movie, Show], item_path: Path) -> bool: diff --git a/src/tests/test_container.py b/src/tests/test_container.py index 64590340..e19cfb35 100644 --- a/src/tests/test_container.py +++ b/src/tests/test_container.py @@ -138,5 +138,5 @@ # # assert len(missing_items) == 4 # assert missing_items[next(iter(missing_items))].state == States.Unknown -# assert missing_items[next(iter(missing_items))].imdb_id == "tt1405406" +# assert missing_items[next(iter(missing_items))].ids["imdb_id"] == "tt1405406" # assert missing_items[next(iter(missing_items))].title == "Test Show" \ No newline at end of file diff --git a/src/tests/test_db_functions.py b/src/tests/test_db_functions.py index 4c8c076a..8b04e081 100644 --- a/src/tests/test_db_functions.py +++ b/src/tests/test_db_functions.py @@ -36,8 +36,8 @@ def test_reset_streams_for_mediaitem_with_no_streams(test_scoped_db_session): reset_streams(media_item) - assert test_scoped_db_session.query(StreamRelation).filter_by(parent_id=media_item._id).count() == 0 - assert test_scoped_db_session.query(StreamBlacklistRelation).filter_by(media_item_id=media_item._id).count() == 0 + assert test_scoped_db_session.query(StreamRelation).filter_by(parent_id=media_item.id).count() == 0 + assert test_scoped_db_session.query(StreamBlacklistRelation).filter_by(media_item_id=media_item.id).count() == 0 def test_add_new_mediaitem_with_multiple_streams_and_reset_streams(test_scoped_db_session): @@ -64,16 +64,16 @@ def test_add_new_mediaitem_with_multiple_streams_and_reset_streams(test_scoped_d test_scoped_db_session.add(stream2) test_scoped_db_session.commit() - stream_relation1 = StreamRelation(parent_id=media_item._id, child_id=stream1._id) - stream_relation2 = StreamRelation(parent_id=media_item._id, child_id=stream2._id) + stream_relation1 = StreamRelation(parent_id=media_item.id, child_id=stream1.id) + stream_relation2 = StreamRelation(parent_id=media_item.id, child_id=stream2.id) test_scoped_db_session.add(stream_relation1) test_scoped_db_session.add(stream_relation2) test_scoped_db_session.commit() reset_streams(media_item) - assert test_scoped_db_session.query(StreamRelation).filter_by(parent_id=media_item._id).count() == 0 - assert test_scoped_db_session.query(StreamBlacklistRelation).filter_by(media_item_id=media_item._id).count() == 0 + assert test_scoped_db_session.query(StreamRelation).filter_by(parent_id=media_item.id).count() == 0 + assert test_scoped_db_session.query(StreamBlacklistRelation).filter_by(media_item_id=media_item.id).count() == 0 def test_blacklists_active_stream(test_scoped_db_session): media_item = MediaItem({"name":"New MediaItem"}) @@ -89,13 +89,13 @@ def test_blacklists_active_stream(test_scoped_db_session): test_scoped_db_session.add(media_item) test_scoped_db_session.add(stream) test_scoped_db_session.commit() - stream_relation = StreamRelation(parent_id=media_item._id, child_id=stream._id) + stream_relation = StreamRelation(parent_id=media_item.id, child_id=stream.id) test_scoped_db_session.add(stream_relation) test_scoped_db_session.commit() blacklist_stream(media_item, stream) - assert test_scoped_db_session.query(StreamBlacklistRelation).filter_by(media_item_id=media_item._id, stream_id=stream._id).count() == 1 + assert test_scoped_db_session.query(StreamBlacklistRelation).filter_by(media_item_id=media_item.id, stream_id=stream.id).count() == 1 def test_successfully_resets_streams(test_scoped_db_session): 
media_item = MediaItem({"name":"New MediaItem"}) @@ -124,16 +124,16 @@ def test_successfully_resets_streams(test_scoped_db_session): test_scoped_db_session.add(stream2) test_scoped_db_session.commit() - stream_relation1 = StreamRelation(parent_id=media_item._id, child_id=stream1._id) - stream_relation2 = StreamRelation(parent_id=media_item._id, child_id=stream2._id) + stream_relation1 = StreamRelation(parent_id=media_item.id, child_id=stream1.id) + stream_relation2 = StreamRelation(parent_id=media_item.id, child_id=stream2.id) test_scoped_db_session.add(stream_relation1) test_scoped_db_session.add(stream_relation2) test_scoped_db_session.commit() reset_streams(media_item) - assert test_scoped_db_session.query(StreamRelation).filter_by(parent_id=media_item._id).count() == 0 - assert test_scoped_db_session.query(StreamBlacklistRelation).filter_by(media_item_id=media_item._id).count() == 0 + assert test_scoped_db_session.query(StreamRelation).filter_by(parent_id=media_item.id).count() == 0 + assert test_scoped_db_session.query(StreamBlacklistRelation).filter_by(media_item_id=media_item.id).count() == 0 def test_delete_media_item_success(test_scoped_db_session): media_item = MediaItem({"name":"New MediaItem"}) @@ -151,19 +151,19 @@ def test_delete_media_item_success(test_scoped_db_session): test_scoped_db_session.add(stream1) test_scoped_db_session.commit() - stream_relation1 = StreamRelation(parent_id=media_item._id, child_id=stream1._id) + stream_relation1 = StreamRelation(parent_id=media_item.id, child_id=stream1.id) test_scoped_db_session.add(stream_relation1) test_scoped_db_session.commit() - assert test_scoped_db_session.query(MediaItem).filter_by(_id=media_item._id).count() == 1 - assert test_scoped_db_session.query(StreamRelation).filter_by(parent_id=media_item._id).count() == 1 - assert test_scoped_db_session.query(Stream).filter_by(_id=stream1._id).count() == 1 + assert test_scoped_db_session.query(MediaItem).filter_by(_id=media_item.id).count() == 1 + assert test_scoped_db_session.query(StreamRelation).filter_by(parent_id=media_item.id).count() == 1 + assert test_scoped_db_session.query(Stream).filter_by(_id=stream1.id).count() == 1 delete_media_item(media_item) - assert test_scoped_db_session.query(MediaItem).filter_by(_id=media_item._id).count() == 0 - assert test_scoped_db_session.query(StreamRelation).filter_by(parent_id=media_item._id).count() == 0 - assert test_scoped_db_session.query(Stream).filter_by(_id=stream1._id).count() == 0 + assert test_scoped_db_session.query(MediaItem).filter_by(_id=media_item.id).count() == 0 + assert test_scoped_db_session.query(StreamRelation).filter_by(parent_id=media_item.id).count() == 0 + assert test_scoped_db_session.query(Stream).filter_by(_id=stream1.id).count() == 0 def test_delete_show_with_seasons_and_episodes_success(test_scoped_db_session): show = Show({"title": "New Show"}) @@ -172,14 +172,14 @@ def test_delete_show_with_seasons_and_episodes_success(test_scoped_db_session): test_scoped_db_session.commit() season1 = Season({"number": 1, "parent": show}) - season1.parent_id = show._id + season1.parent_id = show.id test_scoped_db_session.add(season1) test_scoped_db_session.commit() episode1 = Episode({"number": 1}) episode2 = Episode({"number": 2}) - episode1.parent_id = season1._id - episode2.parent_id = season1._id + episode1.parent_id = season1.id + episode2.parent_id = season1.id test_scoped_db_session.add(episode1) test_scoped_db_session.add(episode2) test_scoped_db_session.commit() @@ -195,23 +195,23 @@ def 
test_delete_show_with_seasons_and_episodes_success(test_scoped_db_session): test_scoped_db_session.add(stream1) test_scoped_db_session.commit() - stream_relation1 = StreamRelation(parent_id=show._id, child_id=stream1._id) + stream_relation1 = StreamRelation(parent_id=show.id, child_id=stream1.id) test_scoped_db_session.add(stream_relation1) test_scoped_db_session.commit() - assert test_scoped_db_session.query(Show).filter_by(_id=show._id).count() == 1 - assert test_scoped_db_session.query(Season).filter_by(parent_id=show._id).count() == 1 - assert test_scoped_db_session.query(Episode).filter_by(parent_id=season1._id).count() == 2 - assert test_scoped_db_session.query(StreamRelation).filter_by(parent_id=show._id).count() == 1 - assert test_scoped_db_session.query(Stream).filter_by(_id=stream1._id).count() == 1 + assert test_scoped_db_session.query(Show).filter_by(_id=show.id).count() == 1 + assert test_scoped_db_session.query(Season).filter_by(parent_id=show.id).count() == 1 + assert test_scoped_db_session.query(Episode).filter_by(parent_id=season1.id).count() == 2 + assert test_scoped_db_session.query(StreamRelation).filter_by(parent_id=show.id).count() == 1 + assert test_scoped_db_session.query(Stream).filter_by(_id=stream1.id).count() == 1 delete_media_item(show) - assert test_scoped_db_session.query(Show).filter_by(_id=show._id).count() == 0 - assert test_scoped_db_session.query(Season).filter_by(parent_id=show._id).count() == 0 - assert test_scoped_db_session.query(Episode).filter_by(parent_id=season1._id).count() == 0 - assert test_scoped_db_session.query(StreamRelation).filter_by(parent_id=show._id).count() == 0 - assert test_scoped_db_session.query(Stream).filter_by(_id=stream1._id).count() == 0 + assert test_scoped_db_session.query(Show).filter_by(_id=show.id).count() == 0 + assert test_scoped_db_session.query(Season).filter_by(parent_id=show.id).count() == 0 + assert test_scoped_db_session.query(Episode).filter_by(parent_id=season1.id).count() == 0 + assert test_scoped_db_session.query(StreamRelation).filter_by(parent_id=show.id).count() == 0 + assert test_scoped_db_session.query(Stream).filter_by(_id=stream1.id).count() == 0 def test_delete_show_by_id_with_seasons_and_episodes_success(test_scoped_db_session): show = Show({"title": "New Show"}) @@ -220,14 +220,14 @@ def test_delete_show_by_id_with_seasons_and_episodes_success(test_scoped_db_sess test_scoped_db_session.commit() season1 = Season({"number": 1, "parent": show}) - season1.parent_id = show._id + season1.parent_id = show.id test_scoped_db_session.add(season1) test_scoped_db_session.commit() episode1 = Episode({"number": 1}) episode2 = Episode({"number": 2}) - episode1.parent_id = season1._id - episode2.parent_id = season1._id + episode1.parent_id = season1.id + episode2.parent_id = season1.id test_scoped_db_session.add(episode1) test_scoped_db_session.add(episode2) test_scoped_db_session.commit() @@ -243,23 +243,23 @@ def test_delete_show_by_id_with_seasons_and_episodes_success(test_scoped_db_sess test_scoped_db_session.add(stream1) test_scoped_db_session.commit() - stream_relation1 = StreamRelation(parent_id=show._id, child_id=stream1._id) + stream_relation1 = StreamRelation(parent_id=show.id, child_id=stream1.id) test_scoped_db_session.add(stream_relation1) test_scoped_db_session.commit() - assert test_scoped_db_session.query(Show).filter_by(_id=show._id).count() == 1 - assert test_scoped_db_session.query(Season).filter_by(parent_id=show._id).count() == 1 - assert 
test_scoped_db_session.query(Episode).filter_by(parent_id=season1._id).count() == 2 - assert test_scoped_db_session.query(StreamRelation).filter_by(parent_id=show._id).count() == 1 - assert test_scoped_db_session.query(Stream).filter_by(_id=stream1._id).count() == 1 + assert test_scoped_db_session.query(Show).filter_by(_id=show.id).count() == 1 + assert test_scoped_db_session.query(Season).filter_by(parent_id=show.id).count() == 1 + assert test_scoped_db_session.query(Episode).filter_by(parent_id=season1.id).count() == 2 + assert test_scoped_db_session.query(StreamRelation).filter_by(parent_id=show.id).count() == 1 + assert test_scoped_db_session.query(Stream).filter_by(_id=stream1.id).count() == 1 - delete_media_item_by_id(show._id) + delete_media_item_by_id(show.id) - assert test_scoped_db_session.query(Show).filter_by(_id=show._id).count() == 0 - assert test_scoped_db_session.query(Season).filter_by(parent_id=show._id).count() == 0 - assert test_scoped_db_session.query(Episode).filter_by(parent_id=season1._id).count() == 0 - assert test_scoped_db_session.query(StreamRelation).filter_by(parent_id=show._id).count() == 0 - assert test_scoped_db_session.query(Stream).filter_by(_id=stream1._id).count() == 0 + assert test_scoped_db_session.query(Show).filter_by(_id=show.id).count() == 0 + assert test_scoped_db_session.query(Season).filter_by(parent_id=show.id).count() == 0 + assert test_scoped_db_session.query(Episode).filter_by(parent_id=season1.id).count() == 0 + assert test_scoped_db_session.query(StreamRelation).filter_by(parent_id=show.id).count() == 0 + assert test_scoped_db_session.query(Stream).filter_by(_id=stream1.id).count() == 0 def test_delete_show_by_item_id_with_seasons_and_episodes_success(test_scoped_db_session): show = Show({"title": "New Show"}) @@ -268,14 +268,14 @@ def test_delete_show_by_item_id_with_seasons_and_episodes_success(test_scoped_db test_scoped_db_session.commit() season1 = Season({"number": 1, "parent": show}) - season1.parent_id = show._id + season1.parent_id = show.id test_scoped_db_session.add(season1) test_scoped_db_session.commit() episode1 = Episode({"number": 1}) episode2 = Episode({"number": 2}) - episode1.parent_id = season1._id - episode2.parent_id = season1._id + episode1.parent_id = season1.id + episode2.parent_id = season1.id test_scoped_db_session.add(episode1) test_scoped_db_session.add(episode2) test_scoped_db_session.commit() @@ -291,23 +291,23 @@ def test_delete_show_by_item_id_with_seasons_and_episodes_success(test_scoped_db test_scoped_db_session.add(stream1) test_scoped_db_session.commit() - stream_relation1 = StreamRelation(parent_id=show._id, child_id=stream1._id) + stream_relation1 = StreamRelation(parent_id=show.id, child_id=stream1.id) test_scoped_db_session.add(stream_relation1) test_scoped_db_session.commit() - assert test_scoped_db_session.query(Show).filter_by(_id=show._id).count() == 1 - assert test_scoped_db_session.query(Season).filter_by(parent_id=show._id).count() == 1 - assert test_scoped_db_session.query(Episode).filter_by(parent_id=season1._id).count() == 2 - assert test_scoped_db_session.query(StreamRelation).filter_by(parent_id=show._id).count() == 1 - assert test_scoped_db_session.query(Stream).filter_by(_id=stream1._id).count() == 1 + assert test_scoped_db_session.query(Show).filter_by(_id=show.id).count() == 1 + assert test_scoped_db_session.query(Season).filter_by(parent_id=show.id).count() == 1 + assert test_scoped_db_session.query(Episode).filter_by(parent_id=season1.id).count() == 2 + assert 
test_scoped_db_session.query(StreamRelation).filter_by(parent_id=show.id).count() == 1 + assert test_scoped_db_session.query(Stream).filter_by(_id=stream1.id).count() == 1 delete_media_item_by_item_id(show.item_id) - assert test_scoped_db_session.query(Show).filter_by(_id=show._id).count() == 0 - assert test_scoped_db_session.query(Season).filter_by(parent_id=show._id).count() == 0 - assert test_scoped_db_session.query(Episode).filter_by(parent_id=season1._id).count() == 0 - assert test_scoped_db_session.query(StreamRelation).filter_by(parent_id=show._id).count() == 0 - assert test_scoped_db_session.query(Stream).filter_by(_id=stream1._id).count() == 0 + assert test_scoped_db_session.query(Show).filter_by(_id=show.id).count() == 0 + assert test_scoped_db_session.query(Season).filter_by(parent_id=show.id).count() == 0 + assert test_scoped_db_session.query(Episode).filter_by(parent_id=season1.id).count() == 0 + assert test_scoped_db_session.query(StreamRelation).filter_by(parent_id=show.id).count() == 0 + assert test_scoped_db_session.query(Stream).filter_by(_id=stream1.id).count() == 0 def test_delete_media_items_by_ids_success(test_scoped_db_session): media_item1 = MediaItem({"name": "New MediaItem 1"}) @@ -339,28 +339,28 @@ def test_delete_media_items_by_ids_success(test_scoped_db_session): test_scoped_db_session.add(stream2) test_scoped_db_session.commit() - stream_relation1 = StreamRelation(parent_id=media_item1._id, child_id=stream1._id) - stream_relation2 = StreamRelation(parent_id=media_item2._id, child_id=stream2._id) + stream_relation1 = StreamRelation(parent_id=media_item1.id, child_id=stream1.id) + stream_relation2 = StreamRelation(parent_id=media_item2.id, child_id=stream2.id) test_scoped_db_session.add(stream_relation1) test_scoped_db_session.add(stream_relation2) test_scoped_db_session.commit() - assert test_scoped_db_session.query(MediaItem).filter_by(_id=media_item1._id).count() == 1 - assert test_scoped_db_session.query(MediaItem).filter_by(_id=media_item2._id).count() == 1 - assert test_scoped_db_session.query(StreamRelation).filter_by(parent_id=media_item1._id).count() == 1 - assert test_scoped_db_session.query(StreamRelation).filter_by(parent_id=media_item2._id).count() == 1 - assert test_scoped_db_session.query(Stream).filter_by(_id=stream1._id).count() == 1 - assert test_scoped_db_session.query(Stream).filter_by(_id=stream2._id).count() == 1 - assert media_item1._id != media_item2._id + assert test_scoped_db_session.query(MediaItem).filter_by(_id=media_item1.id).count() == 1 + assert test_scoped_db_session.query(MediaItem).filter_by(_id=media_item2.id).count() == 1 + assert test_scoped_db_session.query(StreamRelation).filter_by(parent_id=media_item1.id).count() == 1 + assert test_scoped_db_session.query(StreamRelation).filter_by(parent_id=media_item2.id).count() == 1 + assert test_scoped_db_session.query(Stream).filter_by(_id=stream1.id).count() == 1 + assert test_scoped_db_session.query(Stream).filter_by(_id=stream2.id).count() == 1 + assert media_item1.id != media_item2.id - delete_media_items_by_ids([media_item1._id, media_item2._id]) + delete_media_items_by_ids([media_item1.id, media_item2.id]) - assert test_scoped_db_session.query(MediaItem).filter_by(_id=media_item1._id).count() == 0 - assert test_scoped_db_session.query(MediaItem).filter_by(_id=media_item2._id).count() == 0 - assert test_scoped_db_session.query(StreamRelation).filter_by(parent_id=media_item1._id).count() == 0 - assert 
test_scoped_db_session.query(StreamRelation).filter_by(parent_id=media_item2._id).count() == 0 - assert test_scoped_db_session.query(Stream).filter_by(_id=stream1._id).count() == 0 - assert test_scoped_db_session.query(Stream).filter_by(_id=stream2._id).count() == 0 + assert test_scoped_db_session.query(MediaItem).filter_by(_id=media_item1.id).count() == 0 + assert test_scoped_db_session.query(MediaItem).filter_by(_id=media_item2.id).count() == 0 + assert test_scoped_db_session.query(StreamRelation).filter_by(parent_id=media_item1.id).count() == 0 + assert test_scoped_db_session.query(StreamRelation).filter_by(parent_id=media_item2.id).count() == 0 + assert test_scoped_db_session.query(Stream).filter_by(_id=stream1.id).count() == 0 + assert test_scoped_db_session.query(Stream).filter_by(_id=stream2.id).count() == 0 def test_get_media_items_by_ids_success(test_scoped_db_session): show = Show({"title": "Test Show"}) @@ -369,14 +369,14 @@ def test_get_media_items_by_ids_success(test_scoped_db_session): test_scoped_db_session.commit() season = Season({"number": 1, "parent": show}) - season.parent_id = show._id + season.parent_id = show.id test_scoped_db_session.add(season) test_scoped_db_session.commit() episode1 = Episode({"number": 1}) episode2 = Episode({"number": 2}) - episode1.parent_id = season._id - episode2.parent_id = season._id + episode1.parent_id = season.id + episode2.parent_id = season.id test_scoped_db_session.add(episode1) test_scoped_db_session.add(episode2) test_scoped_db_session.commit() @@ -386,12 +386,12 @@ def test_get_media_items_by_ids_success(test_scoped_db_session): test_scoped_db_session.add(movie) test_scoped_db_session.commit() - media_items = get_media_items_by_ids([show._id, season._id, episode1._id, episode2._id, movie._id]) + media_items = get_media_items_by_ids([show.id, season.id, episode1.id, episode2.id, movie.id]) assert len(media_items) == 5 - assert any(isinstance(item, Show) and item._id == show._id for item in media_items) - assert any(isinstance(item, Season) and item._id == season._id for item in media_items) - assert any(isinstance(item, Episode) and item._id == episode1._id for item in media_items) - assert any(isinstance(item, Episode) and item._id == episode2._id for item in media_items) - assert any(isinstance(item, Movie) and item._id == movie._id for item in media_items) \ No newline at end of file + assert any(isinstance(item, Show) and item.id == show.id for item in media_items) + assert any(isinstance(item, Season) and item.id == season.id for item in media_items) + assert any(isinstance(item, Episode) and item.id == episode1.id for item in media_items) + assert any(isinstance(item, Episode) and item.id == episode2.id for item in media_items) + assert any(isinstance(item, Movie) and item.id == movie.id for item in media_items) \ No newline at end of file diff --git a/src/tests/test_symlink_library.py b/src/tests/test_symlink_library.py index aa4f07ab..5636885c 100644 --- a/src/tests/test_symlink_library.py +++ b/src/tests/test_symlink_library.py @@ -95,6 +95,6 @@ def test_media_item_creation(symlink_library, fs): fs.create_file("/fake/library/movies/Top Gun (1986) tt0092099.mkv") items = list(symlink_library.run()) assert len(items) == 1, "Should create one media item." - assert items[0].imdb_id == "tt0092099", "Media item should have the correct IMDb ID." + assert items[0].ids["imdb_id"] == "tt0092099", "Media item should have the correct IMDb ID." assert isinstance(items[0], Movie), "The created item should be a Movie." 
assert items[0].state == States.Completed, "The created item should be in the Completed state." diff --git a/src/utils/event_manager.py b/src/utils/event_manager.py index 17e175b2..04dcae83 100644 --- a/src/utils/event_manager.py +++ b/src/utils/event_manager.py @@ -12,7 +12,7 @@ import utils.websockets.manager as ws_manager from program.db.db import db from program.db.db_functions import _get_item_ids, _run_thread_with_db_item -from program.media.item import Season, Show +from program.media.item import ProfileData, Season, Show from program.types import Event @@ -103,7 +103,7 @@ def remove_item_from_queue(self, item): """ with self.mutex: for event in self._queued_events: - if event.item.imdb_id == item.imdb_id: + if event.item.ids["imdb_id"] == item.ids["imdb_id"]: self._queued_events.remove(event) logger.debug(f"Removed {item.log_string} from the queue.") return @@ -128,7 +128,7 @@ def remove_item_from_running(self, item): """ with self.mutex: for event in self._running_events: - if event.item._id == item._id or (event.item.type == "mediaitem" and event.item.imdb_id == item.imdb_id): + if event.item.id == item.id or (event.item.type == "mediaitem" and event.item.ids["imdb_id"] == item.ids["imdb_id"]): self._running_events.remove(event) logger.debug(f"Removed {item.log_string} from the running events.") return @@ -183,9 +183,11 @@ def cancel_job(self, item, suppress_logs=False): for future in self._futures: future_item_id = None future_related_ids = [] - + if hasattr(future, 'event') and hasattr(future.event, 'item'): future_item = future.event.item + if isinstance(future_item, ProfileData): + future_item = future_item.parent future_item_id, future_related_ids = _get_item_ids(session, future_item) if future_item_id in ids_to_cancel or any(rid in ids_to_cancel for rid in future_related_ids): @@ -202,8 +204,8 @@ def cancel_job(self, item, suppress_logs=False): self._futures.remove(future) # Clear from queued and running events - self._queued_events = [event for event in self._queued_events if event.item._id != item._id and event.item.imdb_id != item.imdb_id] - self._running_events = [event for event in self._running_events if event.item._id != item._id and event.item.imdb_id != item.imdb_id] + self._queued_events = [event for event in self._queued_events if event.item.id != item.id and event.item.ids["imdb_id"] != item.ids["imdb_id"]] + self._running_events = [event for event in self._running_events if event.item.id != item.id and event.item.ids["imdb_id"] != item.ids["imdb_id"]] logger.debug(f"Canceled jobs for item {item.log_string} and its children.") @@ -236,7 +238,7 @@ def _id_in_queue(self, _id): Returns: bool: True if the item is in the queue, False otherwise. """ - return any(event.item._id == _id for event in self._queued_events) + return any(event.item.id == _id for event in self._queued_events) def _id_in_running_events(self, _id): """ @@ -248,7 +250,7 @@ def _id_in_running_events(self, _id): Returns: bool: True if the item is in the running events, False otherwise. """ - return any(event.item._id == _id for event in self._running_events) + return any(event.item.id == _id for event in self._running_events) def _imdb_id_in_queue(self, imdb_id): """ @@ -260,7 +262,7 @@ def _imdb_id_in_queue(self, imdb_id): Returns: bool: True if the item is in the queue, False otherwise. 
""" - return any(event.item.imdb_id == imdb_id for event in self._queued_events) + return any(event.item.ids["imdb_id"] == imdb_id for event in self._queued_events) def _imdb_id_in_running_events(self, imdb_id): """ @@ -272,7 +274,7 @@ def _imdb_id_in_running_events(self, imdb_id): Returns: bool: True if the item is in the running events, False otherwise. """ - return any(event.item.imdb_id == imdb_id for event in self._running_events) + return any(event.item.ids["imdb_id"] == imdb_id for event in self._running_events) def add_event(self, event): """ @@ -300,10 +302,10 @@ def add_event(self, event): return False else: # Items that are not in the database - if self._imdb_id_in_queue(event.item.imdb_id): + if self._imdb_id_in_queue(event.item.ids["imdb_id"]): logger.debug(f"Item {event.item.log_string} is already in the queue, skipping.") return False - elif self._imdb_id_in_running_events(event.item.imdb_id): + elif self._imdb_id_in_running_events(event.item.ids["imdb_id"]): logger.debug(f"Item {event.item.log_string} is already running, skipping.") return False @@ -336,8 +338,8 @@ def get_event_updates(self): return { event_type.lower(): [ { - "item_id": event.item._id, - "imdb_id": event.item.imdb_id, + "item_id": event.item.id, + "imdb_id": event.item.ids["imdb_id"], "title": event.item.log_string, "type": event.item.type, "emitted_by": event.emitted_by if isinstance(event.emitted_by, str) else event.emitted_by.__name__, diff --git a/src/utils/websockets/manager.py b/src/utils/websockets/manager.py index 5d364772..e88ff0e7 100644 --- a/src/utils/websockets/manager.py +++ b/src/utils/websockets/manager.py @@ -31,7 +31,7 @@ async def _send_json(message: json, websocket: WebSocket): def send_event_update(events: list): event_types = ["Scraping", "Downloader", "Symlinker", "Updater", "PostProcessing"] - message = {event_type.lower(): [event.item._id for event in events if event.emitted_by == event_type] for event_type in event_types} + message = {event_type.lower(): [event.item.id for event in events if event.emitted_by == event_type] for event_type in event_types} broadcast({"type": "event_update", "message": message}) def send_health_update(status: str):