From a73fbfd6a6f0e1464cf05e55492c3b69876363c0 Mon Sep 17 00:00:00 2001 From: Gaisberg Date: Tue, 23 Jul 2024 16:29:23 +0300 Subject: [PATCH] fix: fix around 200 ruff errors --- src/controllers/actions.py | 12 ++---- src/controllers/default.py | 15 ++++---- src/controllers/items.py | 14 +++---- src/controllers/settings.py | 2 +- src/controllers/webhooks.py | 8 ++-- src/main.py | 6 +-- src/program/content/__init__.py | 5 --- src/program/content/mdblist.py | 2 +- src/program/content/overseerr.py | 12 +++--- src/program/content/trakt.py | 15 +++----- src/program/db/__init__.py | 1 - src/program/db/db.py | 15 +++++--- src/program/db/db_functions.py | 14 +++---- src/program/downloaders/__init__.py | 7 ++-- src/program/downloaders/alldebrid.py | 40 ++++++++------------ src/program/downloaders/realdebrid.py | 37 +++++++----------- src/program/downloaders/torbox.py | 14 ++----- src/program/indexers/trakt.py | 4 +- src/program/libraries/symlink.py | 24 ++++++------ src/program/media/item.py | 42 ++++++++++----------- src/program/program.py | 54 +++++++++++---------------- src/program/scrapers/__init__.py | 10 ++--- src/program/scrapers/annatar.py | 4 +- src/program/scrapers/comet.py | 16 ++++---- src/program/scrapers/jackett.py | 4 +- src/program/scrapers/knightcrawler.py | 2 +- src/program/scrapers/mediafusion.py | 6 +-- src/program/scrapers/orionoid.py | 3 +- src/program/scrapers/prowlarr.py | 16 ++------ src/program/scrapers/shared.py | 35 +++++++++-------- src/program/scrapers/torbox.py | 7 +--- src/program/scrapers/torrentio.py | 4 +- src/program/scrapers/zilean.py | 6 +-- src/program/settings/models.py | 7 ++-- src/program/state_transition.py | 2 +- src/program/symlink.py | 24 ++++++------ src/program/types.py | 6 ++- src/program/updaters/plex.py | 2 +- src/tests/test_debrid_matching.py | 12 +++--- src/tests/test_ranking.py | 2 +- src/tests/test_settings_migration.py | 15 ++++---- src/tests/test_states_processing.py | 4 +- src/utils/logger.py | 2 - 
src/utils/ratelimiter.py | 1 + src/utils/request.py | 4 +- 45 files changed, 235 insertions(+), 302 deletions(-) diff --git a/src/controllers/actions.py b/src/controllers/actions.py index 6c4f3b30..0dee9dbb 100644 --- a/src/controllers/actions.py +++ b/src/controllers/actions.py @@ -1,15 +1,9 @@ -from typing import Any, Dict, Optional +from typing import Any, Dict -import pydantic from fastapi import APIRouter, Request -from program.content.overseerr import Overseerr -from program.indexers.trakt import TraktIndexer, get_imdbid_from_tmdb -from program.media.item import MediaItem, Show -from requests import RequestException +from program.media.item import MediaItem from utils.logger import logger -from .models.overseerr import OverseerrWebhook - router = APIRouter( prefix="/actions", responses={404: {"description": "Not found"}}, @@ -21,7 +15,7 @@ async def request(request: Request, imdb_id: str) -> Dict[str, Any]: try: new_item = MediaItem({"imdb_id": imdb_id, "requested_by": "manually"}) request.app.program.add_to_queue(new_item) - except Exception as e: + except Exception: logger.error(f"Failed to create item from imdb_id: {imdb_id}") return {"success": False, "message": "Failed to create item from imdb_id"} diff --git a/src/controllers/default.py b/src/controllers/default.py index b2de087f..1b7b9a0d 100644 --- a/src/controllers/default.py +++ b/src/controllers/default.py @@ -1,16 +1,15 @@ import time +import program.db.db_functions as DB import requests from fastapi import APIRouter, HTTPException, Request -from program.media.item import MediaItem from program.content.trakt import TraktContent +from program.db.db import db +from program.media.item import Episode, MediaItem, Movie, Season, Show from program.media.state import States from program.scrapers import Scraping from program.settings.manager import settings_manager -from program.media.item import Episode, MediaItem, Movie, Season, Show -from program.db.db import db -from sqlalchemy import select, func 
-import program.db.db_functions as DB +from sqlalchemy import func, select router = APIRouter( responses={404: {"description": "Not found"}}, @@ -102,12 +101,12 @@ async def trakt_oauth_callback(code: str, request: Request): @router.get("/stats") -async def get_stats(request: Request): +async def get_stats(_: Request): payload = {} with db.Session() as session: - movies_symlinks = session.execute(select(func.count(Movie._id)).where(Movie.symlinked == True)).scalar_one() - episodes_symlinks = session.execute(select(func.count(Episode._id)).where(Episode.symlinked == True)).scalar_one() + movies_symlinks = session.execute(select(func.count(Movie._id)).where(Movie.symlinked is True)).scalar_one() + episodes_symlinks = session.execute(select(func.count(Episode._id)).where(Episode.symlinked is True)).scalar_one() total_symlinks = movies_symlinks + episodes_symlinks total_movies = session.execute(select(func.count(Movie._id))).scalar_one() diff --git a/src/controllers/items.py b/src/controllers/items.py index f9b9672b..4c202530 100644 --- a/src/controllers/items.py +++ b/src/controllers/items.py @@ -1,14 +1,14 @@ from typing import List, Optional import Levenshtein +import program.db.db_functions as DB from fastapi import APIRouter, HTTPException, Request from program.db.db import db -from sqlalchemy import select, func -import program.db.db_functions as DB -from program.media.item import Episode, MediaItem, Movie, Season, Show +from program.media.item import Episode, MediaItem, Season from program.media.state import States from program.symlink import Symlinker from pydantic import BaseModel +from sqlalchemy import func, select from utils.logger import logger router = APIRouter( @@ -36,7 +36,7 @@ async def get_states(): description="Fetch media items with optional filters and pagination", ) async def get_items( - request: Request, + _: Request, limit: Optional[int] = 50, page: Optional[int] = 1, type: Optional[str] = None, @@ -115,7 +115,7 @@ async def get_items( 
@router.get("/extended/{item_id}") -async def get_extended_item_info(request: Request, item_id: str): +async def get_extended_item_info(_: Request, item_id: str): with db.Session() as session: item = DB._get_item_from_db(session, MediaItem({"imdb_id":str(item_id)})) if item is None: @@ -199,12 +199,12 @@ async def remove_item( } except Exception as e: logger.error(f"Failed to remove item with {id_type} {item_id or imdb_id}: {e}") - raise HTTPException(status_code=500, detail="Internal server error") + raise HTTPException from e(status_code=500, detail="Internal server error") @router.get("/imdb/{imdb_id}") async def get_imdb_info( - request: Request, + _: Request, imdb_id: str, season: Optional[int] = None, episode: Optional[int] = None, diff --git a/src/controllers/settings.py b/src/controllers/settings.py index 0e321375..64de995e 100644 --- a/src/controllers/settings.py +++ b/src/controllers/settings.py @@ -97,7 +97,7 @@ async def set_settings(settings: List[SetSettings]): settings_manager.load(settings_dict=updated_settings.model_dump()) settings_manager.save() # Ensure the changes are persisted except ValidationError as e: - raise HTTPException( + raise HTTPException from e( status_code=400, detail=f"Failed to update settings: {str(e)}", ) diff --git a/src/controllers/webhooks.py b/src/controllers/webhooks.py index f4fda147..f5db0205 100644 --- a/src/controllers/webhooks.py +++ b/src/controllers/webhooks.py @@ -3,8 +3,8 @@ import pydantic from fastapi import APIRouter, Request from program.content.overseerr import Overseerr -from program.indexers.trakt import TraktIndexer, get_imdbid_from_tmdb -from program.media.item import MediaItem, Show +from program.indexers.trakt import get_imdbid_from_tmdb +from program.media.item import MediaItem from requests import RequestException from utils.logger import logger @@ -38,7 +38,7 @@ async def overseerr(request: Request) -> Dict[str, Any]: if not imdb_id: try: imdb_id = get_imdbid_from_tmdb(req.media.tmdbId) - except 
RequestException as e: + except RequestException: logger.error(f"Failed to get imdb_id from TMDB: {req.media.tmdbId}") return {"success": False, "message": "Failed to get imdb_id from TMDB", "title": req.subject} if not imdb_id: @@ -59,7 +59,7 @@ async def overseerr(request: Request) -> Dict[str, Any]: try: new_item = MediaItem({"imdb_id": imdb_id, "requested_by": "overseerr"}) request.app.program.add_to_queue(new_item) - except Exception as e: + except Exception: logger.error(f"Failed to create item from imdb_id: {imdb_id}") return {"success": False, "message": "Failed to create item from imdb_id", "title": req.subject} diff --git a/src/main.py b/src/main.py index 5075ad3b..77a3a1f7 100644 --- a/src/main.py +++ b/src/main.py @@ -7,6 +7,7 @@ import traceback import uvicorn +from controllers.actions import router as actions_router from controllers.default import router as default_router from controllers.items import router as items_router @@ -14,7 +15,6 @@ from controllers.settings import router as settings_router from controllers.tmdb import router as tmdb_router from controllers.webhooks import router as webhooks_router -from controllers.actions import router as actions_router from fastapi import FastAPI from fastapi.middleware.cors import CORSMiddleware from program import Program @@ -99,8 +99,8 @@ def run_in_thread(self): self.should_exit = True sys.exit(0) -def signal_handler(sig, frame): - logger.log('PROGRAM','Exiting Gracefully.') +def signal_handler(): + logger.log("PROGRAM","Exiting Gracefully.") app.program.stop() sys.exit(0) diff --git a/src/program/content/__init__.py b/src/program/content/__init__.py index f69b112a..e69de29b 100644 --- a/src/program/content/__init__.py +++ b/src/program/content/__init__.py @@ -1,5 +0,0 @@ -from .listrr import Listrr -from .mdblist import Mdblist -from .overseerr import Overseerr -from .plex_watchlist import PlexWatchlist -from .trakt import TraktContent diff --git a/src/program/content/mdblist.py 
b/src/program/content/mdblist.py index ff1e9c7d..a29c5469 100644 --- a/src/program/content/mdblist.py +++ b/src/program/content/mdblist.py @@ -5,8 +5,8 @@ from program.media.item import MediaItem from program.settings.manager import settings_manager from utils.logger import logger -from utils.request import get, ping from utils.ratelimiter import RateLimiter, RateLimitExceeded +from utils.request import get, ping class Mdblist: diff --git a/src/program/content/overseerr.py b/src/program/content/overseerr.py index 4f923ed6..553d8e05 100644 --- a/src/program/content/overseerr.py +++ b/src/program/content/overseerr.py @@ -44,8 +44,8 @@ def validate(self) -> bool: ) return False return response.is_ok - except (ConnectionError, RetryError, MaxRetryError, NewConnectionError) as e: - logger.error(f"Overseerr URL is not reachable, or it timed out") + except (ConnectionError, RetryError, MaxRetryError, NewConnectionError): + logger.error("Overseerr URL is not reachable, or it timed out") return False except Exception as e: logger.error(f"Unexpected error during Overseerr validation: {str(e)}") @@ -71,7 +71,7 @@ def run(self): logger.error(f"Unexpected error during fetching requests: {str(e)}") return - if not response.is_ok or not hasattr(response.data, 'pageInfo') or getattr(response.data.pageInfo, 'results', 0) == 0: + if not response.is_ok or not hasattr(response.data, "pageInfo") or getattr(response.data.pageInfo, "results", 0) == 0: return # Lets look at approved items only that are only in the pending state @@ -117,13 +117,13 @@ def get_imdb_id(self, data) -> str: ) except (ConnectionError, RetryError, MaxRetryError) as e: logger.error(f"Failed to fetch media details from overseerr: {str(e)}") - return + return None except Exception as e: logger.error(f"Unexpected error during fetching media details: {str(e)}") - return + return None if not response.is_ok or not hasattr(response.data, "externalIds"): - return + return None imdb_id = getattr(response.data.externalIds, 
"imdbId", None) if imdb_id: diff --git a/src/program/content/trakt.py b/src/program/content/trakt.py index c78d7510..c741a8be 100644 --- a/src/program/content/trakt.py +++ b/src/program/content/trakt.py @@ -1,16 +1,14 @@ """Trakt content module""" import re import time -from types import SimpleNamespace -from urllib.parse import urlencode, urlparse +from urllib.parse import urlencode -import regex from program.media.item import MediaItem, Movie, Show from program.settings.manager import settings_manager from requests import RequestException from utils.logger import logger -from utils.request import get, post from utils.ratelimiter import RateLimiter +from utils.request import get, post class TraktContent: @@ -43,7 +41,7 @@ def validate(self) -> bool: logger.error("Trakt API key is not set.") return False response = get(f"{self.api_url}/lists/2", additional_headers=self.headers) - if not getattr(response.data, 'name', None): + if not getattr(response.data, "name", None): logger.error("Invalid user settings received from Trakt.") return False return True @@ -222,8 +220,7 @@ def perform_oauth_flow(self) -> str: "client_id": self.settings.oauth_client_id, "redirect_uri": self.settings.oauth_redirect_uri, } - auth_url = f"{self.api_url}/oauth/authorize?{urlencode(params)}" - return auth_url + return f"{self.api_url}/oauth/authorize?{urlencode(params)}" def handle_oauth_callback(self, code: str) -> bool: """Handle the OAuth callback and exchange the code for an access token.""" @@ -358,6 +355,6 @@ def _resolve_short_url(short_url) -> str or None: return None patterns: dict[str, re.Pattern] = { - "user_list": re.compile(r'https://trakt.tv/users/([^/]+)/lists/([^/]+)'), - "short_list": re.compile(r'https://trakt.tv/lists/\d+') + "user_list": re.compile(r"https://trakt.tv/users/([^/]+)/lists/([^/]+)"), + "short_list": re.compile(r"https://trakt.tv/lists/\d+") } \ No newline at end of file diff --git a/src/program/db/__init__.py b/src/program/db/__init__.py index 
15bd15ce..e69de29b 100644 --- a/src/program/db/__init__.py +++ b/src/program/db/__init__.py @@ -1 +0,0 @@ -from .db import db \ No newline at end of file diff --git a/src/program/db/db.py b/src/program/db/db.py index 85dbb6af..3269e533 100644 --- a/src/program/db/db.py +++ b/src/program/db/db.py @@ -1,20 +1,22 @@ -from sqla_wrapper import Alembic, SQLAlchemy +import os + +from alembic.autogenerate import compare_metadata +from alembic.runtime.migration import MigrationContext from program.settings.manager import settings_manager +from sqla_wrapper import Alembic, SQLAlchemy from utils import data_dir_path db = SQLAlchemy(settings_manager.settings.database.host) script_location = data_dir_path / "alembic/" -import os + if not os.path.exists(script_location): os.makedirs(script_location) alembic = Alembic(db, script_location) alembic.init(script_location) -from alembic.autogenerate import compare_metadata -from alembic.runtime.migration import MigrationContext # https://stackoverflow.com/questions/61374525/how-do-i-check-if-alembic-migrations-need-to-be-generated def need_upgrade_check() -> bool: @@ -24,10 +26,11 @@ def need_upgrade_check() -> bool: diff = compare_metadata(mc, db.Model.metadata) return diff != [] + def run_migrations() -> None: try: if need_upgrade_check(): alembic.revision("auto-upg") alembic.upgrade() - except: - alembic.upgrade() \ No newline at end of file + except Exception as _: + alembic.upgrade() diff --git a/src/program/db/db_functions.py b/src/program/db/db_functions.py index 7e4f74c9..1bd1a286 100644 --- a/src/program/db/db_functions.py +++ b/src/program/db/db_functions.py @@ -1,9 +1,11 @@ import os + from program.media.item import Episode, MediaItem, Movie, Season, Show -from sqlalchemy import select, func +from program.types import Event +from sqlalchemy import func, select from sqlalchemy.orm import joinedload from utils.logger import logger -from program.types import Event + from .db import db @@ -16,7 +18,7 @@ def 
_ensure_item_exists_in_db(item:MediaItem) -> bool: def _get_item_type_from_db(item: MediaItem) -> str: with db.Session() as session: if item._id is None: - return session.execute(select(MediaItem.type).where( (MediaItem.imdb_id==item.imdb_id ) & ( (MediaItem.type == 'show') | (MediaItem.type == 'movie') ) )).scalar_one() + return session.execute(select(MediaItem.type).where( (MediaItem.imdb_id==item.imdb_id ) & ( (MediaItem.type == "show") | (MediaItem.type == "movie") ) )).scalar_one() return session.execute(select(MediaItem.type).where(MediaItem._id==item._id)).scalar_one() def _store_item(item: MediaItem): @@ -55,8 +57,7 @@ def _get_item_from_db(session, item: MediaItem): return None def _check_for_and_run_insertion_required(session, item: MediaItem) -> None: - if _ensure_item_exists_in_db(item) == False: - if isinstance(item, (Show, Movie, Season, Episode)): + if _ensure_item_exists_in_db(item) is False and isinstance(item, (Show, Movie, Season, Episode)): item.store_state() session.add(item) session.commit() @@ -82,7 +83,7 @@ def _run_thread_with_db_item(fn, service, program, input_item: MediaItem | None) all_media_items = False program._remove_from_running_items(item, service.__name__) - if all_media_items == True: + if all_media_items is True: for i in res: program._push_event_queue(Event(emitted_by="_run_thread_with_db_item", item=i)) session.commit() @@ -118,7 +119,6 @@ def _run_thread_with_db_item(fn, service, program, input_item: MediaItem | None) reset = os.getenv("HARD_RESET", None) if reset is not None and reset.lower() in ["true","1"]: - print("Hard reset detected, dropping all tables") # Logging isn't initialized here yet. 
def run_delete(_type): with db.Session() as session: all = session.execute(select(_type).options(joinedload("*"))).unique().scalars().all() diff --git a/src/program/downloaders/__init__.py b/src/program/downloaders/__init__.py index fe060767..089b0994 100644 --- a/src/program/downloaders/__init__.py +++ b/src/program/downloaders/__init__.py @@ -1,9 +1,10 @@ -from .realdebrid import RealDebridDownloader -from .alldebrid import AllDebridDownloader -from .torbox import TorBoxDownloader from program.media.item import MediaItem from utils.logger import logger +from .alldebrid import AllDebridDownloader +from .realdebrid import RealDebridDownloader +from .torbox import TorBoxDownloader + class Downloader: def __init__(self): diff --git a/src/program/downloaders/alldebrid.py b/src/program/downloaders/alldebrid.py index f0cbc45a..01d645ca 100644 --- a/src/program/downloaders/alldebrid.py +++ b/src/program/downloaders/alldebrid.py @@ -14,8 +14,8 @@ from RTN.parser import parse from RTN.patterns import extract_episodes from utils.logger import logger -from utils.request import get, ping, post from utils.ratelimiter import RateLimiter +from utils.request import get, ping, post WANTED_FORMATS = {".mkv", ".mp4", ".avi"} AD_BASE_URL = "https://api.alldebrid.com/v4" @@ -172,9 +172,9 @@ def _chunked(lst: List, n: int) -> Generator[List, None, None]: for stream_chunk in _chunked(filtered_streams, 5): try: - params = {'agent': AD_AGENT} + params = {"agent": AD_AGENT} for i, magnet in enumerate(stream_chunk): - params[f'magnets[{i}]'] = magnet + params[f"magnets[{i}]"] = magnet response = get(f"{AD_BASE_URL}/magnet/instant", params=params, additional_headers=self.auth_headers, proxies=self.proxy, response_type=dict, specific_rate_limiter=self.inner_rate_limit, overall_rate_limiter=self.overall_rate_limiter) if response.is_ok and self._evaluate_stream_response(response.data, processed_stream_hashes, item): @@ -255,7 +255,7 @@ def _is_wanted_movie(self, file: dict, item: Movie) -> 
bool: return False min_size = self.download_settings.movie_filesize_min * 1_000_000 - max_size = self.download_settings.movie_filesize_max * 1_000_000 if self.download_settings.movie_filesize_max != -1 else float('inf') + max_size = self.download_settings.movie_filesize_max * 1_000_000 if self.download_settings.movie_filesize_max != -1 else float("inf") if not isinstance(file, dict) or file.get("s", 0) < min_size or file.get("s", 0) > max_size or splitext(file.get("n", "").lower())[1] not in WANTED_FORMATS: return False @@ -276,7 +276,7 @@ def _is_wanted_episode(self, file: dict, item: Episode) -> bool: return False min_size = self.download_settings.episode_filesize_min * 1_000_000 - max_size = self.download_settings.episode_filesize_max * 1_000_000 if self.download_settings.episode_filesize_max != -1 else float('inf') + max_size = self.download_settings.episode_filesize_max * 1_000_000 if self.download_settings.episode_filesize_max != -1 else float("inf") if not isinstance(file, dict) or file.get("s", 0) < min_size or file.get("s", 0) > max_size or splitext(file.get("n", "").lower())[1] not in WANTED_FORMATS: return False @@ -299,7 +299,7 @@ def _is_wanted_season(self, files: list, item: Season) -> bool: return False min_size = self.download_settings.episode_filesize_min * 1_000_000 - max_size = self.download_settings.episode_filesize_max * 1_000_000 if self.download_settings.episode_filesize_max != -1 else float('inf') + max_size = self.download_settings.episode_filesize_max * 1_000_000 if self.download_settings.episode_filesize_max != -1 else float("inf") filenames = [ file for file in files @@ -344,7 +344,7 @@ def _is_wanted_show(self, files: list, item: Show) -> bool: return False min_size = self.download_settings.episode_filesize_min * 1_000_000 - max_size = self.download_settings.episode_filesize_max * 1_000_000 if self.download_settings.episode_filesize_max != -1 else float('inf') + max_size = self.download_settings.episode_filesize_max * 1_000_000 if 
self.download_settings.episode_filesize_max != -1 else float("inf") filenames = [ file for file in files @@ -466,10 +466,7 @@ def set_active_files(self, item: MediaItem) -> None: for file in link.files: if isinstance(file, SimpleNamespace) and hasattr(file, "e"): for subfile in file.e: - if isinstance(item, Movie) and self._is_wanted_movie(subfile, item): - item.set("file", subfile.n) - break - elif isinstance(item, Episode) and self._is_wanted_episode(subfile, item): + if isinstance(item, Movie) and self._is_wanted_movie(subfile, item) or isinstance(item, Episode) and self._is_wanted_episode(subfile, item): item.set("file", subfile.n) break if not item.folder or not item.alternative_folder or not item.file: @@ -493,9 +490,7 @@ def set_active_files(self, item: MediaItem) -> None: for file in link.files: if isinstance(file, SimpleNamespace) and hasattr(file, "e"): for subfile in file.e: - if isinstance(item, Season) and self._is_wanted_season(link.files, item): - break - elif isinstance(item, Show) and self._is_wanted_show(link.files, item): + if isinstance(item, Season) and self._is_wanted_season(link.files, item) or isinstance(item, Show) and self._is_wanted_show(link.files, item): break if isinstance(item, Season) and item.folder: @@ -515,9 +510,7 @@ def set_active_files(self, item: MediaItem) -> None: for file in link.files: if isinstance(file, SimpleNamespace) and hasattr(file, "e"): for subfile in file.e: - if isinstance(item, Season) and self._is_wanted_season(link.files, item): - break - elif isinstance(item, Show) and self._is_wanted_show(link.files, item): + if isinstance(item, Season) and self._is_wanted_season(link.files, item) or isinstance(item, Show) and self._is_wanted_show(link.files, item): break ### API Methods for All-Debrid below @@ -530,7 +523,7 @@ def add_magnet(self, item: MediaItem) -> str: try: hash = item.active_stream.get("hash") params = {"agent": AD_AGENT} - params[f'magnets[0]'] = hash + params["magnets[0]"] = hash response = post( 
f"{AD_BASE_URL}/magnet/upload", params=params, @@ -626,9 +619,7 @@ def check_season(season): for file in torrent_info.files: if file["selected"] == 1: file_episodes = extract_episodes(Path(file["path"]).name) - if season_number in file_episodes: - matched_episodes.update(file_episodes) - elif one_season and file_episodes: + if season_number in file_episodes or one_season and file_episodes: matched_episodes.update(file_episodes) return len(matched_episodes) >= len(episodes_in_season) // 2 @@ -644,10 +635,9 @@ def check_season(season): if check_season(item): logger.info(f"{item.log_string} already exists in All-Debrid account.") return True - elif isinstance(item, Episode): - if check_episode(): - logger.info(f"{item.log_string} already exists in All-Debrid account.") - return True + elif isinstance(item, Episode) and check_episode(): + logger.info(f"{item.log_string} already exists in All-Debrid account.") + return True logger.debug(f"No matching item found for {item.log_string}") return False \ No newline at end of file diff --git a/src/program/downloaders/realdebrid.py b/src/program/downloaders/realdebrid.py index 555a8263..b2d140ca 100644 --- a/src/program/downloaders/realdebrid.py +++ b/src/program/downloaders/realdebrid.py @@ -6,7 +6,7 @@ from os.path import splitext from pathlib import Path from types import SimpleNamespace -from typing import Generator, List, Union +from typing import Generator, List from program.media.item import Episode, MediaItem, Movie, Season, Show from program.media.state import States @@ -16,8 +16,8 @@ from RTN.parser import parse from RTN.patterns import extract_episodes from utils.logger import logger -from utils.request import get, ping, post from utils.ratelimiter import RateLimiter +from utils.request import get, ping, post WANTED_FORMATS = {".mkv", ".mp4", ".avi"} RD_BASE_URL = "https://api.real-debrid.com/rest/1.0" @@ -72,7 +72,7 @@ def validate(self) -> bool: if response.is_ok: user_info = response.response.json() expiration = 
user_info.get("expiration", "") - expiration_datetime = datetime.fromisoformat(expiration.replace('Z', '+00:00')).replace(tzinfo=None) + expiration_datetime = datetime.fromisoformat(expiration.replace("Z", "+00:00")).replace(tzinfo=None) time_left = expiration_datetime - datetime.utcnow().replace(tzinfo=None) days_left = time_left.days hours_left, minutes_left = divmod(time_left.seconds // 3600, 60) @@ -125,10 +125,7 @@ def run(self, item: MediaItem) -> Generator[MediaItem, None, None]: @staticmethod def log_item(item: MediaItem) -> None: """Log only the files downloaded for the item based on its type.""" - if isinstance(item, Movie): - if item.file and item.folder: - logger.log("DEBRID", f"Downloaded {item.log_string} with file: {item.file}") - elif isinstance(item, Episode): + if isinstance(item, (Episode, Movie)): if item.file and item.folder: logger.log("DEBRID", f"Downloaded {item.log_string} with file: {item.file}") elif isinstance(item, Season): @@ -245,7 +242,7 @@ def _is_wanted_movie(self, container: dict, item: Movie) -> bool: return False min_size = self.download_settings.movie_filesize_min * 1_000_000 - max_size = self.download_settings.movie_filesize_max * 1_000_000 if self.download_settings.movie_filesize_max != -1 else float('inf') + max_size = self.download_settings.movie_filesize_max * 1_000_000 if self.download_settings.movie_filesize_max != -1 else float("inf") filenames = sorted( (file for file in container.values() if file and file["filesize"] > min_size @@ -277,7 +274,7 @@ def _is_wanted_episode(self, container: dict, item: Episode) -> bool: return False min_size = self.download_settings.episode_filesize_min * 1_000_000 - max_size = self.download_settings.episode_filesize_max * 1_000_000 if self.download_settings.episode_filesize_max != -1 else float('inf') + max_size = self.download_settings.episode_filesize_max * 1_000_000 if self.download_settings.episode_filesize_max != -1 else float("inf") filenames = [ file for file in container.values() 
@@ -298,12 +295,7 @@ def _is_wanted_episode(self, container: dict, item: Episode) -> bool: parsed_file = parse(file["filename"], remove_trash=True) if not parsed_file or not parsed_file.episode or 0 in parsed_file.season: continue - if item.number in parsed_file.episode and item.parent.number in parsed_file.season: - item.set("folder", item.active_stream.get("name")) - item.set("alternative_folder", item.active_stream.get("alternative_name")) - item.set("file", file["filename"]) - return True - elif one_season and item.number in parsed_file.episode: + if item.number in parsed_file.episode and item.parent.number in parsed_file.season or one_season and item.number in parsed_file.episode: item.set("folder", item.active_stream.get("name")) item.set("alternative_folder", item.active_stream.get("alternative_name")) item.set("file", file["filename"]) @@ -317,7 +309,7 @@ def _is_wanted_season(self, container: dict, item: Season) -> bool: return False min_size = self.download_settings.episode_filesize_min * 1_000_000 - max_size = self.download_settings.episode_filesize_max * 1_000_000 if self.download_settings.episode_filesize_max != -1 else float('inf') + max_size = self.download_settings.episode_filesize_max * 1_000_000 if self.download_settings.episode_filesize_max != -1 else float("inf") # Filter and sort files once to improve performance filenames = [ @@ -375,7 +367,7 @@ def _is_wanted_show(self, container: dict, item: Show) -> bool: return False min_size = self.download_settings.episode_filesize_min * 1_000_000 - max_size = self.download_settings.episode_filesize_max * 1_000_000 if self.download_settings.episode_filesize_max != -1 else float('inf') + max_size = self.download_settings.episode_filesize_max * 1_000_000 if self.download_settings.episode_filesize_max != -1 else float("inf") # Filter and sort files once to improve performance filenames = [ @@ -649,9 +641,7 @@ def check_season(season): for file in torrent_info.files: if file.selected == 1: file_episodes = 
extract_episodes(Path(file.path).name) - if season_number in file_episodes: - matched_episodes.update(file_episodes) - elif one_season and file_episodes: + if season_number in file_episodes or one_season and file_episodes: matched_episodes.update(file_episodes) return len(matched_episodes) >= len(episodes_in_season) // 2 @@ -667,10 +657,9 @@ def check_season(season): if check_season(item): logger.info(f"{item.log_string} already exists in Real-Debrid account.") return True - elif isinstance(item, Episode): - if check_episode(): - logger.info(f"{item.log_string} already exists in Real-Debrid account.") - return True + elif isinstance(item, Episode) and check_episode(): + logger.info(f"{item.log_string} already exists in Real-Debrid account.") + return True logger.debug(f"No matching item found for {item.log_string}") return False \ No newline at end of file diff --git a/src/program/downloaders/torbox.py b/src/program/downloaders/torbox.py index 30f9b10f..a8ec40e7 100644 --- a/src/program/downloaders/torbox.py +++ b/src/program/downloaders/torbox.py @@ -1,15 +1,13 @@ import contextlib from datetime import datetime +from pathlib import Path from posixpath import splitext from typing import Generator -from pathlib import Path -from RTN import parse -from RTN.exceptions import GarbageTorrent -from requests import ConnectTimeout -from program.media.state import States from program.media.item import MediaItem +from program.media.state import States from program.settings.manager import settings_manager +from requests import ConnectTimeout from RTN import parse from RTN.exceptions import GarbageTorrent from utils.logger import logger @@ -184,11 +182,7 @@ def find_required_files(self, item, container): ): continue # Check if the file's season matches the item's season or if there's only one season - if season_num in parsed_file.season: - for ep_num in parsed_file.episode: - if ep_num in needed_episodes: - matched_files.append(file) - elif one_season: + if season_num in 
parsed_file.season or one_season: for ep_num in parsed_file.episode: if ep_num in needed_episodes: matched_files.append(file) diff --git a/src/program/indexers/trakt.py b/src/program/indexers/trakt.py index 445c0c2e..b139071e 100644 --- a/src/program/indexers/trakt.py +++ b/src/program/indexers/trakt.py @@ -188,8 +188,8 @@ def get_imdbid_from_tmdb(tmdb_id: str) -> Optional[str]: def get_imdb_id_from_list(namespaces): for ns in namespaces: - if ns.type == 'movie': + if ns.type == "movie": return ns.movie.ids.imdb - elif ns.type == 'show': + elif ns.type == "show": return ns.show.ids.imdb return None diff --git a/src/program/libraries/symlink.py b/src/program/libraries/symlink.py index 52c7bf4f..a396c938 100644 --- a/src/program/libraries/symlink.py +++ b/src/program/libraries/symlink.py @@ -60,12 +60,12 @@ def process_items(directory: Path, item_class, item_type: str, is_anime: bool = if files ] for path, filename in items: - imdb_id = re.search(r'(tt\d+)', filename) - title = re.search(r'(.+)?( \()', filename) + imdb_id = re.search(r"(tt\d+)", filename) + title = re.search(r"(.+)?( \()", filename) if not imdb_id or not title: logger.error(f"Can't extract {item_type} imdb_id or title at path {path / filename}") continue - item = item_class({'imdb_id': imdb_id.group(), 'title': title.group(1)}) + item = item_class({"imdb_id": imdb_id.group(), "title": title.group(1)}) if settings_manager.settings.force_refresh: item.set("symlinked", True) item.set("update_folder", path) @@ -80,28 +80,28 @@ def process_items(directory: Path, item_class, item_type: str, is_anime: bool = def process_shows(directory: Path, item_type: str, is_anime: bool = False) -> Show: """Process shows in the given directory and yield Show instances.""" for show in os.listdir(directory): - imdb_id = re.search(r'(tt\d+)', show) - title = re.search(r'(.+)?( \()', show) + imdb_id = re.search(r"(tt\d+)", show) + title = re.search(r"(.+)?( \()", show) if not imdb_id or not title: logger.log("NOT_FOUND", 
f"Can't extract {item_type} imdb_id or title at path {directory / show}") continue - show_item = Show({'imdb_id': imdb_id.group(), 'title': title.group(1)}) + show_item = Show({"imdb_id": imdb_id.group(), "title": title.group(1)}) if is_anime: show_item.is_anime = True seasons = {} for season in os.listdir(directory / show): - if not (season_number := re.search(r'(\d+)', season)): + if not (season_number := re.search(r"(\d+)", season)): logger.log("NOT_FOUND", f"Can't extract season number at path {directory / show / season}") continue - season_item = Season({'number': int(season_number.group())}) + season_item = Season({"number": int(season_number.group())}) episodes = {} for episode in os.listdir(directory / show / season): - if not (episode_number := re.search(r's\d+e(\d+)', episode)): + if not (episode_number := re.search(r"s\d+e(\d+)", episode)): logger.log("NOT_FOUND", f"Can't extract episode number at path {directory / show / season / episode}") # Delete the episode since it can't be indexed os.remove(directory / show / season / episode) continue - episode_item = Episode({'number': int(episode_number.group(1))}) + episode_item = Episode({"number": int(episode_number.group(1))}) if settings_manager.settings.force_refresh: episode_item.set("symlinked", True) episode_item.set("update_folder", f"{directory}/{show}/{season}/{episode}") @@ -114,9 +114,9 @@ def process_shows(directory: Path, item_type: str, is_anime: bool = False) -> Sh episodes[int(episode_number.group(1))] = episode_item if len(episodes) > 0: for i in range(1, max(episodes.keys())+1): - season_item.add_episode(episodes.get(i, Episode({'number': i}))) + season_item.add_episode(episodes.get(i, Episode({"number": i}))) seasons[int(season_number.group())] = season_item if len(seasons) > 0: for i in range(1, max(seasons.keys())+1): - show_item.add_season(seasons.get(i, Season({'number': i}))) + show_item.add_season(seasons.get(i, Season({"number": i}))) yield show_item \ No newline at end of file diff 
--git a/src/program/media/item.py b/src/program/media/item.py index 470d2fdc..8abe8f37 100644 --- a/src/program/media/item.py +++ b/src/program/media/item.py @@ -2,18 +2,16 @@ from datetime import datetime from typing import List, Optional, Self +import sqlalchemy +from program.db.db import db from program.media.state import States from RTN import Torrent, parse +from sqlalchemy import orm +from sqlalchemy.orm import Mapped, mapped_column, relationship + # from RTN.patterns import extract_episodes from utils.logger import logger -from sqlalchemy.orm import Mapped -from sqlalchemy.orm import mapped_column -from sqlalchemy.orm import relationship -import sqlalchemy -from sqlalchemy import orm - -from program.db.db import db class MediaItem(db.Model): """MediaItem class""" @@ -69,7 +67,7 @@ def __init__(self, item: dict) -> None: # user_id: Mapped[int] = mapped_column(sqlalchemy.ForeignKey("user_account.id")) # user: Mapped["User"] = relationship(lazy=False, back_populates="addresses") self.requested_at = item.get("requested_at", datetime.now()) - self.requested_by = item.get("requested_by", None) + self.requested_by = item.get("requested_by") self.indexed_at = None @@ -87,28 +85,28 @@ def __init__(self, item: dict) -> None: self.is_anime = item.get("is_anime", False) # Media related - self.title = item.get("title", None) - self.imdb_id = item.get("imdb_id", None) + self.title = item.get("title") + self.imdb_id = item.get("imdb_id") if self.imdb_id: self.imdb_link = f"https://www.imdb.com/title/{self.imdb_id}/" if not hasattr(self, "item_id"): self.item_id = self.imdb_id - self.tvdb_id = item.get("tvdb_id", None) - self.tmdb_id = item.get("tmdb_id", None) - self.network = item.get("network", None) - self.country = item.get("country", None) - self.language = item.get("language", None) - self.aired_at = item.get("aired_at", None) - self.year = item.get("year" , None) + self.tvdb_id = item.get("tvdb_id") + self.tmdb_id = item.get("tmdb_id") + self.network = 
item.get("network") + self.country = item.get("country") + self.language = item.get("language") + self.aired_at = item.get("aired_at") + self.year = item.get("year") self.genres = item.get("genres", []) # Plex related - self.key = item.get("key", None) - self.guid = item.get("guid", None) - self.update_folder = item.get("update_folder", None) + self.key = item.get("key") + self.guid = item.get("guid") + self.update_folder = item.get("update_folder") # Overseerr related - self.overseerr_id = item.get("overseerr_id", None) + self.overseerr_id = item.get("overseerr_id") def store_state(self) -> None: self.last_state = self._determine_state().name @@ -409,7 +407,7 @@ class Episode(MediaItem): __tablename__ = "Episode" _id: Mapped[int] = mapped_column(sqlalchemy.ForeignKey("MediaItem._id"), primary_key=True) parent_id: Mapped[int] = mapped_column(sqlalchemy.ForeignKey("Season._id"), use_existing_column=True) - parent: Mapped["Season"] = relationship(lazy=False, back_populates='episodes', foreign_keys="Episode.parent_id") + parent: Mapped["Season"] = relationship(lazy=False, back_populates="episodes", foreign_keys="Episode.parent_id") @orm.reconstructor def init_on_load(self): self.streams: Optional[dict[str, Torrent]] = {} diff --git a/src/program/program.py b/src/program/program.py index a332759a..48c6bcb1 100644 --- a/src/program/program.py +++ b/src/program/program.py @@ -26,14 +26,13 @@ from .symlink import Symlinker from .types import Event, Service - if settings_manager.settings.tracemalloc: import tracemalloc -from program.db.db import db, alembic, run_migrations -from sqlalchemy import select, func -from sqlalchemy.orm import joinedload import program.db.db_functions as DB +from program.db.db import db, run_migrations +from sqlalchemy import func, select + class Program(threading.Thread): """Program class""" @@ -123,7 +122,7 @@ def start(self): except Exception as e: logger.exception(f"Failed to initialize services: {e}") - max_worker_env_vars = [var for var in 
os.environ if var.endswith('_MAX_WORKERS')] + max_worker_env_vars = [var for var in os.environ if var.endswith("_MAX_WORKERS")] if max_worker_env_vars: for var in max_worker_env_vars: logger.log("PROGRAM", f"{var} is set to {os.environ[var]} workers") @@ -160,8 +159,8 @@ def start(self): logger.debug(f"Mapped metadata to {item.type.title()}: {item.log_string}") session.commit() - movies_symlinks = session.execute(select(func.count(Movie._id)).where(Movie.symlinked == True)).scalar_one() - episodes_symlinks = session.execute(select(func.count(Episode._id)).where(Episode.symlinked == True)).scalar_one() + movies_symlinks = session.execute(select(func.count(Movie._id)).where(Movie.symlinked.is_(True))).scalar_one() + episodes_symlinks = session.execute(select(func.count(Episode._id)).where(Episode.symlinked.is_(True))).scalar_one() total_symlinks = movies_symlinks + episodes_symlinks total_movies = session.execute(select(func.count(Movie._id))).scalar_one() total_shows = session.execute(select(func.count(Show._id))).scalar_one() @@ -236,37 +235,31 @@ def _schedule_services(self) -> None: logger.log("PROGRAM", f"Scheduled {service_cls.__name__} to run every {update_interval} seconds.") def _id_in_queue(self, id): - for i in self.queued_items: - if i._id == id: - return True - return False + return any(i._id == id for i in self.queued_items) def _id_in_running_items(self, id): - for i in self.running_items: - if i._id == id: - return True - return False + return any(i._id == id for i in self.running_items) def _push_event_queue(self, event): with self.mutex: - if( not event.item in self.queued_items and not event.item in self.running_items): + if( event.item not in self.queued_items and event.item not in self.running_items): if hasattr(event.item, "_id"): if isinstance(event.item, Show): for s in event.item.seasons: if self._id_in_queue(s._id) or self._id_in_running_items(s._id): - return + return None for e in s.episodes: if self._id_in_queue(e._id) or 
self._id_in_running_items(e._id): - return + return None if isinstance(event.item, Season): for e in event.item.episodes: if self._id_in_queue(e._id) or self._id_in_running_items(e._id): - return + return None if hasattr(event.item, "parent") and ( self._id_in_queue(event.item.parent._id) or self._id_in_running_items(event.item.parent._id) ): - return + return None if hasattr(event.item, "parent") and hasattr(event.item.parent, "parent") and event.item.parent.parent and ( self._id_in_queue(event.item.parent.parent._id) or self._id_in_running_items(event.item.parent.parent._id)): - return + return None self.queued_items.append(event.item) self.event_queue.put(event) if not isinstance(event.item, (Show, Movie, Episode, Season)): @@ -292,23 +285,21 @@ def add_to_running(self, item, service_name): if item is None: return if item not in self.running_items: - if isinstance(item, MediaItem) and not self._id_in_running_items(item._id): - self.running_items.append(item) - elif not isinstance(item, MediaItem): + if isinstance(item, MediaItem) and not self._id_in_running_items(item._id) or not isinstance(item, MediaItem): self.running_items.append(item) logger.log("PROGRAM", f"Item {item.log_string} started running section {service_name}" ) def _process_future_item(self, future: Future, service: Service, orig_item: MediaItem) -> None: """Callback to add the results from a future emitted by a service to the event queue.""" try: - for item in future.result(): + for _item in future.result(): pass if orig_item is not None: logger.log("PROGRAM", f"Service {service.__name__} finished running on {orig_item.log_string}") else: logger.log("PROGRAM", f"Service {service.__name__} finished running.") except TimeoutError: - logger.debug('Service {service.__name__} timeout waiting for result on {orig_item.log_string}') + logger.debug(f"Service {service.__name__} timeout waiting for result on {orig_item.log_string}") self._remove_from_running_items(orig_item, service.__name__) except 
Exception: logger.exception(f"Service {service.__name__} failed with exception {traceback.format_exc()}") @@ -347,8 +338,8 @@ def _submit_job(self, service: Service, item: MediaItem | None) -> None: future = cur_executor.submit(func, fn, service, self, item) #cur_executor.submit(func) if item is None else cur_executor.submit(func, item) future.add_done_callback(lambda f: self._process_future_item(f, service, item)) - def display_top_allocators(self, snapshot, key_type='lineno', limit=10): - top_stats = snapshot.compare_to(self.last_snapshot, 'lineno') + def display_top_allocators(self, snapshot, key_type="lineno", limit=10): + top_stats = snapshot.compare_to(self.last_snapshot, "lineno") logger.debug("Top %s lines" % limit) for index, stat in enumerate(top_stats[:limit], 1): @@ -359,7 +350,7 @@ def display_top_allocators(self, snapshot, key_type='lineno', limit=10): % (index, filename, frame.lineno, stat.size / 1024)) line = linecache.getline(frame.filename, frame.lineno).strip() if line: - logger.debug(' %s' % line) + logger.debug(" %s" % line) other = top_stats[limit:] if other: @@ -370,7 +361,6 @@ def display_top_allocators(self, snapshot, key_type='lineno', limit=10): def dump_tracemalloc(self): if self.enable_trace and time.monotonic() - self.malloc_time > 60: - print("Taking Snapshot " + str(time.monotonic() - self.malloc_time) ) self.malloc_time = time.monotonic() snapshot = tracemalloc.take_snapshot() self.display_top_allocators(snapshot) @@ -416,9 +406,9 @@ def stop(self): self.clear_queue() # Clear the queue when stopping if hasattr(self, "executors"): for executor in self.executors: - if not getattr(executor["_executor"], '_shutdown', False): + if not getattr(executor["_executor"], "_shutdown", False): executor["_executor"].shutdown(wait=False) - if hasattr(self, "scheduler") and getattr(self.scheduler, 'running', False): + if hasattr(self, "scheduler") and getattr(self.scheduler, "running", False): self.scheduler.shutdown(wait=False) 
logger.log("PROGRAM", "Riven has been stopped.") diff --git a/src/program/scrapers/__init__.py b/src/program/scrapers/__init__.py index 04cf2db4..9bcc30aa 100644 --- a/src/program/scrapers/__init__.py +++ b/src/program/scrapers/__init__.py @@ -6,6 +6,7 @@ from program.media.item import Episode, MediaItem, Movie, Season, Show from program.media.state import States from program.scrapers.annatar import Annatar +from program.scrapers.comet import Comet from program.scrapers.jackett import Jackett from program.scrapers.knightcrawler import Knightcrawler from program.scrapers.mediafusion import Mediafusion @@ -15,7 +16,6 @@ from program.scrapers.torbox import TorBoxScraper from program.scrapers.torrentio import Torrentio from program.scrapers.zilean import Zilean -from program.scrapers.comet import Comet from program.settings.manager import settings_manager from RTN import Torrent from utils.logger import logger @@ -47,11 +47,9 @@ def validate(self): def yield_incomplete_children(self, item: MediaItem) -> Union[List[Season], List[Episode]]: if isinstance(item, Season): - res = [e for e in item.episodes if e.state != States.Completed and e.is_released and self.should_submit(e)] - return res + return [e for e in item.episodes if e.state != States.Completed and e.is_released and self.should_submit(e)] if isinstance(item, Show): - res = [s for s in item.seasons if s.state != States.Completed and s.is_released and self.should_submit(s)] - return res + return [s for s in item.seasons if s.state != States.Completed and s.is_released and self.should_submit(s)] return None def partial_state(self, item: MediaItem) -> bool: @@ -77,7 +75,7 @@ def run(self, item: Union[Show, Season, Episode, Movie]) -> Generator[Union[Show return partial_state = self.partial_state(item) - if partial_state != False: + if partial_state is not False: yield partial_state return diff --git a/src/program/scrapers/annatar.py b/src/program/scrapers/annatar.py index d699d7c9..d0b50bf1 100644 --- 
a/src/program/scrapers/annatar.py +++ b/src/program/scrapers/annatar.py @@ -6,8 +6,8 @@ from requests import ConnectTimeout, ReadTimeout from requests.exceptions import RequestException from utils.logger import logger -from utils.request import get from utils.ratelimiter import RateLimiter, RateLimitExceeded +from utils.request import get class Annatar: @@ -95,7 +95,7 @@ def api_scrape(self, item: MediaItem) -> tuple[Dict[str, str], int]: if isinstance(item, Show): scrape_type = "series" imdb_id = item.imdb_id - identifier = f"season=1" + identifier = "season=1" elif isinstance(item, Season): scrape_type = "series" imdb_id = item.parent.imdb_id diff --git a/src/program/scrapers/comet.py b/src/program/scrapers/comet.py index b814a2b7..8cdc2fc3 100644 --- a/src/program/scrapers/comet.py +++ b/src/program/scrapers/comet.py @@ -1,12 +1,10 @@ """ Comet scraper module """ -from typing import Dict, Union import base64 import json -from urllib.parse import quote +from typing import Dict, Union from program.media.item import Episode, MediaItem, Movie, Season, Show from program.settings.manager import settings_manager -from program.settings.models import CometConfig from requests import ConnectTimeout, ReadTimeout from requests.exceptions import RequestException from utils.logger import logger @@ -29,7 +27,7 @@ def __init__(self): "debridService":"realdebrid", "debridApiKey": settings_manager.settings.downloaders.real_debrid.api_key, "debridStreamProxyPassword":"" - }).encode('utf-8')).decode('utf-8') + }).encode("utf-8")).decode("utf-8") self.initialized = self.validate() if not self.initialized: return @@ -106,7 +104,7 @@ def _determine_scrape(self, item: Union[Show, Season, Episode, Movie]) -> tuple[ elif isinstance(item, Movie): identifier, scrape_type, imdb_id = None, "movie", item.imdb_id else: - logger.error(f"Invalid media item type") + logger.error("Invalid media item type") return None, None, None return identifier, scrape_type, imdb_id except Exception as e: @@ 
-134,15 +132,15 @@ def api_scrape(self, item: MediaItem) -> tuple[Dict[str, str], int]: for stream in response.data.streams: # Split the URL by '/playback/' and then split the remaining part by '/' - url_parts = stream.url.split('/playback/') + url_parts = stream.url.split("/playback/") if len(url_parts) != 2: - logger.warning(f'Comet Playback url can\'t be parsed: {stream.url}') + logger.warning(f"Comet Playback url can't be parsed: {stream.url}") - end_parts = url_parts[1].split('/') + end_parts = url_parts[1].split("/") if len(end_parts) != 2: - logger.warning(f'End part of Comet Playback url can\'t be parsed ({end_parts}): {stream.url}') + logger.warning(f"End part of Comet Playback url can't be parsed ({end_parts}): {stream.url}") hash = end_parts[0] diff --git a/src/program/scrapers/jackett.py b/src/program/scrapers/jackett.py index 69ea4e78..a361d9a0 100644 --- a/src/program/scrapers/jackett.py +++ b/src/program/scrapers/jackett.py @@ -155,7 +155,7 @@ def _process_results(self, results: List[Tuple[str, str]]) -> Tuple[Dict[str, st def _search_movie_indexer(self, item: MediaItem, indexer: JackettIndexer) -> List[Tuple[str, str]]: """Search for movies on the given indexer""" - if indexer.movie_search_capabilities == None: + if indexer.movie_search_capabilities is None: return [] params = { "apikey": self.api_key, @@ -173,7 +173,7 @@ def _search_movie_indexer(self, item: MediaItem, indexer: JackettIndexer) -> Lis def _search_series_indexer(self, item: MediaItem, indexer: JackettIndexer) -> List[Tuple[str, str]]: """Search for series on the given indexer""" - if indexer.tv_search_capabilities == None: + if indexer.tv_search_capabilities is None: return [] q, season, ep = self._get_series_search_params(item) diff --git a/src/program/scrapers/knightcrawler.py b/src/program/scrapers/knightcrawler.py index dd0b61b1..5b7b7931 100644 --- a/src/program/scrapers/knightcrawler.py +++ b/src/program/scrapers/knightcrawler.py @@ -7,8 +7,8 @@ from requests import 
ConnectTimeout, ReadTimeout from requests.exceptions import RequestException from utils.logger import logger -from utils.request import get, ping from utils.ratelimiter import RateLimiter, RateLimitExceeded +from utils.request import get, ping class Knightcrawler: diff --git a/src/program/scrapers/mediafusion.py b/src/program/scrapers/mediafusion.py index 832e6a49..6824e5ab 100644 --- a/src/program/scrapers/mediafusion.py +++ b/src/program/scrapers/mediafusion.py @@ -3,15 +3,15 @@ from typing import Dict import requests -from program.media.item import Episode, MediaItem, Movie, Season, Show +from program.media.item import MediaItem from program.scrapers.shared import _get_stremio_identifier from program.settings.manager import settings_manager from program.settings.models import AppModel from requests import ConnectTimeout, ReadTimeout from requests.exceptions import RequestException from utils.logger import logger -from utils.request import get, ping from utils.ratelimiter import RateLimiter, RateLimitExceeded +from utils.request import get, ping class Mediafusion: @@ -80,7 +80,7 @@ def validate(self) -> bool: try: response = requests.request("POST", url, json=payload, headers=headers) - self.encrypted_string = json.loads(response.content)['encrypted_str'] + self.encrypted_string = json.loads(response.content)["encrypted_str"] except Exception as e: logger.error(f"Failed to encrypt user data: {e}") return False diff --git a/src/program/scrapers/orionoid.py b/src/program/scrapers/orionoid.py index e64ba5a4..5a135bea 100644 --- a/src/program/scrapers/orionoid.py +++ b/src/program/scrapers/orionoid.py @@ -1,5 +1,4 @@ """ Orionoid scraper module """ -from datetime import datetime from typing import Dict from program.media.item import Episode, MediaItem, Movie, Season, Show @@ -7,8 +6,8 @@ from requests import ConnectTimeout, ReadTimeout from requests.exceptions import RequestException from utils.logger import logger -from utils.request import get from 
utils.ratelimiter import RateLimiter, RateLimitExceeded +from utils.request import get KEY_APP = "D3CH6HMX9KD9EMD68RXRCDUNBDJV5HRR" diff --git a/src/program/scrapers/prowlarr.py b/src/program/scrapers/prowlarr.py index 622df41c..bb1ebe60 100644 --- a/src/program/scrapers/prowlarr.py +++ b/src/program/scrapers/prowlarr.py @@ -163,7 +163,7 @@ def _process_results(self, results: List[Tuple[str, str]]) -> Tuple[Dict[str, st def _search_movie_indexer(self, item: MediaItem, indexer: ProwlarrIndexer) -> List[Tuple[str, str]]: """Search for movies on the given indexer""" - if indexer.movie_search_capabilities == None: + if indexer.movie_search_capabilities is None: return [] params = { "apikey": self.api_key, @@ -180,7 +180,7 @@ def _search_movie_indexer(self, item: MediaItem, indexer: ProwlarrIndexer) -> Li def _search_series_indexer(self, item: MediaItem, indexer: ProwlarrIndexer) -> List[Tuple[str, str]]: """Search for series on the given indexer""" - if indexer.tv_search_capabilities == None: + if indexer.tv_search_capabilities is None: return [] q, season, ep = self._get_series_search_params(item) @@ -227,15 +227,7 @@ def _get_indexer_from_json(self, json_content: str) -> list[ProwlarrIndexer]: """Parse the indexers from the XML content""" indexer_list = [] for indexer in json.loads(json_content): - indexer_list.append(ProwlarrIndexer(**{ - "title": indexer["name"], - "id": str(indexer["id"]), - "link": indexer["infoLink"], - "type": indexer["protocol"], - "language": indexer["language"], - "movie_search_capabilities": (s[0] for s in indexer["capabilities"]["movieSearchParams"]) if len([s for s in indexer["capabilities"]["categories"] if s["name"] == "Movies"]) > 0 else None, - "tv_search_capabilities": (s[0] for s in indexer["capabilities"]["tvSearchParams"]) if len([s for s in indexer["capabilities"]["categories"] if s["name"] == "TV"]) > 0 else None - })) + indexer_list.append(ProwlarrIndexer(title=indexer["name"], id=str(indexer["id"]), link=indexer["infoLink"], 
type=indexer["protocol"], language=indexer["language"], movie_search_capabilities=(s[0] for s in indexer["capabilities"]["movieSearchParams"]) if len([s for s in indexer["capabilities"]["categories"] if s["name"] == "Movies"]) > 0 else None, tv_search_capabilities=(s[0] for s in indexer["capabilities"]["tvSearchParams"]) if len([s for s in indexer["capabilities"]["categories"] if s["name"] == "TV"]) > 0 else None)) return indexer_list @@ -274,7 +266,7 @@ def _parse_xml(self, xml_content: str, indexer_title: str) -> list[tuple[str, st infohashes_found = True result_list.append((item.find(".//title").text, infoHash.attrib["value"])) len_data = len(data) - if infohashes_found == False and len_data > 0: + if infohashes_found is False and len_data > 0: logger.warning(f"{self.key} Tracker {indexer_title} may never return infohashes, consider disabling: {len_data} items found, None contain infohash.") return result_list diff --git a/src/program/scrapers/shared.py b/src/program/scrapers/shared.py index 4ad4f43d..559bdbfb 100644 --- a/src/program/scrapers/shared.py +++ b/src/program/scrapers/shared.py @@ -18,7 +18,7 @@ def _get_stremio_identifier(item: MediaItem) -> str: """Get the stremio identifier for a media item based on its type.""" if isinstance(item, Show): - identifier, scrape_type, imdb_id = f":1:1", "series", item.imdb_id + identifier, scrape_type, imdb_id = ":1:1", "series", item.imdb_id elif isinstance(item, Season): identifier, scrape_type, imdb_id = f":{item.number}:1", "series", item.parent.imdb_id elif isinstance(item, Episode): @@ -57,7 +57,7 @@ def _parse_results(item: MediaItem, results: Dict[str, str]) -> Dict[str, Torren continue if isinstance(item, Movie): - if hasattr(item, 'aired_at'): + if hasattr(item, "aired_at"): # If the item has an aired_at date and it's not in the future, we can check the year if item.aired_at <= datetime.now() and item.aired_at.year == torrent.data.year: torrents.add(torrent) @@ -70,10 +70,10 @@ def _parse_results(item: 
MediaItem, results: Dict[str, str]) -> Dict[str, Torren logger.error(f"No seasons found for {item.log_string}") break if ( - hasattr(torrent.data, 'season') + hasattr(torrent.data, "season") and len(torrent.data.season) >= (len(needed_seasons) - 1) and ( - not hasattr(torrent.data, 'episode') + not hasattr(torrent.data, "episode") or len(torrent.data.episode) == 0 ) or torrent.data.is_complete @@ -82,33 +82,32 @@ def _parse_results(item: MediaItem, results: Dict[str, str]) -> Dict[str, Torren elif isinstance(item, Season): if ( - len(getattr(torrent.data, 'season', [])) == 1 + len(getattr(torrent.data, "season", [])) == 1 and item.number in torrent.data.season and ( - not hasattr(torrent.data, 'episode') + not hasattr(torrent.data, "episode") or len(torrent.data.episode) == 0 ) or torrent.data.is_complete ): torrents.add(torrent) - elif isinstance(item, Episode): - if ( - item.number in torrent.data.episode - and ( - not hasattr(torrent.data, 'season') - or item.parent.number in torrent.data.season - ) - or torrent.data.is_complete - ): - torrents.add(torrent) + elif isinstance(item, Episode) and ( + item.number in torrent.data.episode + and ( + not hasattr(torrent.data, "season") + or item.parent.number in torrent.data.season + ) + or torrent.data.is_complete + ): + torrents.add(torrent) processed_infohashes.add(infohash) - except (ValueError, AttributeError) as e: + except (ValueError, AttributeError): # logger.error(f"Failed to parse: '{raw_title}' - {e}") continue - except GarbageTorrent as e: + except GarbageTorrent: # logger.debug(f"Trashing torrent {infohash}: '{raw_title}'") continue diff --git a/src/program/scrapers/torbox.py b/src/program/scrapers/torbox.py index 329fe005..711552a6 100644 --- a/src/program/scrapers/torbox.py +++ b/src/program/scrapers/torbox.py @@ -1,15 +1,12 @@ -from typing import Dict, Generator +from typing import Dict from program.media.item import Episode, MediaItem, Movie, Season, Show from program.settings.manager import 
settings_manager -from program.settings.versions import models from requests import RequestException from requests.exceptions import ConnectTimeout, ReadTimeout, RetryError -from RTN import RTN, Torrent, sort_torrents -from RTN.exceptions import GarbageTorrent from utils.logger import logger -from utils.request import get, ping from utils.ratelimiter import RateLimiter, RateLimitExceeded +from utils.request import get, ping class TorBoxScraper: diff --git a/src/program/scrapers/torrentio.py b/src/program/scrapers/torrentio.py index fffff7c2..cef5064e 100644 --- a/src/program/scrapers/torrentio.py +++ b/src/program/scrapers/torrentio.py @@ -7,8 +7,8 @@ from requests import ConnectTimeout, ReadTimeout from requests.exceptions import RequestException from utils.logger import logger -from utils.request import get, ping from utils.ratelimiter import RateLimiter, RateLimitExceeded +from utils.request import get, ping class Torrentio: @@ -95,7 +95,7 @@ def _determine_scrape(self, item: Union[Show, Season, Episode, Movie]) -> tuple[ elif isinstance(item, Movie): identifier, scrape_type, imdb_id = None, "movie", item.imdb_id else: - logger.error(f"Invalid media item type") + logger.error("Invalid media item type") return None, None, None return identifier, scrape_type, imdb_id except Exception as e: diff --git a/src/program/scrapers/zilean.py b/src/program/scrapers/zilean.py index a82d827e..8a7adbd5 100644 --- a/src/program/scrapers/zilean.py +++ b/src/program/scrapers/zilean.py @@ -2,14 +2,14 @@ from typing import Dict -from program.media.item import Episode, MediaItem, Movie, Season, Show +from program.media.item import Episode, MediaItem, Season, Show from program.settings.manager import settings_manager from program.settings.models import AppModel from requests import ConnectTimeout, ReadTimeout from requests.exceptions import RequestException from utils.logger import logger -from utils.request import ping, get from utils.ratelimiter import RateLimiter, 
RateLimitExceeded +from utils.request import get, ping class Zilean: @@ -93,7 +93,7 @@ def api_scrape(self, item: MediaItem) -> tuple[Dict[str, str], int]: url = f"{self.settings.url}/dmm/filtered" params = {"Query": title} - if isinstance(item, MediaItem) and hasattr(item, 'year'): + if isinstance(item, MediaItem) and hasattr(item, "year"): params["Year"] = item.year if isinstance(item, Show): diff --git a/src/program/settings/models.py b/src/program/settings/models.py index 72d1b72f..5be5028f 100644 --- a/src/program/settings/models.py +++ b/src/program/settings/models.py @@ -1,11 +1,10 @@ """Riven settings models""" from pathlib import Path -from typing import Callable, Dict, List, Any +from typing import Any, Callable, Dict, List +from program.settings.migratable import MigratableBaseModel from pydantic import BaseModel, field_validator from RTN.models import CustomRank, SettingsModel - -from program.settings.migratable import MigratableBaseModel from utils import version_file_path @@ -353,7 +352,7 @@ class AppModel(Observable): def __init__(self, **data: Any): current_version = get_version() - existing_version = data.get('version', current_version) + existing_version = data.get("version", current_version) super().__init__(**data) if existing_version < current_version: self.version = current_version diff --git a/src/program/state_transition.py b/src/program/state_transition.py index a0cccf41..d8d8143a 100644 --- a/src/program/state_transition.py +++ b/src/program/state_transition.py @@ -28,7 +28,7 @@ def process_event(existing_item: MediaItem | None, emitted_by: Service, item: Me return no_further_processing return None, next_service, [item] - elif emitted_by == TraktIndexer or item.state == States.Indexed or item.state == States.PartiallyCompleted: + elif item.state in (States.Indexed, States.PartiallyCompleted): next_service = Scraping if existing_item: if not existing_item.indexed_at: diff --git a/src/program/symlink.py b/src/program/symlink.py index 
46acb6ad..f79578b0 100644 --- a/src/program/symlink.py +++ b/src/program/symlink.py @@ -268,17 +268,16 @@ def _symlink(self, item: Union[Movie, Episode]) -> bool: def _create_item_folders(self, item: Union[Movie, Show, Season, Episode], filename: str) -> str: """Create necessary folders and determine the destination path for symlinks.""" - is_anime: bool = hasattr(item, 'is_anime') and item.is_anime + is_anime: bool = hasattr(item, "is_anime") and item.is_anime movie_path: Path = self.library_path_movies show_path: Path = self.library_path_shows - if self.settings.separate_anime_dirs: - if is_anime: - if isinstance(item, Movie): - movie_path = self.library_path_anime_movies - elif isinstance(item, (Show, Season, Episode)): - show_path = self.library_path_anime_shows + if self.settings.separate_anime_dirs and is_anime: + if isinstance(item, Movie): + movie_path = self.library_path_anime_movies + elif isinstance(item, (Show, Season, Episode)): + show_path = self.library_path_anime_shows def create_folder_path(base_path, *subfolders): path = os.path.join(base_path, *subfolders) @@ -309,18 +308,17 @@ def create_folder_path(base_path, *subfolders): destination_folder = create_folder_path(show_path, folder_season_name) item.set("update_folder", destination_folder) - destination_path = os.path.join(destination_folder, filename.replace("/", "-")) - return destination_path + return os.path.join(destination_folder, filename.replace("/", "-")) def extract_imdb_id(self, path: Path) -> Optional[str]: """Extract IMDb ID from the file or folder name using regex.""" - match = re.search(r'tt\d+', path.name) + match = re.search(r"tt\d+", path.name) if match: return match.group(0) - match = re.search(r'tt\d+', path.parent.name) + match = re.search(r"tt\d+", path.parent.name) if match: return match.group(0) - match = re.search(r'tt\d+', path.parent.parent.name) + match = re.search(r"tt\d+", path.parent.parent.name) if match: return match.group(0) @@ -330,7 +328,7 @@ def 
extract_imdb_id(self, path: Path) -> Optional[str]: def extract_season_episode(self, filename: str) -> (Optional[int], Optional[int]): """Extract season and episode numbers from the file name using regex.""" season = episode = None - match = re.search(r'[Ss](\d+)[Ee](\d+)', filename) + match = re.search(r"[Ss](\d+)[Ee](\d+)", filename) if match: season = int(match.group(1)) episode = int(match.group(2)) diff --git a/src/program/types.py b/src/program/types.py index 087845c3..e7978ef4 100644 --- a/src/program/types.py +++ b/src/program/types.py @@ -2,7 +2,11 @@ from typing import Generator, Union from program.content import Listrr, Mdblist, Overseerr, PlexWatchlist, TraktContent -from program.downloaders import RealDebridDownloader, TorBoxDownloader, AllDebridDownloader +from program.downloaders import ( + AllDebridDownloader, + RealDebridDownloader, + TorBoxDownloader, +) from program.libraries import SymlinkLibrary from program.media.item import MediaItem from program.scrapers import ( diff --git a/src/program/updaters/plex.py b/src/program/updaters/plex.py index dc3039af..2c875ff4 100644 --- a/src/program/updaters/plex.py +++ b/src/program/updaters/plex.py @@ -115,7 +115,7 @@ def run(self, item: Union[Movie, Show, Season, Episode]) -> Generator[Union[Movi if len(updated_episodes) == len(items_to_update): logger.log("PLEX", f"Updated section {section_name} with all episodes for {item.log_string}") else: - updated_episodes_log = ', '.join([str(ep.number) for ep in updated_episodes]) + updated_episodes_log = ", ".join([str(ep.number) for ep in updated_episodes]) logger.log("PLEX", f"Updated section {section_name} for episodes {updated_episodes_log} in {item.log_string}") else: logger.log("PLEX", f"Updated section {section_name} for {item.log_string}") diff --git a/src/tests/test_debrid_matching.py b/src/tests/test_debrid_matching.py index da30185d..51185041 100644 --- a/src/tests/test_debrid_matching.py +++ b/src/tests/test_debrid_matching.py @@ -11,7 +11,7 @@ def 
test_matches_item_movie(): ] ) item = Movie({"imdb_id": "tt1375666", "requested_by": "user", "title": "Inception"}) - assert _matches_item(torrent_info, item) == True + assert _matches_item(torrent_info, item) is True def test_matches_item_episode(): torrent_info = SimpleNamespace( @@ -27,7 +27,7 @@ def test_matches_item_episode(): episode.parent = parent_season parent_season.parent = parent_show - assert _matches_item(torrent_info, episode) == True + assert _matches_item(torrent_info, episode) is True def test_matches_item_season(): torrent_info = SimpleNamespace( @@ -44,7 +44,7 @@ def test_matches_item_season(): season.add_episode(episode2) show.add_season(season) - assert _matches_item(torrent_info, season) == True + assert _matches_item(torrent_info, season) is True def test_matches_item_partial_season(): torrent_info = SimpleNamespace( @@ -60,12 +60,12 @@ def test_matches_item_partial_season(): season.add_episode(episode2) show.add_season(season) - assert _matches_item(torrent_info, season) == False + assert _matches_item(torrent_info, season) is False def test_matches_item_no_files(): torrent_info = SimpleNamespace() item = Movie({"imdb_id": "tt1375666", "requested_by": "user", "title": "Inception"}) - assert _matches_item(torrent_info, item) == False + assert _matches_item(torrent_info, item) is False def test_matches_item_no_selected_files(): torrent_info = SimpleNamespace( @@ -74,4 +74,4 @@ def test_matches_item_no_selected_files(): ] ) item = Movie({"imdb_id": "tt1375666", "requested_by": "user", "title": "Inception"}) - assert _matches_item(torrent_info, item) == False \ No newline at end of file + assert _matches_item(torrent_info, item) is False \ No newline at end of file diff --git a/src/tests/test_ranking.py b/src/tests/test_ranking.py index 79284c55..51f032a3 100644 --- a/src/tests/test_ranking.py +++ b/src/tests/test_ranking.py @@ -1,5 +1,5 @@ import pytest -from RTN import RTN, SettingsModel, Torrent, DefaultRanking +from RTN import RTN, 
DefaultRanking, SettingsModel, Torrent @pytest.fixture diff --git a/src/tests/test_settings_migration.py b/src/tests/test_settings_migration.py index 4afc5b38..3ed369f7 100644 --- a/src/tests/test_settings_migration.py +++ b/src/tests/test_settings_migration.py @@ -1,6 +1,7 @@ import json import os from pathlib import Path + from program.settings.manager import SettingsManager TEST_VERSION = "9.9.9" @@ -52,14 +53,14 @@ def test_load_and_migrate_settings(): program.settings.models.version_file_path = version_file settings_manager = SettingsManager() - assert settings_manager.settings.debug == True - assert settings_manager.settings.log == True - assert settings_manager.settings.force_refresh == False - assert settings_manager.settings.map_metadata == True - assert settings_manager.settings.tracemalloc == False + assert settings_manager.settings.debug is True + assert settings_manager.settings.log is True + assert settings_manager.settings.force_refresh is False + assert settings_manager.settings.map_metadata is True + assert settings_manager.settings.tracemalloc is False assert settings_manager.settings.downloaders.movie_filesize_min == 200 - assert settings_manager.settings.downloaders.real_debrid.enabled == False - assert settings_manager.settings.downloaders.all_debrid.enabled == True + assert settings_manager.settings.downloaders.real_debrid.enabled is False + assert settings_manager.settings.downloaders.all_debrid.enabled is True assert settings_manager.settings.downloaders.all_debrid.api_key == "12345678" assert settings_manager.settings.downloaders.all_debrid.proxy_url == "https://no_proxy.com" assert settings_manager.settings.database.host == "postgresql+psycopg2://postgres:postgres@localhost/riven" diff --git a/src/tests/test_states_processing.py b/src/tests/test_states_processing.py index c984e96f..6163e108 100644 --- a/src/tests/test_states_processing.py +++ b/src/tests/test_states_processing.py @@ -161,10 +161,10 @@ def 
test_process_event_transition_shows(state, service, next_service, show): show._determine_state = lambda: state # Manually override the state # Ensure the show has seasons and episodes - if not hasattr(show, 'seasons'): + if not hasattr(show, "seasons"): show.seasons = [] for season in show.seasons: - if not hasattr(season, 'episodes'): + if not hasattr(season, "episodes"): season.episodes = [] # When: The event is processed diff --git a/src/utils/logger.py b/src/utils/logger.py index 5dcbeccc..93c3bd3c 100644 --- a/src/utils/logger.py +++ b/src/utils/logger.py @@ -1,6 +1,5 @@ """Logging utils""" -import logging import os import sys from datetime import datetime @@ -10,7 +9,6 @@ from rich.console import Console from utils import data_dir_path - LOG_ENABLED: bool = settings_manager.settings.log def setup_logger(level): diff --git a/src/utils/ratelimiter.py b/src/utils/ratelimiter.py index 215d4172..ed19ddf3 100644 --- a/src/utils/ratelimiter.py +++ b/src/utils/ratelimiter.py @@ -1,5 +1,6 @@ import time from multiprocessing import Lock + from requests import RequestException diff --git a/src/utils/request.py b/src/utils/request.py index c7d99179..b78342f2 100644 --- a/src/utils/request.py +++ b/src/utils/request.py @@ -9,9 +9,9 @@ from requests.adapters import HTTPAdapter from requests.exceptions import ConnectTimeout, RequestException from urllib3.util.retry import Retry -from xmltodict import parse as parse_xml -from utils.useragents import user_agent_factory from utils.ratelimiter import RateLimiter, RateLimitExceeded +from utils.useragents import user_agent_factory +from xmltodict import parse as parse_xml logger = logging.getLogger(__name__)