diff --git a/.gitignore b/.gitignore index 44a37d5a5..ad223aeec 100644 --- a/.gitignore +++ b/.gitignore @@ -28,3 +28,5 @@ data/*.json venv/* .pyright web_ui/static/js/node_modules/ +.idea/* + diff --git a/data/example-config.py b/data/example-config.py index c3c28b103..546e0e8fa 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -350,7 +350,7 @@ "TRACKERS": { # Which trackers do you want to upload to? - # Available tracker: A4K, ACM, AITHER, ANT, AR, ASC, AZ, BHD, BHDTV, BJS, BLU, BT, CBR, CZ, DC, DP, DT, EMUW, FF, FL, FNP, FRIKI, GPW, HDB, HDS, HDT, HHD, HUNO, IHD, IS, ITT, LCD, LDU, LST, LT, LUME, MTV, NBL, OE, OTW, PHD, PT, PTER, PTP, PTS, PTT, R4E, RAS, RF, RTF, SAM, SHRI, SN, SP, SPD, STC, THR, TIK, TL, TLZ, TOS, TTG, TTR, TVC, ULCX, UTP, YOINK, YUS + # Available tracker: A4K, ACM, AITHER, ANT, AR, ASC, AZ, BHD, BHDTV, BJS, BLU, BT, C411, CBR, CZ, DC, DP, DT, EMUW, FF, FL, FNP, FRIKI, GPW, HDB, HDS, HDT, HHD, HUNO, IHD, IS, ITT, LCD, LDU, LST, LT, LUME, MTV, NBL, OE, OTW, PHD, PT, PTER, PTP, PTS, PTT, R4E, RAS, RF, RTF, SAM, SHRI, SN, SP, SPD, STC, THR, TIK, TL, TLZ, TOS, TTG, TTR, TVC, ULCX, UTP, YOINK, YUS # Only add the trackers you want to upload to on a regular basis "default_trackers": "", @@ -466,6 +466,13 @@ "announce_url": "https://t.brasiltracker.org//announce", "anon": False, }, + "C411": { + "link_dir_name": "", + "api_key": "", + "announce_url": "https://c411.org/announce/", + "anon": False, + "modq": False, + }, "CBR": { # Instead of using the tracker acronym for folder name when sym/hard linking, you can use a custom name "link_dir_name": "", diff --git a/data/templates/C411.txt b/data/templates/C411.txt new file mode 100644 index 000000000..1f3621770 --- /dev/null +++ b/data/templates/C411.txt @@ -0,0 +1,65 @@ +[h1]{{ title }} ({{ year }})[/h1] + +{% if category == "TV" %} +[h2]Saison {{ season }} {% if episode %}{% if episode != '0' %}Episode {{ episode }}{% endif %}{% endif %}[/h2] +{% endif %} + +[img]{{ poster }}[/img] + + 
+[h2]🎬 Informations[/h2] + +[b]Pays :[/b] {{pays}} +[b]Genres :[/b] {{genre}} +[b]Date de sortie :[/b] {{release_date}} +{% if video_duration %}[b]Durée :[/b] {{video_duration}}{% endif %} + +{% if imdb_url %}[url={{ imdb_url }}]IMDb[/url]{% endif %} +{% if tmdb %}[url=https://www.themoviedb.org/{{ category.lower() }}/{{ tmdb }}]TMDB[/url]{% endif %} +{% if tvdb_id %}[url=https://www.thetvdb.com/?id={{ tvdb_id }}&tab=series]TVDB[/url]{% endif %} +{% if tvmaze_id %}[url=https://www.tvmaze.com/shows/{{ tvmaze_id }}]TVmaze[/url]{% endif %} +{% if mal_id %}[url=https://myanimelist.net/anime/{{ mal_id }}]MyAnimeList[/url]{% endif %} + +[h3]📖 Synopsis[/h3] + +{{description}} + +[h2]⚙️ Détails Techniques[/h2] + +[b][color=#3d85c6]Source :[/color][/b] [i]{{ source }} {{ service_longname }}[/i] +[b][color=#3d85c6]Type :[/color][/b] [i]{{ type }}[/i] +[b][color=#3d85c6]Résolution vidéo :[/color][/b][i]{{ resolution }}[/i] +[b][color=#3d85c6]Format vidéo :[/color][/b] [i]{{ container }}[/i] +[b][color=#3d85c6]Codec vidéo :[/color][/b] [i]{{ video_codec }} {{ hdr }}[/i] +[b][color=#3d85c6]Débit vidéo :[/color][/b] [i]{{ mbps|round(2) }} MB/s[/i] + +[h3]🔊 Langue(s)[/h3] + + +[table][tr][th]#[/th][th]Langue[/th][th]Canaux[/th][th]Codec[/th][/tr] + +{% for line in audio_lines_dict %} +[tr][td]{{ loop.index }}[/td][td]{{ line.language }}[/td][td]{{ line.channels }}[/td][td]{{ line.format }} ({{ line.bitrate|round(2) }} KB/s)[/td][/tr] +{% endfor %} +[/table] + +{% if subtitle_lines %} + +[h3]💬 Sous-titre(s)[/h3] +[table][tr][th]#[/th][th]Langue[/th][th]Format[/th][th]Type[/th][/tr] +{% for line in subtitle_lines_dict %} +[tr][td]{{ loop.index }}[/td][td]{{ line.language }}[/td][td]{{ line.format }}[/td][td]{{ line.type + +}}[/td][/tr] +{% endfor %} +[/table] +{% endif %} + +[b][color=#3d85c6]Team :[/color][/b] [i]{{ tag }}[/i] +[b][color=#3d85c6] Taille totale :[/color][/b] {{ size_gib|round(2) }} GB + +{% if images %}{% for image in images %} +[img]{{ image['raw_url'] }}[/img] 
+{% endfor %}{% endif %} + +[url=https://github.com/Audionut/Upload-Assistant]{{ signature }}[/url] \ No newline at end of file diff --git a/data/templates/FRENCH.txt b/data/templates/FRENCH.txt new file mode 100644 index 000000000..efa304e28 --- /dev/null +++ b/data/templates/FRENCH.txt @@ -0,0 +1,56 @@ +[img]{{ poster }}[/img] + +[b][font=Verdana][color=#3d85c6][size=29]{{ title }}[/size][/font] +[size=18]{{ year }}[/size][/color][/b] + +{% if category == "TV" %} +[b][size=18]S{{ season }}E{{ episode }}[/size][/b] +{% endif %} + +[font=Verdana][size=13][b][color=#3d85c6]Titre original :[/color][/b] [i]{{ original_title }}[/i][/size][/font] +[b][color=#3d85c6]Pays :[/color][/b] [i]{{ pays }}[/i] +[b][color=#3d85c6]Genres :[/color][/b] [i]{{ genre }}[/i] +[b][color=#3d85c6]Date de sortie :[/color][/b] [i]{{ release_date }}[/i] + +{% if category == 'MOVIE' %} +[b][color=#3d85c6]Durée :[/color][/b] [i]{{ video_duration }} Minutes[/i] +{% endif %} + +{% if imdb_url %}[url={{ imdb_url }}]IMDb[/url]{% endif %} +{% if tmdb %}[url=https://www.themoviedb.org/{{ category.lower() }}/{{ tmdb }}]TMDB[/url]{% endif %} +{% if tvdb_id %}[url=https://www.thetvdb.com/?id={{ tvdb_id }}&tab=series]TVDB[/url]{% endif %} +{% if tvmaze_id %}[url=https://www.tvmaze.com/shows/{{ tvmaze_id }}]TVmaze[/url]{% endif %} +{% if mal_id %}[url=https://myanimelist.net/anime/{{ mal_id }}]MyAnimeList[/url]{% endif %} + +[img]https://i.imgur.com/W3pvv6q.png[/img] + +{{ description }} + +[img]https://i.imgur.com/KMZsqZn.png[/img] + +[b][color=#3d85c6]Source :[/color][/b] [i]{{ source }} {{ service_longname }}[/i] +[b][color=#3d85c6]Type :[/color][/b] [i]{{ type }}[/i] +[b][color=#3d85c6]Résolution vidéo :[/color][/b][i]{{ resolution }}[/i] +[b][color=#3d85c6]Format vidéo :[/color][/b] [i]{{ container }}[/i] +[b][color=#3d85c6]Codec vidéo :[/color][/b] [i]{{ video_codec }} {{ hdr }}[/i] +[b][color=#3d85c6]Débit vidéo :[/color][/b] [i]{{ mbps|round(2) }} MB/s[/i] + +[b][color=#3d85c6] Audio(s) 
:[/color][/b] +{% for line in audio_lines %} +{{ line }} +{% endfor %} + +{% if subtitle_lines %}[b][color=#3d85c6]Sous-titres :[/color][/b] +{% for line in subtitle_lines %} +{{ line }} +{% endfor %} +{% endif %} + +[b][color=#3d85c6]Team :[/color][/b] [i]{{ tag }}[/i] +[b][color=#3d85c6] Taille totale :[/color][/b] {{ size_gib|round(2) }} GB + +{% if images %}{% for image in images %} +[img]{{ image['raw_url'] }}[/img] +{% endfor %}{% endif %} + +[url=https://github.com/Audionut/Upload-Assistant]{{ signature }}[/url] \ No newline at end of file diff --git a/src/trackers/C411.py b/src/trackers/C411.py new file mode 100644 index 000000000..6a8d1733b --- /dev/null +++ b/src/trackers/C411.py @@ -0,0 +1,592 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +# https://github.com/Audionut/Upload-Assistant/tree/master + +import aiofiles +import asyncio +import json +import httpx +import src.trackers.FRENCH as fr +from typing import Any +from src.trackers.COMMON import COMMON +from src.console import console +from lxml import etree +import unidecode + +Meta = dict[str, Any] +Config = dict[str, Any] + + +class C411: + def __init__(self, config: Config) -> None: + self.config: Config = config + self.common = COMMON(config) + self.tracker = 'C411' + self.base_url = 'https://c411.org' + self.id_url = f'{self.base_url}/api/torrents' + self.upload_url = f'{self.base_url}/api/torrents' + # self.requests_url = f'{self.base_url}/api/requests/filter' + self.search_url = f'{self.base_url}/api/' + self.torrent_url = f'{self.base_url}/api/' + self.banned_groups: list[str] = [] + pass + + # async def get_cat_id(self, meta: Meta) -> str: + # mediatype video + # return '1' + + async def get_subcat_id(self, meta: Meta) -> str: + sub_cat_id = "0" + genres = meta.get("genres","").lower().replace(' ', '').replace('-', '') + if meta['category'] == 'MOVIE': + sub_cat_id = '1' if meta.get('mal_id') else '6' + if 'animation' in genres: + sub_cat_id = '6' + elif 
meta['category'] == 'TV': + sub_cat_id = '2' if meta.get('mal_id') else '7' + if "reality" in genres: + sub_cat_id = 5 + + return sub_cat_id + + async def get_option_tag(self, meta: Meta): + obj1 = [] + obj2 = 0 + vff = None + vfq = None + eng = None + audio_track = await fr.get_audio_tracks(meta, True) + source = meta.get('source', "") + type = meta.get('type', "").upper() + + for item in audio_track: + lang = str(item.get('Language', '')).lower() + if lang == "fr-ca": + vfq = True + if lang == "fr-fr": + vff = True + if lang == 'french': + vff = True + if lang == 'fr': + vff = True + if lang in ("en", "en-us", "en-gb"): + eng = True + + if vff and vfq: + obj1.append(4) + if vfq: + obj1.append(5) + if vff: + obj1.append(2) + if eng and not vff and not vfq: # vo + obj1.append(1) + + + if meta['is_disc'] == 'BDMV': + if meta['resolution'] == '2160p': + obj2 = 10 # blu 4k full + else: + obj2 = 11 # blu full + elif meta['is_disc'] == 'DVD': + obj2 = 14 # DVD r5 r9 13 - 14 + + elif type == "REMUX" and source in ("BluRay", "HDDVD"): + if meta['resolution'] == '2160p': + obj2 = 10 # blu 4k remux + else: + obj2 = 12 # blu remux + + elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD", "DVD"): + obj2 = 15 + + elif type == "ENCODE" and source in ("BluRay", "HDDVD"): + if meta['resolution'] == '2160p': + obj2 = 17 + elif meta['resolution'] == '1080p': + obj2 = 16 + elif meta['resolution'] == '720p': + obj2 = 18 + # else: + # obj2 = 25) + + elif type == "WEBDL": + if meta['resolution'] == '2160p': + obj2 = 26 + elif meta['resolution'] == '1080p': + obj2 = 25 + elif meta['resolution'] == '720p': + obj2 = 27 + else: + obj2 = 24 + + elif type == "WEBRIP": + if meta['resolution'] == '2160p': + obj2 = 30 + elif meta['resolution'] == '1080p': + obj2 = 29 + elif meta['resolution'] == '720p': + obj2 = 31 + else: + obj2 = 28 + elif type == "HDTV": + if meta['resolution'] == '2160p': + obj2 = 21 + elif meta['resolution'] == '1080p': + obj2 = 20 + elif meta['resolution'] == '720p': 
+ obj2 = 22 + else: + obj2 = 19 + + elif type == "DVDRIP": + obj2 = 15 # DVDRIP + + uuid = meta.get('uuid', "").lower() + + if "4klight" in uuid: # and type == "ENCODE" + obj2 = 415 + elif "hdlight" in uuid: # and type == "ENCODE" + if meta['resolution'] == '1080p': + obj2 = 413 + else: + obj2 = 414 + + # vcd/vhs ID= 23 + + options_dict = {} + options_dict[1] = obj1 + # None check is missing, check for correct data structure. + options_dict[2] = [obj2] + + if meta['category'] == 'TV': + if meta.get('no_season', False) is False: + season = str(meta.get('season_int', '')) + if season: + options_dict[7] = 120 + int(season) + # Episode + episode = str(meta.get('episode_int', '')) + if episode: # Episode 0 check is missing + options_dict[6] = 96 + int(episode) + else: + # pas d'épisode, on suppose que c'est une saison complete + options_dict[6] = 96 + return json.dumps(options_dict) + + # https://c411.org/wiki/nommage + async def get_name(self, meta: Meta) -> dict[str, str]: + + type = str(meta.get('type', "")).upper() + title, _ = await fr.get_translation_fr(meta) + year = str(meta.get('year', "")) + manual_year_value = meta.get('manual_year') + if manual_year_value is not None and int(manual_year_value) > 0: + year = str(manual_year_value) + resolution = str(meta.get('resolution', "")) + if resolution == "OTHER": + resolution = "" + audio = await fr.get_audio_name(meta) + language = await fr.build_audio_string(meta) + extra_audio = await fr.get_extra_french_tag(meta, True) + if extra_audio: + language = language.replace("FRENCH", "") + " " + extra_audio + service = "" + season = str(meta.get('season', "")) + episode = str(meta.get('episode', "")) + part = str(meta.get('part', "")) + repack = str(meta.get('repack', "")) + three_d = str(meta.get('3D', "")) + tag = str(meta.get('tag', "")) + source = str(meta.get('source', "")) + uhd = str(meta.get('uhd', "")) + hdr = str(meta.get('hdr', "")).replace('HDR10+', 'HDR10PLUS') + hybrid = 'Hybrid' if meta.get('webdv', "") 
else "" + video_codec = "" + video_encode = "" + region = "" + dvd_size = "" + if meta.get('is_disc', "") == "BDMV": + video_codec = str(meta.get('video_codec', "")) + region = str(meta.get('region', "") or "") + elif meta.get('is_disc', "") == "DVD": + region = str(meta.get('region', "") or "") + dvd_size = str(meta.get('dvd_size', "")) + else: + video_codec = str(meta.get('video_codec', "")).replace('H.264', 'H264').replace('H.265', 'H265') + video_encode = str(meta.get('video_encode', "")).replace('H.264', 'H264').replace('H.265', 'H265') + #video_encode = "x264" + edition = str(meta.get('edition', "")) + if 'hybrid' in edition.upper(): + edition = edition.replace('Hybrid', '').strip() + + if meta['category'] == "TV": + year = meta['year'] if meta['search_year'] != "" else "" + if meta.get('manual_date'): + # Ignore season and year for --daily flagged shows, just use manual date stored in episode_name + season = '' + episode = '' + if meta.get('no_season', False) is True: + season = '' + if meta.get('no_year', False) is True: + year = '' + #if meta.get('no_aka', False) is True: + # alt_title = '' + + # YAY NAMING FUN + name = "" + if meta['category'] == "MOVIE": # MOVIE SPECIFIC + if type == "DISC": + if meta['is_disc'] == 'BDMV': + name = f"{title} {year} {three_d} {edition} {hybrid} {repack} {language} {resolution} {uhd} {region} {source} {hdr} {audio} {video_codec}" + elif meta['is_disc'] == 'DVD': + name = f"{title} {year} {repack} {edition} {region} {source} {dvd_size} {audio}" + elif meta['is_disc'] == 'HDDVD': + name = f"{title} {year} {edition} {repack} {language} {resolution} {source} {video_codec} {audio}" + elif type == "REMUX" and source in ("BluRay", "HDDVD"): + name = f"{title} {year} {three_d} {edition} {hybrid} {repack} {language} {resolution} {uhd} {source} REMUX {hdr} {audio} {video_codec}" + elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD", "DVD"): + name = f"{title} {year} {edition} {repack} {source} REMUX {audio}" + elif type == 
"ENCODE": + name = f"{title} {year} {edition} {hybrid} {repack} {language} {resolution} {uhd} {source} {hdr} {audio} {video_encode}" + elif type == "WEBDL": + name = f"{title} {year} {edition} {hybrid} {repack} {language} {resolution} {uhd} {service} WEB {hdr} {audio} {video_encode}" + elif type == "WEBRIP": + name = f"{title} {year} {edition} {hybrid} {repack} {language} {resolution} {uhd} {service} WEBRip {hdr} {audio} {video_encode}" + elif type == "HDTV": + name = f"{title} {year} {edition} {repack} {language} {resolution} {source} {audio} {video_encode}" + elif type == "DVDRIP": + name = f"{title} {year} {source} {video_encode} DVDRip {audio}" + + elif meta['category'] == "TV": # TV SPECIFIC + if type == "DISC": + if meta['is_disc'] == 'BDMV': + name = f"{title} {year} {season}{episode} {three_d} {edition} {hybrid} {repack} {language} {resolution} {uhd} {region} {source} {hdr} {audio} {video_codec}" + if meta['is_disc'] == 'DVD': + name = f"{title} {year} {season}{episode}{three_d} {repack} {edition} {region} {source} {dvd_size} {audio}" + elif meta['is_disc'] == 'HDDVD': + name = f"{title} {year} {edition} {repack} {language} {resolution} {source} {video_codec} {audio}" + elif type == "REMUX" and source in ("BluRay", "HDDVD"): + name = f"{title} {year} {season}{episode} {part} {three_d} {edition} {hybrid} {repack} {language} {resolution} {uhd} {source} REMUX {hdr} {audio} {video_codec}" + elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD", "DVD"): + name = f"{title} {year} {season}{episode} {part} {edition} {repack} {source} REMUX {audio}" + elif type == "ENCODE": + name = f"{title} {year} {season}{episode} {part} {edition} {hybrid} {repack} {language} {resolution} {uhd} {source} {hdr} {audio} {video_encode}" + elif type == "WEBDL": + name = f"{title} {year} {season}{episode} {part} {edition} {hybrid} {repack} {language} {resolution} {uhd} {service} WEB {hdr} {audio} {video_encode}" + elif type == "WEBRIP": + name = f"{title} {year} {season}{episode} 
{part} {edition} {hybrid} {repack} {language} {resolution} {uhd} {service} WEBRip {hdr} {audio} {video_encode}" + elif type == "HDTV": + name = f"{title} {year} {season}{episode} {part} {edition} {repack} {language} {resolution} {source} {audio} {video_encode}" + elif type == "DVDRIP": + name = f"{title} {year} {season} {source} DVDRip {audio} {video_encode}" + + try: + name = ' '.join(name.split()) + except Exception: + console.print( + "[bold red]Unable to generate name. Please re-run and correct any of the following args if needed.") + console.print(f"--category [yellow]{meta['category']}") + console.print(f"--type [yellow]{meta['type']}") + console.print(f"--source [yellow]{meta['source']}") + console.print( + "[bold green]If you specified type, try also specifying source") + raise + + name_notag = name + name = name_notag + tag + name = await fr.clean_name(name) + + if meta['debug']: + console.log("[cyan]get_name cat/type") + console.log(f"CATEGORY: {meta['category']}") + console.log(f"TYPE: {meta['type']}") + console.log("[cyan]get_name meta:") + console.print(f"source : {source}") + console.print(f"type : {type}") + console.print(f"video_codec : {video_codec}") + console.print(f"video_encode : {video_encode}") + console.print(f"NAME : {name}") + + return {'name': name} + + async def get_additional_checks(self, meta: Meta) -> bool: + # Check language requirements: must be French audio OR original audio with French subtitles + french_languages = ["french", "fre", "fra", "fr", + "français", "francais", 'fr-fr', 'fr-ca'] + # check or ignore audio req config + # self.config['TRACKERS'][self.tracker].get('check_for_rules', True): + if not await self.common.check_language_requirements( + meta, + self.tracker, + languages_to_check=french_languages, + check_audio=True, + check_subtitle=True, + require_both=False, + original_language=True, + ): + console.print( + f"[bold red]Language requirements not met for {self.tracker}.[/bold red]") + return False + + return True 
+ + async def search_existing(self, meta: dict[str, Any], _) -> list[str]: + dupes: list[str] = [] + + # Nothing came with the name, we'll look using tmdb_id + tmdb_id = meta.get('tmdb_id','') + title, descr = await fr.get_translation_fr(meta) + params: dict[str, Any] = { + 't': 'search', + 'apikey': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbid': tmdb_id + } + try: + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(url=self.search_url, params=params) + if response.status_code == 200: + response_text = response.text.encode('utf-8') + root = etree.fromstring(response_text) + channel = root[0] + for result in channel: + if result.tag == 'item': + dupe = result[0] + dupes.append(dupe.text) + else: + console.print(f"[bold red]Failed to search torrents. HTTP Status: {response.status_code}") + except httpx.TimeoutException: + console.print("[bold red]Request timed out after 5 seconds") + except httpx.RequestError as e: + console.print(f"[bold red]Unable to search for existing torrents: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") + await asyncio.sleep(5) + if not dupes: + # Nothing came with tmdn id, we'll check using names just in case + title, descr = await fr.get_translation_fr(meta) + params: dict[str, Any] = { + 't': 'search', + 'apikey': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'q': unidecode.unidecode(title.replace(" ", ".")) + } + try: + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(url=self.search_url, params=params) + if response.status_code == 200: + response_text = response.text.encode('utf-8') + root = etree.fromstring(response_text) + channel = root[0] + for result in channel: + if result.tag == 'item': + dupe = result[0] + dupes.append(dupe.text) + else: + console.print(f"[bold red]Failed to search torrents. 
HTTP Status: {response.status_code}") + except httpx.TimeoutException: + console.print("[bold red]Request timed out after 5 seconds") + except httpx.RequestError as e: + console.print(f"[bold red]Unable to search for existing torrents: {e}") + except Exception as e: + console.print(f"[bold red]Unexpected error: {e}") + await asyncio.sleep(5) + return dupes + + + + async def upload(self, meta: Meta, _disctype: str) -> bool: + description = await fr.get_desc_full(meta, self.tracker) + # Tmdb infos + tmdb_info = {} + tmdb_info['id'] = meta.get("tmdb_id","") + tmdb_info['title'] = meta.get("title","") + tmdb_info['originalTitle'] = meta.get("original_title","") + tmdb_info['overview'] = meta.get("overview","") + tmdb_info['release_date'] = meta.get("release_date","") + tmdb_info['runtime'] = meta.get("runtime","") + tmdb_info['voteAverage'] = meta.get("vote_average","") + if not meta["debug"]: + await self.common.create_torrent_for_upload(meta, self.tracker, 'C411') + torrent_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}].torrent" + mediainfo_file_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt" + + headers = { + "Authorization": f"Bearer {self.config['TRACKERS'][self.tracker]['api_key'].strip()}"} + c411_name = await self.get_name(meta) + dot_name = c411_name["name"].replace(" ", ".") + response = None + + async with aiofiles.open(torrent_file_path, 'rb') as f: + torrent_bytes = await f.read() + async with aiofiles.open(mediainfo_file_path, 'rb') as f: + mediainfo_bytes = await f.read() + data = { + "title": str(dot_name), + "description": await fr.get_desc_full(meta, self.tracker), + "categoryId": "1", + "subcategoryId": str(await self.get_subcat_id(meta)), + # 1 langue , 2 qualite + "options": await self.get_option_tag(meta), + # "isExclusive": "Test Upload-Assistant", + "uploaderNote": "Upload-Assistant", + "tmdbData": str(tmdb_info), + # "rawgData": "Test Upload-Assistant", + } + async with 
aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/c411_payload.json", 'w', encoding='utf-8') as f: + await f.write(json.dumps(data, indent=4)) + + # Place holder for potential improvement + # files={"torrent": ("torrent.torrent", torrent_bytes, "application/x-bittorrent"),"nfo": ("MEDIAINFO.txt", mediainfo_bytes, "text/plain"),} + files = {"torrent": torrent_bytes, "nfo": mediainfo_bytes, } + + if meta["debug"] is False: + response_data = {} + max_retries = 2 + retry_delay = 5 + timeout = 40.0 + + for attempt in range(max_retries): + try: # noqa: PERF203 + async with httpx.AsyncClient(timeout=timeout, follow_redirects=True) as client: + response = await client.post( + url=self.upload_url, files=files, data=data, headers=headers + ) + response.raise_for_status() + + response_data = response.json() + + # Verify API success before proceeding + if not response_data.get("success"): + error_msg = response_data.get( + "message", "Unknown error") + meta["tracker_status"][self.tracker][ + "status_message"] = f"API error: {error_msg}" + console.print( + f"[yellow]Upload to {self.tracker} failed: {error_msg}[/yellow]") + return False + + meta["tracker_status"][self.tracker]["status_message"] = ( + await self.process_response_data(response_data) + ) + + torrent_hash = response_data["data"]["infoHash"] + meta["tracker_status"][self.tracker]["torrent_id"] = torrent_hash + await self.download_torrent(meta, torrent_hash) + return True # Success + + except httpx.HTTPStatusError as e: # noqa: PERF203 + if e.response.status_code in [403, 302]: + # Don't retry auth/permission errors + if e.response.status_code == 403: + meta["tracker_status"][self.tracker][ + "status_message" + ] = f"data error: Forbidden (403). This may indicate that you do not have upload permission. {e.response.text}" + else: + meta["tracker_status"][self.tracker][ + "status_message" + ] = f"data error: Redirect (302). This may indicate a problem with authentication. 
{e.response.text}" + return False # Auth/permission error + elif e.response.status_code in [401, 404, 422]: + meta["tracker_status"][self.tracker][ + "status_message" + ] = f"data error: HTTP {e.response.status_code} - {e.response.text}" + return False + else: + # Retry other HTTP errors + if attempt < max_retries - 1: + console.print( + f"[yellow]{self.tracker}: HTTP {e.response.status_code} error, retrying in {retry_delay} seconds... (attempt {attempt + 1}/{max_retries})[/yellow]" + ) + await asyncio.sleep(retry_delay) + continue + else: + # Final attempt failed + if e.response.status_code == 520: + meta["tracker_status"][self.tracker][ + "status_message" + ] = "data error: Error (520). This is probably a cloudflare issue on the tracker side." + else: + meta["tracker_status"][self.tracker][ + "status_message" + ] = f"data error: HTTP {e.response.status_code} - {e.response.text}" + return False # HTTP error after all retries + except httpx.TimeoutException: + if attempt < max_retries - 1: + timeout = timeout * 1.5 # Increase timeout by 50% for next retry + console.print( + f"[yellow]{self.tracker}: Request timed out, retrying in {retry_delay} seconds with {timeout}s timeout... (attempt {attempt + 1}/{max_retries})[/yellow]" + ) + await asyncio.sleep(retry_delay) + continue + else: + meta["tracker_status"][self.tracker][ + "status_message" + ] = "data error: Request timed out after multiple attempts" + return False # Timeout after all retries + except httpx.RequestError as e: + if attempt < max_retries - 1: + console.print( + f"[yellow]{self.tracker}: Request error, retrying in {retry_delay} seconds... (attempt {attempt + 1}/{max_retries})[/yellow]" + ) + await asyncio.sleep(retry_delay) + continue + else: + meta["tracker_status"][self.tracker][ + "status_message" + ] = f"data error: Unable to upload. 
Error: {e}.\nResponse: {response_data}" + return False # Request error after all retries + except json.JSONDecodeError as e: + meta["tracker_status"][self.tracker][ + "status_message" + ] = f"data error: Invalid JSON response from {self.tracker}. Error: {e}" + return False # JSON parsing error + else: + console.print(f"[cyan]{self.tracker} Request Data:") + console.print(data) + meta["tracker_status"][self.tracker][ + "status_message" + ] = f"Debug mode enabled, not uploading: {self.tracker}." + await self.common.create_torrent_for_upload( + meta, + f"{self.tracker}" + "_DEBUG", + f"{self.tracker}" + "_DEBUG", + announce_url="https://fake.tracker", + ) + return True # Debug mode - simulated success + + return False + + async def download_torrent(self, meta: dict[str, Any], torrent_hash: str, ) -> None: + path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DL.torrent" + params: dict[str, Any] = { + "t": "get", + "id": torrent_hash, + "apikey": self.config['TRACKERS'][self.tracker]['api_key'].strip(), + } + + # https://c411.org/api/?t=get&id={{infoHash}}&apikey={{config.API_KEY}} + try: + async with httpx.AsyncClient(timeout=30.0, follow_redirects=True) as client: + r = await client.get(self.torrent_url, params=params) + + r.raise_for_status() + async with aiofiles.open(path, "wb") as f: + async for chunk in r.aiter_bytes(): + await f.write(chunk) + + return None + + except Exception as e: + console.print( + f"[yellow]Warning: Could not download torrent file: {str(e)}[/yellow]") + console.print( + "[yellow]Download manually from the tracker.[/yellow]") + return None + + async def process_response_data(self, response_data: dict[str, Any]) -> str: + """Returns the success message from the response data as a string.""" + if response_data.get("success") is True: + return str(response_data.get("message", "Upload successful")) + + # For non-success responses, format as string + error_msg = response_data.get("message", "") + if error_msg: + return f"API response: 
{error_msg}" + return f"API response: {response_data}" \ No newline at end of file diff --git a/src/trackers/FRENCH.py b/src/trackers/FRENCH.py new file mode 100644 index 000000000..e28c65211 --- /dev/null +++ b/src/trackers/FRENCH.py @@ -0,0 +1,747 @@ +# Upload Assistant © 2025 Audionut & wastaken7 — Licensed under UAPL v1.0 +# https://github.com/Audionut/Upload-Assistant/tree/master + +import aiofiles +import httpx +from typing import Any, Optional, cast +from data.config import config +from unidecode import unidecode +#from src.console import console + +async def build_audio_string(meta: dict[str, Any]) -> str: + + # Priority Order: + # 1. MULYi: Exactly 2 audio tracks Dual would be nice + # 2. MULTi: 3 audio tracks + # 3. VOSTFR: Single audio (original lang) + French subs + NO French audio + # 4. VO: Single audio (original lang) + NO French subs + NO French audio + + audio_tracks = await get_audio_tracks(meta, True) + if not audio_tracks: + return '' + + audio_langs = await extract_audio_languages(audio_tracks, meta) + if not audio_langs: + return '' + + language = "" + original_lang = await get_original_language(meta) + has_french_audio = 'FRA' in audio_langs + has_French_subs = await has_french_subs(meta) + num_audio_tracks = len(audio_tracks) + + # DUAL - Exactly 2 audios + if num_audio_tracks == 2 and has_french_audio: + language = "MULTi" + + # MULTI - 3+ audios + if num_audio_tracks >= 3 and has_french_audio: + language = "MULTi" + + # VOSTFR - Single audio (original) + French subs + NO French audio + if num_audio_tracks == 1 and original_lang and not has_french_audio and has_French_subs and audio_langs[0] == original_lang: + language = "VOSTFR" + + # VO - Single audio (original) + NO French subs + NO French audio + if num_audio_tracks == 1 and original_lang and not has_french_audio and not has_French_subs and audio_langs[0] == original_lang: + language = "VO" + + # FRENCH. 
- Single audio FRENCH + if num_audio_tracks == 1 and has_french_audio and audio_langs[0] == original_lang: + language = "FRENCH" + + return language + + +async def get_extra_french_tag(meta: dict[str, Any], check_origin: bool) -> str: + audio_track = await get_audio_tracks(meta, True) + + vfq = "" + vff = "" + vf = "" + origincountry = meta.get("origin_country", "") + + for _, item in enumerate(audio_track): + title = (item.get("Title") or "").lower() + try: + lang = item.get('Language', "").lower() + except: + lang = "" + if lang == "fr-ca" or "vfq" in title: + vfq = True + elif lang == "fr-fr" or "vff" in title: + vff = True + elif lang == "fr" or "vfi" in title: + vf = True + + if vff and vfq: + return 'VF2' + elif vfq: + if "CA" in origincountry and check_origin: + return 'VOQ' + else: + return 'VFQ' + elif vff: + if "FR" in origincountry and check_origin: + return 'VOF' + else: + return 'VFF' + elif vf: + if "FR" in origincountry and check_origin: + return 'VOF' + else: + return 'VFI' + else: + return "" + + +async def get_audio_tracks(meta: dict[str, Any], filter: bool) -> list[dict[str, Any]]: + + if 'mediainfo' not in meta or 'media' not in meta['mediainfo']: + return [] + + media_info = meta['mediainfo'] + if not isinstance(media_info, dict): + return [] + media_info_dict = cast(dict[str, Any], media_info) + media = media_info_dict.get('media') + if not isinstance(media, dict): + return [] + + media_dict = cast(dict[str, Any], media) + tracks = media_dict.get('track', []) + if not isinstance(tracks, list): + return [] + + audio_tracks: list[dict[str, Any]] = [] + tracks_list = cast(list[Any], tracks) + for track in tracks_list: + if isinstance(track, dict): + track_dict = cast(dict[str, Any], track) + if track_dict.get('@type') == 'Audio': + if filter: + # or not "audio description" in str(track_dict.get('Title') or '').lower() #audio description, AD, description + if "commentary" not in str(track_dict.get('Title') or '').lower(): + 
audio_tracks.append(track_dict) + else: + audio_tracks.append(track_dict) + + return audio_tracks + + +async def get_subtitle_tracks(meta: dict[str, Any]) -> list[dict[str, Any]]: + + if 'mediainfo' not in meta or 'media' not in meta['mediainfo']: + return [] + + media_info = meta['mediainfo'] + if not isinstance(media_info, dict): + return [] + media_info_dict = cast(dict[str, Any], media_info) + media = media_info_dict.get('media') + if not isinstance(media, dict): + return [] + + media_dict = cast(dict[str, Any], media) + tracks = media_dict.get('track', []) + if not isinstance(tracks, list): + return [] + + audio_tracks: list[dict[str, Any]] = [] + tracks_list = cast(list[Any], tracks) + for track in tracks_list: + if isinstance(track, dict): + track_dict = cast(dict[str, Any], track) + if track_dict.get('@type') == 'Text': + audio_tracks.append(track_dict) + + return audio_tracks + + +async def get_video_tracks(meta: dict[str, Any]) -> list[dict[str, Any]]: + + if 'mediainfo' not in meta or 'media' not in meta['mediainfo']: + return [] + + media_info = meta['mediainfo'] + if not isinstance(media_info, dict): + return [] + media_info_dict = cast(dict[str, Any], media_info) + media = media_info_dict.get('media') + if not isinstance(media, dict): + return [] + + media_dict = cast(dict[str, Any], media) + tracks = media_dict.get('track', []) + if not isinstance(tracks, list): + return [] + + audio_tracks: list[dict[str, Any]] = [] + tracks_list = cast(list[Any], tracks) + for track in tracks_list: + if isinstance(track, dict): + track_dict = cast(dict[str, Any], track) + if track_dict.get('@type') == 'Video': + audio_tracks.append(track_dict) + + return audio_tracks + + +async def extract_audio_languages(audio_tracks: list[dict[str, Any]], meta: dict[str, Any]) -> list[str]: + + audio_langs: list[str] = [] + + for track in audio_tracks: + lang = track.get('Language', '') + if lang: + lang_code = await map_language(str(lang)) + if lang_code and lang_code not in 
audio_langs: + audio_langs.append(lang_code) + + if not audio_langs and meta.get('audio_languages'): + audio_languages = meta.get('audio_languages') + audio_languages_list: list[Any] = cast( + list[Any], audio_languages) if isinstance(audio_languages, list) else [] + for lang in audio_languages_list: + lang_code = await map_language(str(lang)) + if lang_code and lang_code not in audio_langs: + audio_langs.append(lang_code) + + return audio_langs + + +async def map_language(lang: str) -> str: + if not lang: + return '' + + lang_map = { + 'spa': 'ESP', 'es': 'ESP', 'spanish': 'ESP', 'español': 'ESP', 'castellano': 'ESP', 'es-es': 'ESP', + 'eng': 'ENG', 'en': 'ENG', 'english': 'ENG', 'en-us': 'ENG', 'en-gb': 'ENG', + 'lat': 'LAT', 'latino': 'LAT', 'latin american spanish': 'LAT', 'es-mx': 'LAT', 'es-419': 'LAT', + 'fre': 'FRA', 'fra': 'FRA', 'fr': 'FRA', 'french': 'FRA', 'français': 'FRA', 'fr-fr': 'FRA', 'fr-ca': 'FRA', + 'ger': 'ALE', 'deu': 'ALE', 'de': 'ALE', 'german': 'ALE', 'deutsch': 'ALE', + 'jpn': 'JAP', 'ja': 'JAP', 'japanese': 'JAP', '日本語': 'JAP', + 'kor': 'COR', 'ko': 'COR', 'korean': 'COR', '한국어': 'COR', + 'ita': 'ITA', 'it': 'ITA', 'italian': 'ITA', 'italiano': 'ITA', + 'por': 'POR', 'pt': 'POR', 'portuguese': 'POR', 'português': 'POR', 'pt-br': 'POR', 'pt-pt': 'POR', + 'chi': 'CHI', 'zho': 'CHI', 'zh': 'CHI', 'chinese': 'CHI', 'mandarin': 'CHI', '中文': 'CHI', 'zh-cn': 'CHI', + 'rus': 'RUS', 'ru': 'RUS', 'russian': 'RUS', 'русский': 'RUS', + 'ara': 'ARA', 'ar': 'ARA', 'arabic': 'ARA', + 'hin': 'HIN', 'hi': 'HIN', 'hindi': 'HIN', + 'tha': 'THA', 'th': 'THA', 'thai': 'THA', + 'vie': 'VIE', 'vi': 'VIE', 'vietnamese': 'VIE', + } + + lang_lower = str(lang).lower().strip() + mapped = lang_map.get(lang_lower) + + if mapped: + return mapped + + return lang.upper()[:3] if len(lang) >= 3 else lang.upper() + + +async def get_original_language(meta: dict[str, Any]) -> Optional[str]: + + original_lang = None + + if meta.get('original_language'): + original_lang = 
str(meta['original_language']) + + if not original_lang: + imdb_info_raw = meta.get('imdb_info') + imdb_info: dict[str, Any] = cast( + dict[str, Any], imdb_info_raw) if isinstance(imdb_info_raw, dict) else {} + imdb_lang: Any = imdb_info.get('language') + + if isinstance(imdb_lang, list): + imdb_lang_list = cast(list[Any], imdb_lang) + imdb_lang = imdb_lang_list[0] if imdb_lang_list else '' + + if imdb_lang: + if isinstance(imdb_lang, dict): + imdb_lang_dict = cast(dict[str, Any], imdb_lang) + imdb_lang_text = imdb_lang_dict.get('text', '') + original_lang = str(imdb_lang_text).strip() + elif isinstance(imdb_lang, str): + original_lang = imdb_lang.strip() + else: + original_lang = str(imdb_lang).strip() + + if original_lang: + return await map_language(str(original_lang)) + + return None + + +async def has_french_subs(meta: dict[str, Any]) -> bool: + + if 'mediainfo' not in meta or 'media' not in meta['mediainfo']: + return False + media_info = meta['mediainfo'] + if not isinstance(media_info, dict): + return False + media_info_dict = cast(dict[str, Any], media_info) + media = media_info_dict.get('media') + if not isinstance(media, dict): + return False + media_dict = cast(dict[str, Any], media) + tracks = media_dict.get('track', []) + if not isinstance(tracks, list): + return False + + tracks_list = cast(list[Any], tracks) + for track in tracks_list: + if not isinstance(track, dict): + continue + track_dict = cast(dict[str, Any], track) + if track_dict.get('@type') == 'Text': + lang = track_dict.get('Language', '') + lang = lang.lower() if isinstance(lang, str) else '' + + title = track_dict.get('Title', '') + title = title.lower() if isinstance(title, str) else '' + + if lang in ["french", "fre", "fra", "fr", "français", "francais", 'fr-fr', 'fr-ca']: + return True + if 'french' in title or 'français' in title or 'francais' in title: + return True + + return False + + +async def map_audio_codec(audio_track: dict[str, Any]) -> str: + codec = 
str(audio_track.get('Format', '')).upper() + + if 'atmos' in str(audio_track.get('Format_AdditionalFeatures', '')).lower(): + return 'Atmos' + + codec_map = { + 'AAC LC': 'AAC LC', 'AAC': 'AAC', 'AC-3': 'AC3', 'AC3': 'AC3', + 'E-AC-3': 'EAC3', 'EAC3': 'EAC3', 'DTS': 'DTS', + 'DTS-HD MA': 'DTS-HD MA', 'DTS-HD HRA': 'DTS-HD HRA', + 'TRUEHD': 'TrueHD', 'MLP FBA': 'MLP', 'PCM': 'PCM', + 'FLAC': 'FLAC', 'OPUS': 'OPUS', 'MP3': 'MP3', + } + + return codec_map.get(codec, codec) + + +async def get_audio_channels(audio_track: dict[str, Any]) -> str: + channels = audio_track.get('Channels', '') + channel_map = { + '1': 'Mono', '2': '2.0', '3': '3.0', + '4': '3.1', '5': '5.0', '6': '5.1', '8': '7.1', + } + return channel_map.get(str(channels), '0') + + +async def get_audio_name(meta: dict[str, Any]) -> str: + audio_track = await get_audio_tracks(meta, True) + if not audio_track: + return "" + has_french_audio = any(item.get('Language', '') in ( + 'fr', 'fr-fr', 'fr-ca')for item in audio_track) + audio_parts: list[str] = [] + if has_french_audio: + for _, item in enumerate(audio_track): + if item['Language'] == "fr" or item['Language'] == "fr-fr" or item['Language'] == "fr-ca": + codec = await map_audio_codec(item) + channels = await get_audio_channels(item) + audio_parts.append(f"{codec} {channels}") + audio = ' '.join(audio_parts) + return audio + else: + for _, item in enumerate(audio_track): + if item.get('Default') == "Yes": + codec = await map_audio_codec(item) + channels = await get_audio_channels(item) + audio_parts.append(f"{codec} {channels}") + audio = ' '.join(audio_parts) + return audio + return "" + + +async def translate_genre(text: str) -> str: + mapping = { + 'Action': 'Action', + 'Adventure': 'Aventure', + 'Fantasy': 'Fantastique', + 'History': 'Histoire', + 'Horror': 'Horreur', + 'Music': 'Musique', + 'Romance': 'Romance', + 'Science Fiction': 'Science-fiction', + 'TV Movie': 'Téléfilm', + 'Thriller': 'Thriller', + 'War': 'Guerre', + 'Action & Adventure': 
'Action & aventure', + 'Animation': 'Animation', + 'Comedy': 'Comédie', + 'Crime': 'Policier', + 'Documentary': 'Documentaire', + 'Drama': 'Drame', + 'Family': 'Famille', + 'Kids': 'Enfants', + 'Mystery': 'Mystère', + 'News': 'Actualités', + 'Reality': 'Réalité', + 'Sci-Fi & Fantasy': 'Science-fiction & fantastique', + 'Soap': 'Feuilletons', + 'Sport': 'Sport', + 'Talk': 'Débats', + 'War & Politics': 'Guerre & politique', + 'Western': 'Western' + } + result = [] + + for word in map(str.strip, text.split(",")): + if word in mapping: + result.append(mapping[word]) + else: + result.append(f"*{word}*") + + return ", ".join(result) + + +async def clean_name(input_str: str) -> str: + ascii_str = unidecode(input_str) + invalid_char = set('<>"/\\|?*') # ! . , : ; @ # $ % ^ & */ \" '_ + result = [] + for char in ascii_str: + if char in invalid_char: + continue + result.append(char) + + return "".join(result) + + +async def get_translation_fr(meta: dict[str, Any]) -> tuple[str, str]: + + fr_title = meta.get("frtitle") + fr_overwiew = meta.get("froverview") + if fr_title and fr_overwiew: + return fr_title, fr_overwiew + + # Try to get from IMDb with priority: country match, then language match + imdb_info_raw = meta.get('imdb_info') + imdb_info: dict[str, Any] = cast( + dict[str, Any], imdb_info_raw) if isinstance(imdb_info_raw, dict) else {} + akas_raw = imdb_info.get('akas', []) + akas: list[Any] = cast(list[Any], akas_raw) if isinstance( + akas_raw, list) else [] + french_title = None + country_match = None + language_match = None + + for aka in akas: + if isinstance(aka, dict): + aka_dict = cast(dict[str, Any], aka) + if aka_dict.get("country") in ["France", "FR"]: + country_match = aka_dict.get("title") + break # Country match takes priority + elif aka_dict.get("language") in ["France", "French", "FR"] and not language_match: + language_match = aka_dict.get("title") + + french_title = country_match or language_match + + tmdb_id = int(meta["tmdb_id"]) + category = 
str(meta["category"])
    tmdb_title, tmdb_overview = await get_tmdb_translations(tmdb_id, category, "fr")
    # fallback in case the translated title is empty
    if tmdb_title == '':
        tmdb_title = meta.get("title","")

    # Cache on meta so repeated calls skip the IMDb/TMDb lookups.
    meta["frtitle"] = french_title or tmdb_title
    meta["froverview"] = tmdb_overview

    if french_title is not None:
        return french_title, tmdb_overview
    else:
        return tmdb_title, tmdb_overview


async def get_tmdb_translations(tmdb_id: int, category: str, target_language: str) -> tuple[str, str]:
    """Fetch (title, overview) for ``tmdb_id`` translated into ``target_language``.

    Best effort: any network or parsing failure yields ("", "") so callers
    can fall back to the untranslated metadata.
    """
    endpoint = "movie" if category == "MOVIE" else "tv"
    url = f"https://api.themoviedb.org/3/{endpoint}/{tmdb_id}/translations"
    # NOTE(review): defaults to False when unset, which sends api_key=False in
    # the query string — confirm whether a missing key should skip the call.
    tmdb_api_key = config['DEFAULT'].get('tmdb_api', False)
    async with httpx.AsyncClient(timeout=30) as client:
        try:
            response = await client.get(url, params={"api_key": tmdb_api_key})
            response.raise_for_status()
            data = response.json()

            # Look for target language translation
            for translation in data.get('translations', []):
                if translation.get('iso_639_1') == target_language:
                    translated_data = translation.get('data', {})
                    translated_desc = translated_data.get('overview')
                    translated_title = translated_data.get(
                        'title') or translated_data.get('name')

                    return translated_title or "", translated_desc or ""
            return "", ""

        except Exception:
            # Deliberate best-effort swallow: translations are optional.
            return "", ""

# unknown return type


async def get_desc_full(meta: dict[str, Any], tracker) -> str:
    """Return a full tracker description.

    The function used to build the description piece by piece, but now we prefer
    to render a Jinja2 template. A few points:

    * If ``meta['description_template']`` is set it will be used first.
    * Otherwise a default template named after the tracker (e.g. ``C411``) is
      looked up under ``data/templates``. A generic ``FRENCH`` template is also
      provided for shared structure.
    * If no template is found we fall back to the original hard‑coded logic so
      existing behaviour remains unchanged.
+ """ + import os + from jinja2 import Template + + # gather information that will be useful to both the template and the + # legacy builder + video_track = await get_video_tracks(meta) + if not video_track: + return '' + + mbps = 0.0 + if video_track and video_track[0].get('BitRate'): + try: + mbps = int(video_track[0]['BitRate']) / 1_000_000 + except (ValueError, TypeError): + pass + + title, description = await get_translation_fr(meta) + genre = await translate_genre(meta['combined_genres']) + audio_tracks = await get_audio_tracks(meta, False) + if not audio_tracks: + return '' + + subtitle_tracks = await get_subtitle_tracks(meta) + size_bytes = int(meta.get('source_size') or 0) + size_gib = size_bytes / (1024 ** 3) + poster = str(meta.get('poster', "")) + year = str(meta.get('year', "")) + original_title = str(meta.get('original_title', "")) + pays = str(meta.get('imdb_info', {}).get('country', '')) + release_date = str(meta.get('release_date', "")) + video_duration = str(meta.get('video_duration', "")) + source = str(meta.get('source', "")) + type = str(meta.get('type', "")) + resolution = str(meta.get('resolution', "")) + container = str(meta.get('container', "")) + video_codec = str(meta.get('video_codec', "")) + hdr = str(meta.get('hdr', "")) + if "DV" in hdr: + if video_track and video_track[0].get('HDR_Format_Profile'): + try: + dv = str(video_track[0]['HDR_Format_Profile']).replace('dvhe.0', '').replace('/', '').strip() + hdr = hdr.replace('DV', '') + hdr = f"{hdr} DV{dv}" + except (ValueError, TypeError): + pass + + tag = str(meta.get('tag', "")).replace('-', '') + service_longname = str(meta.get('service_longname', "")) + season = str(meta.get('season_int', '')) + episode = str(meta.get('episode_int', '')) + if episode == '0': + episode = '' + + # pre‑compute the lines that were previously appended to ``desc_parts`` + audio_lines: list[str] = [] + audio_lines_dict = [] + for obj in audio_tracks: + if isinstance(obj, dict): + bitrate = 
obj.get('BitRate') + kbps = int(bitrate) / 1_000 if bitrate else 0 + + flags: list[str] = [] + if obj.get("Forced") == "Yes": + flags.append("Forced") + if obj.get("Default") == "Yes": + flags.append("Default") + if "commentary" in str(obj.get('Title')).lower(): + flags.append("Commentary") + if " ad" in str(obj.get('Title')).lower(): + flags.append("Audio Description") + line_dict = {} + line_dict['language'] = obj['Language'] + line_dict['format'] = obj['Format'] + line_dict['channels'] = obj['Channels'] + line_dict['bitrate'] = kbps + + + line = f"{obj['Language']} / {obj['Format']} / {obj['Channels']}ch / {kbps:.2f}KB/s" + if flags: + line += " / " + " / ".join(flags) + audio_lines.append(line) + audio_lines_dict.append(line_dict) + else: + audio_lines.append(f"*{obj}*") + + subtitle_lines: list[str] = [] + subtitle_lines_dict = [] + if subtitle_tracks: + for obj in subtitle_tracks: + if isinstance(obj, dict): + flags: list[str] = [] + if obj.get("Forced") == "Yes": + flags.append("Forced") + if obj.get("Default") == "Yes": + flags.append("Default") + line = f"{obj['Language']} / {obj['Format']}" + if flags: + line += " / " + " / ".join(flags) + line_dict = {} + line_dict['language'] = obj['Language'] + line_dict['format'] = obj['Format'] + line_dict['type'] = ", ".join(flags) + subtitle_lines.append(line) + subtitle_lines_dict.append(line_dict) + + else: + subtitle_lines.append(f"*{obj}*") + + images = meta[f'{tracker}_images_key'] if f'{tracker}_images_key' in meta else meta['image_list'] + + context = { + 'poster': poster, + 'title': title, + 'year': year, + 'season': season, + 'episode': episode, + 'original_title': original_title, + 'pays': pays, + 'genre': genre, + 'release_date': release_date, + 'video_duration': video_duration, + 'imdb_url': meta.get('imdb_info', {}).get('imdb_url', ''), + 'tmdb': meta.get('tmdb', ''), + 'category': meta.get('category', ''), + 'tvdb_id': meta.get('tvdb_id', ''), + 'tvmaze_id': meta.get('tvmaze_id', ''), + 'mal_id': 
meta.get('mal_id', ''), + 'description': description, + 'audio_lines': audio_lines, + 'audio_lines_dict': audio_lines_dict, + 'subtitle_lines': subtitle_lines, + 'subtitle_lines_dict': subtitle_lines_dict, + 'source': source, + 'service_longname': service_longname, + 'type': type, + 'resolution': resolution, + 'container': container, + 'video_codec': video_codec, + 'hdr': hdr, + 'mbps': mbps, + 'tag': tag, + 'size_gib': size_gib, + 'images': images, + 'signature': meta.get('ua_signature', ''), + } + + # try to render a template if one exists + # determine which template to use; prefer explicit setting, then + # tracker-specific file, then fall back to a generic "FRENCH" template. + description_text = '' + primary = meta.get('description_template') or tracker + template_path = os.path.abspath(f"{meta['base_dir']}/data/templates/{primary}.txt") + + if not os.path.exists(template_path): + # try the shared french template + template_path = os.path.abspath(f"{meta['base_dir']}/data/templates/FRENCH.txt") + + if os.path.exists(template_path): + + async with aiofiles.open(template_path, 'r', encoding='utf-8') as description_file: + template_content = await description_file.read() + try: + description_text = Template(template_content).render(**context) + except Exception: + # if rendering fails fall back to the old builder below + description_text = '' + + if not description_text: + # fallback to the original behaviour (preserve before change) + desc_parts: list[str] = [] + desc_parts.append(f"[img]{poster}[/img]") + desc_parts.append( + f"[b][font=Verdana][color=#3d85c6][size=29]{title}[/size][/font]") + desc_parts.append(f"[size=18]{year}[/size][/color][/b]") + + if meta['category'] == "TV": + season = f"S{season}" if season else "" + episode = f"E{episode}" if episode else "" + desc_parts.append(f"[b][size=18]{season}{episode}[/size][/b]") + + desc_parts.append( + f"[font=Verdana][size=13][b][color=#3d85c6]Titre original :[/color][/b] 
[i]{original_title}[/i][/size][/font]") + desc_parts.append( + f"[b][color=#3d85c6]Pays :[/color][/b] [i]{pays}[/i]") + desc_parts.append(f"[b][color=#3d85c6]Genres :[/color][/b] [i]{genre}[/i]") + desc_parts.append( + f"[b][color=#3d85c6]Date de sortie :[/color][/b] [i]{release_date}[/i]") + + if meta['category'] == 'MOVIE': + desc_parts.append( + f"[b][color=#3d85c6]Durée :[/color][/b] [i]{video_duration} Minutes[/i]") + + if meta['imdb_id']: + desc_parts.append(f"[url={meta.get('imdb_info', {}).get('imdb_url', '')}]IMDb[/url]") + if meta['tmdb']: + desc_parts.append( + f"[url=https://www.themoviedb.org/{str(meta['category'].lower())}/{str(meta['tmdb'])}]TMDB[/url]") + if meta['tvdb_id']: + desc_parts.append( + f"[url=https://www.thetvdb.com/?id={str(meta['tvdb_id'])}&tab=series]TVDB[/url]") + if meta['tvmaze_id']: + desc_parts.append( + f"[url=https://www.tvmaze.com/shows/{str(meta['tvmaze_id'])}]TVmaze[/url]") + if meta['mal_id']: + desc_parts.append( + f"[url=https://myanimelist.net/anime/{str(meta['mal_id'])}]MyAnimeList[/url]") + + desc_parts.append("[img]https://i.imgur.com/W3pvv6q.png[/img]") + desc_parts.append(f"{description}") + desc_parts.append("[img]https://i.imgur.com/KMZsqZn.png[/img]") + desc_parts.append( + f"[b][color=#3d85c6]Source :[/color][/b] [i]{source} {service_longname}[/i]") + desc_parts.append( + f"[b][color=#3d85c6]Type :[/color][/b] [i]{type}[/i]") + desc_parts.append( + f"[b][color=#3d85c6]Résolution vidéo :[/color][/b][i]{resolution}[/i]") + desc_parts.append( + f"[b][color=#3d85c6]Format vidéo :[/color][/b] [i]{container}[/i]") + desc_parts.append( + f"[b][color=#3d85c6]Codec vidéo :[/color][/b] [i]{video_codec} {hdr}[/i]") + desc_parts.append( + f"[b][color=#3d85c6]Débit vidéo :[/color][/b] [i]{mbps:.2f} MB/s[/i]") + desc_parts.append("[b][color=#3d85c6] Audio(s) :[/color][/b]") + desc_parts.extend(audio_lines) + if subtitle_lines: + desc_parts.append("[b][color=#3d85c6]Sous-titres :[/color][/b]") + 
desc_parts.extend(subtitle_lines) + desc_parts.append(f"[b][color=#3d85c6]Team :[/color][/b] [i]{tag}[/i]") + desc_parts.append(f"[b][color=#3d85c6] Taille totale :[/color][/b] {size_gib:.2f} GB") + if images: + screenshots_block = '' + for image in images: + screenshots_block += f"[img]{image['raw_url']}[/img]\n" + desc_parts.append(screenshots_block) + desc_parts.append( + f"[url=https://github.com/Audionut/Upload-Assistant]{meta['ua_signature']}[/url]") + description_text = '\n'.join(part for part in desc_parts if part.strip()) + + # persist to disk for debugging/inspection + async with aiofiles.open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]DESCRIPTION.json", 'w', encoding='utf-8') as description_file: + await description_file.write(description_text) + + return description_text diff --git a/src/trackersetup.py b/src/trackersetup.py index bf9532c1f..b2e49ccfe 100644 --- a/src/trackersetup.py +++ b/src/trackersetup.py @@ -26,6 +26,7 @@ from src.trackers.BJS import BJS from src.trackers.BLU import BLU from src.trackers.BT import BT +from src.trackers.C411 import C411 from src.trackers.CBR import CBR from src.trackers.COMMON import COMMON from src.trackers.CZ import CZ @@ -1337,7 +1338,7 @@ async def make_trumpable_report(self, meta: Meta, tracker: str) -> bool: tracker_class_map: dict[str, type[Any]] = { - 'A4K': A4K, 'ACM': ACM, 'AITHER': AITHER, 'ANT': ANT, 'AR': AR, 'ASC': ASC, 'AZ': AZ, 'BHD': BHD, 'BHDTV': BHDTV, 'BJS': BJS, 'BLU': BLU, 'BT': BT, 'CBR': CBR, +'A4K': A4K, 'ACM': ACM, 'AITHER': AITHER, 'ANT': ANT, 'AR': AR, 'ASC': ASC, 'AZ': AZ, 'BHD': BHD, 'BHDTV': BHDTV, 'BJS': BJS, 'BLU': BLU, 'BT': BT, 'C411': C411, 'CBR': CBR, 'CZ': CZ, 'DC': DC, 'DP': DP, 'DT': DT, 'EMUW': EMUW, 'FNP': FNP, 'FF': FF, 'FL': FL, 'FRIKI': FRIKI, 'GPW': GPW, 'HDB': HDB, 'HDS': HDS, 'HDT': HDT, 'HHD': HHD, 'HUNO': HUNO, 'ITT': ITT, 'IHD': IHD, 'IS': IS, 'LCD': LCD, 'LDU': LDU, 'LST': LST, 'LT': LT, 'LUME': LUME, 'MTV': MTV, 'NBL': NBL, 'OE': OE, 'OTW': OTW, 'PHD': 
PHD, 'PT': PT, 'PTP': PTP, 'PTER': PTER, 'PTS': PTS, 'PTT': PTT, 'R4E': R4E, 'RAS': RAS, 'RF': RF, 'RTF': RTF, 'SAM': SAM, 'SHRI': SHRI, 'SN': SN, 'SP': SP, 'SPD': SPD, 'STC': STC, 'THR': THR, @@ -1350,7 +1351,7 @@ async def make_trumpable_report(self, meta: Meta, tracker: str) -> bool: } other_api_trackers = { - 'ANT', 'BHDTV', 'DC', 'GPW', 'NBL', 'RTF', 'SN', 'SPD', 'TL', 'TVC' + 'ANT', 'BHDTV', 'C411', 'DC', 'GPW', 'NBL', 'RTF', 'SN', 'SPD', 'TL', 'TVC' } http_trackers = { diff --git a/src/video.py b/src/video.py index fa6c1c905..0cf2e5eeb 100644 --- a/src/video.py +++ b/src/video.py @@ -153,7 +153,9 @@ async def get_video(self, videoloc: str, mode: str, sorted_filelist: bool = Fals if debug: console.print("[blue]Scanning directory for video files...[/blue]") try: - entries = [e for e in os.listdir(videoloc) if os.path.isfile(os.path.join(videoloc, e))] + # entries = [e for e in os.listdir(videoloc) if os.path.isfile(os.path.join(videoloc, e))] + entries = [os.path.join(dp, f) for dp, dn, filenames in os.walk(videoloc) for f in filenames ] + #entries = list (os.walk(videoloc)) except Exception: entries = []