diff --git a/.github/workflows/ci-core-unit.yml b/.github/workflows/ci-core-unit.yml index 869fd93..90de09f 100644 --- a/.github/workflows/ci-core-unit.yml +++ b/.github/workflows/ci-core-unit.yml @@ -5,6 +5,8 @@ on: pull_request: paths: - "core/**" + - "main.py" + - "locales.json" - "tests/test_inspector.py" - "tests/test_runner.py" - "tests/test_merge_dovi_workflow.py" @@ -12,6 +14,7 @@ on: - "tests/test_media_info_fetcher.py" - "tests/test_encode_models.py" - "tests/test_encode_progress.py" + - "tests/test_encode_hw_devices.py" - "tests/test_encode_hardware.py" - "tests/test_setup_and_config.py" - "tests/test_subprocess_utils.py" @@ -24,6 +27,8 @@ on: - master paths: - "core/**" + - "main.py" + - "locales.json" - "tests/test_inspector.py" - "tests/test_runner.py" - "tests/test_merge_dovi_workflow.py" @@ -31,6 +36,7 @@ on: - "tests/test_media_info_fetcher.py" - "tests/test_encode_models.py" - "tests/test_encode_progress.py" + - "tests/test_encode_hw_devices.py" - "tests/test_encode_hardware.py" - "tests/test_setup_and_config.py" - "tests/test_subprocess_utils.py" @@ -78,6 +84,7 @@ jobs: tests/test_media_info_fetcher.py \ tests/test_encode_models.py \ tests/test_encode_progress.py \ + tests/test_encode_hw_devices.py \ tests/test_encode_hardware.py \ tests/test_setup_and_config.py \ tests/test_subprocess_utils.py \ diff --git a/.github/workflows/ci-encode-integration.yml b/.github/workflows/ci-encode-integration.yml index d8a8866..9007770 100644 --- a/.github/workflows/ci-encode-integration.yml +++ b/.github/workflows/ci-encode-integration.yml @@ -8,6 +8,8 @@ on: pull_request: paths: - "core/workflows/encode/**" + - "tests/test_encode_hw_devices.py" + - "tests/test_encode_hardware.py" - "tests/test_encode_workflow.py" - "tests/conftest.py" - ".github/workflows/ci-encode-integration.yml" @@ -17,6 +19,8 @@ on: - master paths: - "core/workflows/encode/**" + - "tests/test_encode_hw_devices.py" + - "tests/test_encode_hardware.py" - "tests/test_encode_workflow.py" - 
"tests/conftest.py" - ".github/workflows/ci-encode-integration.yml" @@ -49,4 +53,8 @@ jobs: - name: Run Encode Integration Tests run: | - pytest tests/test_encode_workflow.py -v + pytest \ + tests/test_encode_hw_devices.py \ + tests/test_encode_hardware.py \ + tests/test_encode_workflow.py \ + -v diff --git a/.github/workflows/ci-qt-ui.yml b/.github/workflows/ci-qt-ui.yml index ceac80d..005e3ea 100644 --- a/.github/workflows/ci-qt-ui.yml +++ b/.github/workflows/ci-qt-ui.yml @@ -8,6 +8,8 @@ on: pull_request: paths: - "ui/**" + - "main.py" + - "locales.json" - "core/i18n.py" - "core/config.py" - "core/runner.py" @@ -32,6 +34,8 @@ on: - master paths: - "ui/**" + - "main.py" + - "locales.json" - "core/i18n.py" - "core/config.py" - "core/runner.py" diff --git a/.github/workflows/windows-encode-tests.yml b/.github/workflows/windows-encode-tests.yml new file mode 100644 index 0000000..c2989ea --- /dev/null +++ b/.github/workflows/windows-encode-tests.yml @@ -0,0 +1,59 @@ +name: Windows Encode Tests + +on: + workflow_dispatch: + pull_request: + paths: + - "core/workflows/encode/**" + - "tests/test_encode_hw_devices.py" + - "tests/test_encode_hardware.py" + - "tests/test_encode_workflow.py" + - "tests/conftest.py" + - "scripts/windows_pytest_bootstrap.py" + - ".github/workflows/windows-encode-tests.yml" + push: + branches: + - main + - master + paths: + - "core/workflows/encode/**" + - "tests/test_encode_hw_devices.py" + - "tests/test_encode_hardware.py" + - "tests/test_encode_workflow.py" + - "tests/conftest.py" + - "scripts/windows_pytest_bootstrap.py" + - ".github/workflows/windows-encode-tests.yml" + +jobs: + encode-targeted: + runs-on: windows-latest + timeout-minutes: 30 + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install Test Dependencies + shell: pwsh + run: | + python -m pip install --upgrade pip + python -m pip install pytest + winget install --id 
Gyan.FFmpeg -e --silent --accept-source-agreements --accept-package-agreements --disable-interactivity + $ffPath = (Get-ChildItem "$env:LOCALAPPDATA\Microsoft\WinGet\Packages\Gyan.FFmpeg*" -Directory | Select-Object -First 1).FullName + $binDir = (Get-ChildItem -Path $ffPath -Recurse -Filter "ffmpeg.exe" | Select-Object -First 1).DirectoryName + echo "$binDir" | Out-File -FilePath $env:GITHUB_PATH -Append -Encoding utf8 + & "$binDir\ffmpeg.exe" -version + & "$binDir\ffprobe.exe" -version + + - name: Run Targeted Encode Tests + shell: pwsh + run: | + python scripts/windows_pytest_bootstrap.py ` + tests/test_encode_hw_devices.py ` + tests/test_encode_hardware.py ` + tests/test_encode_workflow.py diff --git a/README.md b/README.md index 4b03fab..485338c 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ FULL Vibecoded App for Proof of Concept - no human code, only human prompts and Interface graphique pour préparer des fichiers vidéo, remuxer sans perte, réencoder avec `ffmpeg`, et fusionner des métadonnées Dolby Vision / HDR10+. -Cette documentation correspond à **Mediarecode v1.4.1**. +Cette documentation correspond à **Mediarecode v2.0.0**. ## Sommaire @@ -34,7 +34,7 @@ Cette documentation correspond à **Mediarecode v1.4.1**. 
- [Backend remux `ffmpeg` — Branches internes](#backend-remux-ffmpeg--branches-internes) - [Encode workflow — Branches internes](#encode-workflow--branches-internes) - [Fusion DoVi / HDR10+](#fusion-dovi--hdr10-1) - - [Détection HDR d'un fichier](#détection-hdr-dun-fichier) + - [Inspection d'un fichier (ffprobe + mediainfo)](#inspection-dun-fichier-ffprobe--mediainfo) - [Outils externes](#outils-externes) ## Vue rapide @@ -199,6 +199,7 @@ Le tableau de bord affiche : - les dossiers configurés (travail, sortie, app data) - les encodeurs logiciels vus par `ffmpeg -encoders` - les encodeurs matériels réellement testés au runtime (`NVENC`, `AMF`, `VAAPI`, `QSV`) +- les plans d'encodage et badges utiles pour visualiser plus clairement les traitements prepares > Les encodeurs matériels ne sont pas marqués disponibles simplement parce qu'ils apparaissent dans `ffmpeg`. L'application lance un probe réel pour confirmer qu'ils fonctionnent. Les probes sont exécutés en parallèle pour minimiser le délai au démarrage. 
@@ -221,10 +222,12 @@ Le workflow unifié permet de : - préparer une cover TMDB en mode différé (URL + nom de fichier) ; le téléchargement réel est fait au lancement du workflow - remplacer automatiquement le **titre du conteneur** par le titre formaté TMDB lors de la validation (film : `Titre (Année)`, série : `Titre - SxxExx - Titre épisode`) - choisir pour chaque piste audio un mode `copy`, `aac`, `eac3` ou `flac` +- definir plusieurs traitements video lorsqu'un projet demande une preparation multi-pistes plus fine - choisir pour la vidéo `copy`, `libx265`, `libx264`, `libsvtav1`, `NVENC`, `AMF`, `VAAPI` ou `QSV` — avec support complet **HEVC**, **H.264** et **AV1** sur chaque famille matérielle - presets dédiés par famille matérielle : `NVENC_PRESETS` (p1-p7 + slow/medium/fast/hp/hq), `VAAPI_PRESETS` (compression_level 0-7), `QSV_PRESETS` (veryslow → veryfast), `AMF_PRESETS` (quality/balanced/speed) - **offload matériel complet** : décodage GPU activé automatiquement quand un encodeur matériel compatible est sélectionné (`cuda` pour NVENC, `qsv` pour QSV, `vaapi` pour VAAPI, `d3d11va` pour AMF Windows) — le CPU n'est plus sollicité pour le décodage en chemin pur hardware - configuration VAAPI optimisée : `rc_mode CQP/VBR` selon le mode qualité, `compression_level` exposé via preset, `async_depth 4` pour maximiser le pipeline GPU +- precheck `force-8bit` pour les cibles **H.264** afin d'eviter certains chemins incompatibles - backend de remux nominal : `ffmpeg` Modes d'exécution : @@ -266,6 +269,7 @@ Ergonomie du panneau : - aperçu **cover** cliquable avec modale zoom plein écran (cover TMDB, cover Matroska extraite via `core/matroska_attachment_extractor.py`, pièces jointes image) - **barre de progression à rattrapage exact** : tant que `ffmpeg -progress` n'émet pas encore `out_time`, la progression est estimée via `frame=` et le nombre total d'images du fichier source ; dès que `out_time` devient disponible, la valeur exacte prend le relais +- progression "la 
plus lente" pour les preparations video multi-pistes, avec suivi plus detaille par traitement disponible - **suppression de source accélérée** via suivi incrémental (pas de rescan global à chaque retrait) - covers TMDB cliquables dans les résultats de recherche (aperçu grand format avant validation) @@ -301,6 +305,7 @@ Le panneau **Paramètres** est un éditeur complet de `config.ini` intégré à - **Remux** : backend `ffmpeg` (nominal) - **Outils externes** : chemins explicites pour chaque outil (`ffmpeg`, `ffprobe`, `mediainfo`, `dovi_tool`, `hdr10plus_tool`, etc.) - **Encodage** : profil DoVi, compat-id, buffer RAM +- **Logs** : niveau de verbosite, journal fichier, rotation et capture des sorties outils dans les options - **Métadonnées** : auth TMDB via clé API v3 (`tmdb_api_key`) ou token Bearer v4 (`tmdb_bearer_token`), génération optionnelle de `.nfo` (`generate_nfo`) Les changements sont appliqués section par section ou en une seule fois via le bouton **Sauvegarder toute la configuration**. Un rechargement depuis `config.ini` est possible sans redémarrer l'application. @@ -415,7 +420,7 @@ flowchart TD A["Sources MKV, MP4 ou SRT"] --> B["Inspection via ffprobe"] B --> C["Edition conteneur dans RemuxPanel"] C --> D["Options video, audio et HDR dans EncodePanel"] - D --> E{"Video copy\nAudio copy\nAucune transformation HDR"} + D --> E{"Video copy
Audio copy
Aucune transformation HDR"} E -->|Oui| R0["WORKFLOW TYPE - REMUX"] R0 --> R1["Workflow remux FFmpeg"] @@ -432,9 +437,9 @@ flowchart TD flowchart TD A["WORKFLOW TYPE - REMUX"] --> B["STEP 1 - Validation configuration"] B --> C["STEP 2 - Preparation workspace, attachments et cover TMDB"] - C --> D["STEP 3 - Analyse mapping pistes + pre-scan de risque\nextraction attached_pic si present"] - D --> E{"Risque multi-source\nstrict interleave"} - E -->|Oui| F["STEP 4 - Synchronisation timeline multi-source\nFIFO, Named Pipe ou fallback fichier"] + C --> D["STEP 3 - Analyse mapping pistes + pre-scan de risque
extraction attached_pic si present"] + D --> E{"Risque multi-source
strict interleave"} + E -->|Oui| F["STEP 4 - Synchronisation timeline multi-source
FIFO, Named Pipe ou fallback fichier"] E -->|Non| G["STEP 4 - Synchronisation timeline non requise"] F --> H["STEP 5 - Chapitres : override FFMetadata ou copie source"] G --> H @@ -452,19 +457,19 @@ flowchart TD B --> C["STEP 2 - Preparation workspace et attachments"] C --> D["STEP 3 - Normalisation des options HDR dynamiques"] D --> E["STEP 4 - Routage du workflow"] - E --> F{"Injection fichier\nDoVi ou HDR10+\nnecessaire"} + E --> F{"Injection fichier
DoVi ou HDR10+
necessaire"} - F -->|Oui| K["STEP 5 - Extraction des metadata dynamiques\nDoVi et ou HDR10+"] + F -->|Oui| K["STEP 5 - Extraction des metadata dynamiques
DoVi et ou HDR10+"] K --> L["STEP 6 - Encodage video seule vers enc.hevc"] L --> M["STEP 7 - Injection HDR10+ et ou DoVi"] M --> N["STEP 8 - Encapsulation timeline video injectee"] - N --> O["STEP 9 - Reconstruction finale MKV\nSync timeline si risque detecte"] + N --> O["STEP 9 - Reconstruction finale MKV
Sync timeline si risque detecte"] - F -->|Non| G["STEP 5 - Construction de la commande ffmpeg\nsortie directe"] + F -->|Non| G["STEP 5 - Construction de la commande ffmpeg
sortie directe"] G --> H["STEP 6 - Preparation sync/remap + commande(s)"] H --> P{"Quality mode = SIZE"} - P -->|Oui| Q["STEP 7 - Execution ffmpeg en 2 passes\nsync timeline si risque detecte"] - P -->|Non| R["STEP 7 - Execution ffmpeg en single pass\nsync timeline si risque detecte"] + P -->|Oui| Q["STEP 7 - Execution ffmpeg en 2 passes
sync timeline si risque detecte"] + P -->|Non| R["STEP 7 - Execution ffmpeg en single pass
sync timeline si risque detecte"] O --> Z["Sortie MKV"] Q --> Z @@ -484,13 +489,13 @@ Lecture rapide : ```mermaid flowchart TD - A([Film 1 + Film 2]) --> B[Validation\nfichiers, extensions, outils,\nHEVC Film 1/2, HDR dans Film 2] - B --> C[Comparaison frame count\ntolerance <= 4] - C --> D{Film 2 = MKV\net DoVi + HDR10+ ?} + A([Film 1 + Film 2]) --> B[Validation
fichiers, extensions, outils,
HEVC Film 1/2, HDR dans Film 2] + B --> C[Comparaison frame count
tolerance <= 4] + C --> D{Film 2 = MKV
et DoVi + HDR10+ ?} - D -->|Oui| E1[Phase 1 parallel\nextract HEVC Film 1 si MKV\n+ extract HEVC Film 2] - E1 --> E2[Phase 2 parallel\nextract RPU + HDR10+\ndepuis film2.hevc] - D -->|Non| E3[Extraction parallel directe\nHEVC Film 1 si MKV\n+ RPU/HDR10+ depuis Film 2] + D -->|Oui| E1[Phase 1 parallel
extract HEVC Film 1 si MKV
+ extract HEVC Film 2] + E1 --> E2[Phase 2 parallel
extract RPU + HDR10+
depuis film2.hevc] + D -->|Non| E3[Extraction parallel directe
HEVC Film 1 si MKV
+ RPU/HDR10+ depuis Film 2] E2 --> F E3 --> F @@ -510,36 +515,54 @@ flowchart TD J -->|Non| L K --> L - L[ffmpeg final\nvideo injectee + audio/subs/metadata Film 1\nmap_metadata/map_chapters] --> M[Nettoyage] + L[ffmpeg final
video injectee + audio/subs/metadata Film 1
map_metadata/map_chapters] --> M[Nettoyage] M --> N([Sortie MKV]) ``` -### Détection HDR d'un fichier +### Inspection d'un fichier (ffprobe + mediainfo) ```mermaid flowchart TD - A([Fichier video]) --> B[ffprobe JSON\nstreams + format + chapitres] - B --> C{Conteneur Matroska/WebM ?} - C -->|Oui| C1[Enrichissement ffprobe MKV\nlanguage-ietf/language + tag_count] - C -->|Non| C2[Pas d enrichissement MKV] - C1 --> D[Normalisation langues IETF] - C2 --> D - - D --> E{side_data HDR ?} - E -->|Dolby Vision| D1[Dolby Vision] - E -->|HDR10+| D2[HDR10+] - E -->|Aucun| F[Fallback mediainfo] - - F --> G{HDR_Format} - G -->|Dolby Vision| D1 - G -->|SMPTE ST 2094| D2 - G -->|Autre ou vide| H{Color transfer} - - H -->|smpte2084| I[HDR10] - H -->|arib-std-b67| J[HLG] - H -->|autre| K[SDR] + A([Fichier a inspecter]) --> B{Fichier accessible ?} + B -->|Non| X[Arret avec erreur] + B -->|Oui| C[Analyse de base avec ffprobe] + + C --> D{Analyse exploitable ?} + D -->|Non| Y[Arret avec erreur] + D -->|Oui| E[Creation de la fiche media
en une seule lecture ffprobe
pistes chapitres attachments
infos globales et tags] + + E --> F[Tentative d enrichissement mediainfo
en une seule lecture
frame count et details HDR DoVi] + F --> H{Frame count encore absent ?} + H -->|Oui| H1[Nouvelle tentative simple
via mediainfo] + H -->|Non| I + H1 --> I + + I --> J{Fichier Matroska ou WebM ?} + J -->|Oui| J1[Enrichissement MKV
langues detaillees language-ietf
et comptage des tags] + J -->|Non| K + J1 --> K[Normalisation des langues
pour obtenir des valeurs coherentes] + + K --> L{Video principale presente ?} + L -->|Non| Z[Fiche finale retournee] + L -->|Oui| M[Detection HDR] + + M --> N{Metadonnees HDR visibles
directement dans ffprobe ?} + N -->|Oui| Q[Determination du type HDR] + N -->|Non ou incomplet| O[Essai d enrichissement HDR
avec mediainfo] + O --> P{Reponse suffisante ?} + P -->|Oui| Q + P -->|Non| R[Dernier recours
analyse plus fine par images] + R --> Q + + Q --> S[Classement final
Dolby Vision + HDR10+
Dolby Vision
HDR10+
HDR10
HLG
SDR] + S --> T[Mise a jour de la video principale] + T --> Z[Fiche finale retournee
prete pour affichage et workflows] ``` +| Fiche media produite par l inspection | +|---| +| **Identite generale** : chemin du fichier, format du conteneur, duree totale, taille, debit global, titre du conteneur, tags globaux utiles.

**Pistes video** : index, codec, resolution, framerate, profondeur de couleur, infos colorimetriques, type HDR detecte, details Dolby Vision si disponibles, langue, titre, duree, debit.

**Pistes audio** : index, codec, nombre de canaux, layout `stereo/5.1/7.1`, frequence d echantillonnage, debit, langue, titre, duree, indicateurs utiles comme Atmos ou DTS:X quand ils peuvent etre deduits.

**Pistes de sous-titres** : index, codec, langue, titre, drapeaux `forced` et `default`.

**Chapitres** : liste des chapitres avec position temporelle et nom.

**Attachments** : cover, polices et autres ressources embarquees avec nom, type MIME, taille si disponible, et indication `attached_pic` pour les images de couverture integrees comme flux video.

**Enrichissement MKV/WebM** : comptage des tags globaux, recuperation prioritaire des langues detaillees `language-ietf` quand elles existent, avec repli sur `language` sinon.

**Normalisation finale** : harmonisation des langues vers une forme plus coherente et exploitable dans l interface et les workflows suivants.

**Enrichissement optionnel via mediainfo** : frame count si disponible, confirmation ou enrichissement des metadonnees HDR, et details Dolby Vision supplementaires quand `ffprobe` ne les expose pas assez. | + ## Outils externes | Outil | Rôle principal | @@ -553,4 +576,4 @@ flowchart TD --- -*Mediarecode v1.4.1* +*Mediarecode v2.0.0* diff --git a/core/config.py b/core/config.py index 532f878..c6428a7 100644 --- a/core/config.py +++ b/core/config.py @@ -26,6 +26,11 @@ from core.lang_tags import Rfc5646LanguageTags from core.subprocess_utils import subprocess_text_kwargs +from core.workflows.common.ffmpeg_runtime import ( + default_ffmpeg_thread_count as _default_ffmpeg_thread_count, + normalize_ffmpeg_thread_count as _normalize_ffmpeg_thread_count, + normalize_max_parallel_video_encodes as _normalize_max_parallel_video_encodes, +) from core.workdir import ( clear_work_dir as clear_work_dir_contents, prepare_process_work_dir, @@ -96,6 +101,9 @@ def _resolve_ini_path() -> Path: "default_bitrate_per_channel_kbps", "bitrate_step_per_channel_kbps", }, + "ui": { + "verbose_file_logging", + }, } @@ -115,25 +123,6 @@ def _is_windows() -> bool: return sys.platform == "win32" -def _default_ffmpeg_thread_count() -> int: - """ - Default FFmpeg thread count: logical CPU count × 0.75, rounded up. 
- - Examples: - 4 cores -> 3 threads - 8 cores -> 6 threads - """ - cpu_count = os.cpu_count() or 1 - return max(1, (cpu_count * 3 + 3) // 4) - - -def _normalize_ffmpeg_thread_count(value: int | None) -> int: - """Return a safe FFmpeg thread count, preserving 0 as ffmpeg auto mode.""" - if value is None or value < 0: - return _default_ffmpeg_thread_count() - return value - - def _normalize_ui_scale_percent(value: int | None) -> int: """Clamp UI scale percentage to a safe interactive range.""" if value is None: @@ -394,7 +383,13 @@ def write_ini_settings(section_values: dict[str, dict[str, str]]) -> None: for section, values in section_values.items(): lines = _upsert_ini_section(lines, section, values) - lines = _remove_ini_keys(lines, _REMOVED_INI_KEYS) + removed_keys = { + section: _REMOVED_INI_KEYS[section] + for section in section_values + if section in _REMOVED_INI_KEYS + } + if removed_keys: + lines = _remove_ini_keys(lines, removed_keys) _INI_PATH.parent.mkdir(parents=True, exist_ok=True) _INI_PATH.write_text("\n".join(lines).rstrip() + "\n", encoding="utf-8") @@ -626,6 +621,10 @@ def _default_output_dir() -> Path: return Path(raw) if raw else Path.home() / "Videos" +def _default_verbose_log_dir() -> Path: + return _app_data_dir() / "logs" + + def _default_language_code() -> str: candidates: list[str | None] = [ os.environ.get("LC_ALL"), @@ -689,6 +688,13 @@ def _normalize_startup_panel(value: str | None) -> str: return aliases.get(raw, "dashboard") +def _normalize_file_logging_level(value: str | None) -> str: + raw = str(value or "").strip().lower() + if raw in {"standard", "verbose"}: + return raw + return "standard" + + INI_FIELD_GROUPS: tuple[dict[str, Any], ...] 
= ( { @@ -750,6 +756,7 @@ def _normalize_startup_panel(value: str | None) -> str: "fields": ( {"key": "ram_buffer_enabled", "attr": "ram_buffer_enabled", "kind": "bool", "label": "Buffer RAM activé", "description": "Active le buffer RAM pour les fichiers HEVC intermédiaires quand le seuil le permet."}, {"key": "ram_buffer_threshold_pct", "attr": "ram_buffer_threshold_pct", "kind": "int", "label": "Seuil buffer RAM (%)", "description": "Pourcentage minimal de RAM libre à conserver."}, + {"key": "max_parallel_video_encodes", "attr": "max_parallel_video_encodes", "kind": "int", "label": "Encodages vidéo parallèles max", "description": "Nombre maximal de pistes vidéo encodées simultanément dans le pipeline multi-pistes. 1 = séquentiel (défaut)."}, ), }, { @@ -763,6 +770,9 @@ def _normalize_startup_panel(value: str | None) -> str: {"key": "startup_panel", "attr": "startup_panel", "kind": "choice", "label": "Panneau à afficher au démarrage", "description": "Panneau chargé en premier au lancement de l'application.", "options": UI_STARTUP_PANEL_CHOICES}, {"key": "startup_menu_compact", "attr": "startup_menu_compact", "kind": "bool", "label": "Démarrer avec le menu en mode Compact", "description": "Si activé, le menu latéral est réduit en mode icônes au lancement."}, {"key": "startup_logs_expanded", "attr": "startup_logs_expanded", "kind": "bool", "label": "Ouvrir les logs au démarrage de l'application", "description": "Si activé, le panneau de logs est déplié au lancement."}, + {"key": "enable_file_logging", "attr": "enable_file_logging", "kind": "bool", "label": "Activer le logging fichier", "description": "Si activé, les logs applicatifs sont aussi écrits dans un fichier texte sous app_data/logs/."}, + {"key": "file_logging_level", "attr": "file_logging_level", "kind": "choice", "label": "Niveau de logging fichier", "description": "Standard écrit le flux visible dans la fenêtre. 
Verbose ajoute les sorties techniques détaillées des outils.", "options": (("standard", "Standard"), ("verbose", "Verbose"))}, + {"key": "verbose_log_dir", "attr": "verbose_log_dir", "kind": "directory", "label": "Dossier des logs fichier", "description": "Dossier où écrire les logs fichier. Prérempli par défaut avec le chemin complet actuel."}, ), }, { @@ -869,15 +879,25 @@ def _resolve_path(self, section: str, key: str, settings_key: str, default: Path return Path(self._resolve_text(section, key, settings_key, str(default))) def _resolve_int(self, section: str, key: str, settings_key: str, default: int) -> int: + def _parse_int(raw: object) -> int | None: + try: + return int(str(raw).strip()) + except (TypeError, ValueError): + return None + ini_value = self._ini_lookup(section, key) if ini_value is not _MISSING: - return default if ini_value == "" else int(str(ini_value)) + if ini_value == "": + return default + parsed_ini = _parse_int(ini_value) + return parsed_ini if parsed_ini is not None else default value = self._settings.value(settings_key, default) if value in (None, ""): return default if isinstance(value, int): return value - return int(str(value)) + parsed_settings = _parse_int(value) + return parsed_settings if parsed_settings is not None else default def _resolve_bool(self, section: str, key: str, settings_key: str, default: bool) -> bool: def _as_bool(raw: str) -> bool: @@ -956,6 +976,9 @@ def _load(self) -> None: self.ram_buffer_enabled = self._resolve_bool("encoding", "ram_buffer_enabled", "encoding/ram_buffer_enabled", True) self.ram_buffer_threshold_pct = self._resolve_int("encoding", "ram_buffer_threshold_pct", "encoding/ram_buffer_threshold_pct", 15) + self.max_parallel_video_encodes = _normalize_max_parallel_video_encodes( + self._resolve_int("encoding", "max_parallel_video_encodes", "encoding/max_parallel_video_encodes", 1) + ) self.aac_bitrate_per_channel_kbps = self._resolve_int( "audio_encoding", "aac_bitrate_per_channel_kbps", @@ -972,6 
+995,34 @@ def _load(self) -> None: self._resolve_text("ui", "language", "ui/language", _default_language_code()) ) self.log_max_lines = self._resolve_int("ui", "log_max_lines", "ui/log_max_lines", 2000) + legacy_verbose_file_logging = self._resolve_bool( + "ui", "verbose_file_logging", "ui/verbose_file_logging", False + ) + enable_file_logging_ini = self._ini_lookup("ui", "enable_file_logging") + if enable_file_logging_ini is _MISSING and self._ini_lookup("ui", "verbose_file_logging") is not _MISSING: + self.enable_file_logging = legacy_verbose_file_logging + else: + self.enable_file_logging = self._resolve_bool( + "ui", "enable_file_logging", "ui/enable_file_logging", legacy_verbose_file_logging + ) + file_logging_level_ini = self._ini_lookup("ui", "file_logging_level") + default_file_logging_level = "verbose" if legacy_verbose_file_logging else "standard" + if file_logging_level_ini is _MISSING and self._ini_lookup("ui", "verbose_file_logging") is not _MISSING: + self.file_logging_level = default_file_logging_level + else: + self.file_logging_level = _normalize_file_logging_level( + self._resolve_text( + "ui", + "file_logging_level", + "ui/file_logging_level", + default_file_logging_level, + ) + ) + # Compat: ancien nom conservé pour les usages résiduels internes/tests. 
+ self.verbose_file_logging = self.enable_file_logging + self.verbose_log_dir = self._resolve_path( + "ui", "verbose_log_dir", "ui/verbose_log_dir", _default_verbose_log_dir() + ) self.theme = self._resolve_text("ui", "theme", "ui/theme", "dark") self.ui_scale_percent = _normalize_ui_scale_percent( self._resolve_int("ui", "ui_scale_percent", "ui/ui_scale_percent", 100) @@ -1031,12 +1082,19 @@ def save(self) -> None: s.setValue("encoding/ram_buffer_enabled", "true" if self.ram_buffer_enabled else "false") s.setValue("encoding/ram_buffer_threshold_pct", self.ram_buffer_threshold_pct) + s.setValue("encoding/max_parallel_video_encodes", self.max_parallel_video_encodes) s.setValue("audio_encoding/aac_bitrate_per_channel_kbps", self.aac_bitrate_per_channel_kbps) s.setValue("audio_encoding/eac3_bitrate_per_channel_kbps", self.eac3_bitrate_per_channel_kbps) s.setValue("ui/language", self.language) s.setValue("ui/log_max_lines", self.log_max_lines) + s.setValue( + "ui/enable_file_logging", + "true" if self.enable_file_logging else "false", + ) + s.setValue("ui/file_logging_level", self.file_logging_level) + s.setValue("ui/verbose_log_dir", str(self.verbose_log_dir)) s.setValue("ui/theme", self.theme) s.setValue("ui/ui_scale_percent", self.ui_scale_percent) s.setValue("ui/startup_panel", self.startup_panel) @@ -1201,10 +1259,14 @@ def to_dict(self) -> dict[str, Any]: "encoding": { "ram_buffer_enabled": self.ram_buffer_enabled, "ram_buffer_threshold_pct": self.ram_buffer_threshold_pct, + "max_parallel_video_encodes": self.max_parallel_video_encodes, }, "ui": { "language": self.language, "log_max_lines": self.log_max_lines, + "enable_file_logging": self.enable_file_logging, + "file_logging_level": self.file_logging_level, + "verbose_log_dir": str(self.verbose_log_dir), "theme": self.theme, "ui_scale_percent": self.ui_scale_percent, "startup_panel": self.startup_panel, diff --git a/core/inspector.py b/core/inspector.py index ed8800b..88568df 100644 --- a/core/inspector.py +++ 
b/core/inspector.py @@ -22,7 +22,9 @@ import json import re +import shlex import subprocess +from collections.abc import Callable from dataclasses import dataclass, field from enum import Enum, auto from pathlib import Path @@ -72,9 +74,10 @@ class HDRType(Enum): Type de métadonnées HDR détecté dans le flux vidéo principal. Ordre de priorité (du plus riche au plus pauvre) : - DOLBY_VISION_HDR10PLUS > DOLBY_VISION > HDR10PLUS > HDR10 > NONE + DOLBY_VISION_HDR10PLUS > DOLBY_VISION > HDR10PLUS > HDR10 > HLG > NONE """ NONE = auto() + HLG = auto() HDR10 = auto() HDR10PLUS = auto() DOLBY_VISION = auto() @@ -84,6 +87,7 @@ def label(self) -> str: """Libellé court pour l'affichage dans l'UI.""" return { HDRType.NONE: "SDR", + HDRType.HLG: "HLG", HDRType.HDR10: "HDR10", HDRType.HDR10PLUS: "HDR10+", HDRType.DOLBY_VISION: "Dolby Vision", @@ -110,6 +114,8 @@ class VideoTrack: color_transfer: str | None # "smpte2084" (PQ), "arib-std-b67" (HLG) color_matrix: str | None # "bt2020nc" hdr_type: HDRType = HDRType.NONE + dovi_profile: int | None = None # ex: 8 pour P8.x + dovi_compat_id: int | None = None # 0=P8.0, 1=P8.1 language: str | None = None title: str | None = None duration_s: float | None = None @@ -126,6 +132,54 @@ def resolution(self) -> str: def is_hdr(self) -> bool: return self.hdr_type != HDRType.NONE + @property + def hdr_label(self) -> str: + """ + Label d'affichage enrichi : profil DoVi + couche de compatibilité. 
+ + Mappage compat_id pour DoVi Profile 8 : + P8.0 → DoVi only (pas de fallback) + P8.1 → HDR10 + P8.2 → SDR (BT.709) + P8.4 → HLG + """ + if self.hdr_type == HDRType.NONE: + return "SDR" + + parts: list[str] = [] + + if self.hdr_type in (HDRType.DOLBY_VISION, HDRType.DOLBY_VISION_HDR10PLUS): + dv = "Dolby Vision" + if self.dovi_profile is not None: + compat = self.dovi_compat_id + if compat is not None: + dv += f" P{self.dovi_profile}.{compat}" + else: + dv += f" P{self.dovi_profile}" + parts.append(dv) + + if self.hdr_type == HDRType.DOLBY_VISION_HDR10PLUS: + parts.append("HDR10+") + elif self.hdr_type == HDRType.HDR10PLUS: + parts.append("HDR10+") + elif self.hdr_type == HDRType.HDR10: + parts.append("HDR10") + elif self.hdr_type == HDRType.HLG: + parts.append("HLG") + elif self.hdr_type == HDRType.DOLBY_VISION: + compat_label = self.dovi_compat_label + if compat_label: + parts.append(compat_label) + + return " + ".join(parts) if parts else self.hdr_type.label() + + @property + def dovi_compat_label(self) -> str | None: + """Label de la couche de compatibilité DoVi P8.x selon compat_id (None pour P8.0).""" + if self.dovi_profile != 8 or self.dovi_compat_id is None: + return None + return {1: "HDR10", 2: "SDR", 4: "HLG"}.get(self.dovi_compat_id) + @dataclass class AudioTrack: @@ -329,9 +383,44 @@ def __init__( self, ffprobe_bin: str = "ffprobe", mediainfo_bin: str = "mediainfo", + verbose_output: Callable[[str], None] | None = None, ) -> None: self._ffprobe = ffprobe_bin self._mediainfo = mediainfo_bin + self._verbose_output = verbose_output + + def _emit_verbose(self, line: str) -> None: + callback = self._verbose_output + if callback is None: + return + rendered = str(line).rstrip() + if not rendered: + return + try: + callback(rendered) + except Exception: + pass + + def _emit_command(self, cmd: list[str]) -> None: + self._emit_verbose(f"$ {shlex.join([str(part) for part in cmd])}") + + def _emit_process_result( + self, + tool_name: str, + result: 
subprocess.CompletedProcess[str], + *, + preview_stdout: bool = False, + ) -> None: + stdout = str(result.stdout or "") + stderr = str(result.stderr or "") + self._emit_verbose( + f"{tool_name} rc={result.returncode} stdout={len(stdout.encode('utf-8'))}o stderr={len(stderr.encode('utf-8'))}o" + ) + if preview_stdout: + for line in stdout.strip().splitlines()[:3]: + self._emit_verbose(f"{tool_name} stdout: {line[:400]}") + for line in stderr.strip().splitlines()[-3:]: + self._emit_verbose(f"{tool_name} stderr: {line[:400]}") # ------------------------------------------------------------------ # API publique @@ -344,22 +433,42 @@ def inspect(self, path: Path) -> FileInfo: Lève : InspectionError : si le fichier est illisible ou si ffprobe échoue. """ + self._emit_verbose(f"Inspection démarrée : {path}") if not path.is_file(): + self._emit_verbose(f"Inspection impossible : fichier introuvable ({path})") raise InspectionError(path, "fichier introuvable") raw = self._run_ffprobe(path) info = self._parse_ffprobe(path, raw) - # Enrichissement via mediainfo (non bloquant si absent) - try: - info.frame_count = self.get_frame_count(path) - except Exception: - pass # mediainfo absent ou fichier non supporté — on continue + # ── Enrichissement mediainfo (un seul appel JSON couvrant frame_count, + # HDR_Format, HDR_Format_Compatibility, profil DoVi). Remplace les + # 3 appels --Inform séparés et accélère la phase HDR. + mi_data = self._run_mediainfo_json(path) + mi_video = self._mediainfo_video_track(mi_data) if mi_data else None + + # Frame count (issu de mediainfo JSON quand disponible, sinon fallback + # legacy --Inform=FrameCount pour rester compatible avec très vieux mediainfo). 
+ if mi_video is not None: + fc = mi_video.get("FrameCount") + if isinstance(fc, str) and fc.isdigit(): + info.frame_count = int(fc) + if info.frame_count is None: + try: + info.frame_count = self.get_frame_count(path) + except Exception: + pass - # Enrichissement MKV via ffprobe (tag count + language_ietf si présent) + # Enrichit le profil DoVi depuis mediainfo si ffprobe ne l'a pas fourni + # (certains builds ffprobe ne remontent pas DOVI configuration record). + if info.primary_video and mi_video is not None: + self._merge_dovi_from_mediainfo(info.primary_video, mi_video) + + # Enrichissement MKV : tag count + language_ietf depuis le raw ffprobe + # déjà parsé (évite un second appel à ffprobe). if "matroska" in info.format or "webm" in info.format: try: - tag_count, ietf_langs = self._get_mkv_track_data(path) + tag_count, ietf_langs = self._extract_mkv_track_data_from_raw(raw) info.tag_count = tag_count # Remplace les codes ISO 639-2 de ffprobe par les balises IETF # quand elles sont disponibles (ex : "en-US", "fr-FR"). @@ -381,11 +490,20 @@ def inspect(self, path: Path) -> FileInfo: normalized = Rfc5646LanguageTags.regionalize_track_language(lang, title) track.language = normalized if normalized and normalized != "und" else None - # HDR du flux vidéo principal — réutilise le raw déjà parsé (évite 2e appel ffprobe) + # HDR du flux vidéo principal — réutilise raw ffprobe ET mi_video. if info.primary_video: - info.hdr_type = self._detect_hdr_from_raw(path, raw) + info.hdr_type = self._detect_hdr_from_raw(path, raw, mi_video=mi_video) info.primary_video.hdr_type = info.hdr_type + chapter_count = info.chapters.count if info.chapters is not None else 0 + hdr_label = info.hdr_type.label() if info.primary_video else "Aucune piste vidéo" + frame_count = info.frame_count if info.frame_count is not None else "?" 
+ self._emit_verbose( + "Inspection terminée : " + f"{path.name} V={len(info.video_tracks)} A={len(info.audio_tracks)} " + f"S={len(info.subtitle_tracks)} PJ={len(info.attachments)} " + f"Chap={chapter_count} HDR={hdr_label} Frames={frame_count}" + ) return info def get_frame_count(self, path: Path) -> int | None: @@ -395,18 +513,22 @@ def get_frame_count(self, path: Path) -> int | None: Retourne None si mediainfo est absent ou si la valeur est illisible. """ try: + cmd = [self._mediainfo, "--Inform=Video;%FrameCount%", str(path)] + self._emit_command(cmd) result = subprocess.run( - [self._mediainfo, "--Inform=Video;%FrameCount%", str(path)], + cmd, capture_output=True, check=False, **subprocess_text_kwargs(), # shell=True JAMAIS ) + self._emit_process_result("mediainfo", result, preview_stdout=True) raw = result.stdout.strip() if re.fullmatch(r"\d+", raw): + self._emit_verbose(f"mediainfo frame_count={raw}") return int(raw) except FileNotFoundError: - pass # mediainfo absent + self._emit_verbose("mediainfo introuvable dans PATH (frame count ignoré).") return None def detect_hdr_type(self, path: Path) -> HDRType: @@ -428,19 +550,32 @@ def detect_hdr_type(self, path: Path) -> HDRType: return HDRType.NONE return self._detect_hdr_from_raw(path, raw) - def _detect_hdr_from_raw(self, path: Path, raw: dict[str, Any]) -> HDRType: + def _detect_hdr_from_raw( + self, + path: Path, + raw: dict[str, Any], + *, + mi_video: dict[str, Any] | None = None, + ) -> HDRType: """ Détecte le type HDR depuis un dict ffprobe déjà parsé. Utilisé en interne par inspect() pour éviter un second appel ffprobe. + Si ``mi_video`` (track Video du JSON mediainfo) est fourni, il sert de + source HDR enrichie et évite tout sous-appel mediainfo additionnel. 
""" - video_streams = [s for s in raw.get("streams", []) if s.get("codec_type") == "video"] + video_streams = [ + s for s in raw.get("streams", []) + if s.get("codec_type") == "video" + and not bool((s.get("disposition") or {}).get("attached_pic", 0)) + ] if not video_streams: return HDRType.NONE vs = video_streams[0] - transfer = vs.get("color_transfer", "") - side_data = vs.get("side_data_list", []) + transfer = str(vs.get("color_transfer", "") or "") + side_data_obj = vs.get("side_data_list") + side_data = side_data_obj if isinstance(side_data_obj, list) else [] has_pq = transfer in ("smpte2084", "smpte2084le") has_hlg = transfer == "arib-std-b67" @@ -449,12 +584,44 @@ def _detect_hdr_from_raw(self, path: Path, raw: dict[str, Any]) -> HDRType: has_dovi = any(sd.get("side_data_type") == "DOVI configuration record" for sd in side_data) has_hdr10plus = any(sd.get("side_data_type") == "HDR Dynamic Metadata SMPTE2094-40 (HDR10+)" for sd in side_data) - # Fallback mediainfo pour DoVi et HDR10+ (ffprobe peut manquer certains streams) + # Certains fichiers reportent les side-data dynamiques sur un autre flux + # vidéo que le premier (ou pas de façon stable selon ffprobe build). if not has_dovi or not has_hdr10plus: - mi_dovi, mi_hdr10plus = self._mediainfo_hdr_flags(path) + for stream in video_streams[1:]: + other_side_obj = stream.get("side_data_list") + other_side_data = other_side_obj if isinstance(other_side_obj, list) else [] + if not has_dovi and any(sd.get("side_data_type") == "DOVI configuration record" for sd in other_side_data): + has_dovi = True + if not has_hdr10plus and any( + sd.get("side_data_type") == "HDR Dynamic Metadata SMPTE2094-40 (HDR10+)" + for sd in other_side_data + ): + has_hdr10plus = True + if has_dovi and has_hdr10plus: + break + + # Fallback mediainfo pour DoVi/HDR10+ (très rapide quand disponible). 
+ # mediainfo lit les SEI/NAL units : si présent et qu'il a répondu, on + # lui fait confiance et on évite le coûteux probe frame-level (~3-6 s). + mediainfo_responded = False + if not has_dovi or not has_hdr10plus: + if mi_video is not None: + # Source unifiée mediainfo JSON (1 seul appel déjà effectué). + mi_dovi, mi_hdr10plus, mediainfo_responded = self._hdr_flags_from_mi_video(mi_video) + else: + mi_dovi, mi_hdr10plus, mediainfo_responded = self._mediainfo_hdr_flags(path) has_dovi = has_dovi or mi_dovi has_hdr10plus = has_hdr10plus or mi_hdr10plus + # Fallback ffprobe frame-level : uniquement quand mediainfo est absent + # ou n'a rien retourné. Sinon le coût (240 frames) ne se justifie pas. + if (not has_dovi or not has_hdr10plus) and not mediainfo_responded: + frame_flags = self._ffprobe_frame_dynamic_hdr_flags(path) + if frame_flags is not None: + frame_dovi, frame_hdr10plus = frame_flags + has_dovi = has_dovi or frame_dovi + has_hdr10plus = has_hdr10plus or frame_hdr10plus + # Priorité décroissante if has_dovi and has_hdr10plus: return HDRType.DOLBY_VISION_HDR10PLUS @@ -464,9 +631,11 @@ def _detect_hdr_from_raw(self, path: Path, raw: dict[str, Any]) -> HDRType: return HDRType.HDR10PLUS if has_pq and (has_master_disp or has_cll): return HDRType.HDR10 - if has_pq or has_hlg: - # PQ/HLG sans métadonnées statiques : HDR10 incomplet mais présent + if has_pq: + # PQ sans métadonnées statiques : HDR10 incomplet mais présent return HDRType.HDR10 + if has_hlg: + return HDRType.HLG return HDRType.NONE @@ -485,6 +654,7 @@ def _run_ffprobe(self, path: Path) -> dict[str, Any]: "-show_chapters", str(path), ] + self._emit_command(cmd) try: result = subprocess.run( cmd, @@ -494,43 +664,245 @@ def _run_ffprobe(self, path: Path) -> dict[str, Any]: # shell=True JAMAIS ) except FileNotFoundError: + self._emit_verbose("ffprobe introuvable dans PATH.") raise InspectionError(path, "ffprobe introuvable dans PATH") + self._emit_process_result("ffprobe", result) + if 
result.returncode != 0: stderr = result.stderr.strip()[-500:] raise InspectionError(path, f"ffprobe a échoué (code {result.returncode}) : {stderr}") try: - return json.loads(result.stdout) + payload = json.loads(result.stdout) except json.JSONDecodeError as exc: raise InspectionError(path, f"Sortie ffprobe non parseable : {exc}") + stream_count = len(payload.get("streams") or []) + chapter_count = len(payload.get("chapters") or []) + format_name = str((payload.get("format") or {}).get("format_name") or "?") + self._emit_verbose( + f"ffprobe JSON parsé : format={format_name} streams={stream_count} chapters={chapter_count}" + ) + return payload - def _mediainfo_hdr_flags(self, path: Path) -> tuple[bool, bool]: + def _ffprobe_frame_dynamic_hdr_flags( + self, + path: Path, + *, + max_frames: int = 240, + ) -> tuple[bool, bool] | None: """ - Retourne (has_dovi, has_hdr10plus) via mediainfo. + Retourne (has_dovi, has_hdr10plus) via ffprobe frame-level. - Utilise deux appels --Inform ciblés pour minimiser la latence. - Retourne (False, False) si mediainfo est absent. + Utilisé en fallback quand `-show_streams` ne remonte pas les side-data + dynamiques (cas fréquent sur certains remux DV/HDR10+). 
""" + cmd = [ + self._ffprobe, + "-v", "quiet", + "-print_format", "json", + "-select_streams", "v:0", + "-read_intervals", f"%+#{max(1, int(max_frames))}", + "-show_frames", + "-show_entries", "frame_side_data=side_data_type", + str(path), + ] + self._emit_command(cmd) try: - # Dolby Vision : champ HDR_Format contient "Dolby Vision" - r_dovi = subprocess.run( - [self._mediainfo, "--Inform=Video;%HDR_Format%", str(path)], - capture_output=True, check=False, **subprocess_text_kwargs(), + result = subprocess.run( + cmd, + capture_output=True, + check=False, + timeout=30, + **subprocess_text_kwargs(), ) - has_dovi = "dolby vision" in r_dovi.stdout.lower() + except FileNotFoundError: + self._emit_verbose("ffprobe introuvable pour le probe HDR frame-level.") + return None + self._emit_process_result("ffprobe", result) + if result.returncode != 0: + return None + try: + payload = json.loads(result.stdout or "{}") + except json.JSONDecodeError: + return None + + frames_obj = payload.get("frames") + if not isinstance(frames_obj, list): + return False, False - # HDR10+ : champ HDR_Format_Compatibility contient "HDR10+" - r_hdr10p = subprocess.run( - [self._mediainfo, "--Inform=Video;%HDR_Format_Compatibility%", str(path)], + has_dovi = False + has_hdr10plus = False + for frame in frames_obj: + if not isinstance(frame, dict): + continue + side_data_obj = frame.get("side_data_list") + if not isinstance(side_data_obj, list): + continue + for side_data in side_data_obj: + if not isinstance(side_data, dict): + continue + side_type = str(side_data.get("side_data_type", "") or "") + side_type_lower = side_type.lower() + if ("dolby vision" in side_type_lower) or (side_type == "DOVI configuration record"): + has_dovi = True + if ( + "hdr dynamic metadata smpte2094-40" in side_type_lower + or "hdr10+" in side_type_lower + or "smpte st 2094" in side_type_lower + or "smpte2094" in side_type_lower + ): + has_hdr10plus = True + if has_dovi and has_hdr10plus: + self._emit_verbose("ffprobe 
frame HDR : dovi=True hdr10plus=True") + return True, True + self._emit_verbose(f"ffprobe frame HDR : dovi={has_dovi} hdr10plus={has_hdr10plus}") + return has_dovi, has_hdr10plus + + def _mediainfo_hdr_flags(self, path: Path) -> tuple[bool, bool, bool]: + """ + Retourne ``(has_dovi, has_hdr10plus, mediainfo_responded)`` via mediainfo. + + ``mediainfo_responded`` est True si mediainfo a renvoyé du texte non vide + pour ``HDR_Format`` ou ``HDR_Format_Compatibility``. Cela permet à + l'appelant de savoir si mediainfo a vraiment statué (et donc d'éviter + un probe frame-level ffprobe coûteux quand la réponse est négative). + + Combine les deux requêtes en un seul appel mediainfo via un séparateur + ``|`` pour minimiser la latence. + """ + try: + # Un seul appel mediainfo : HDR_Format et HDR_Format_Compatibility + # concaténés avec un séparateur. Selon les sources, HDR10+ peut + # apparaître dans l'un ou l'autre. + cmd = [ + self._mediainfo, + "--Inform=Video;%HDR_Format%|%HDR_Format_Compatibility%", + str(path), + ] + self._emit_command(cmd) + r = subprocess.run( + cmd, capture_output=True, check=False, **subprocess_text_kwargs(), ) - has_hdr10plus = "hdr10+" in r_hdr10p.stdout.lower() + self._emit_process_result("mediainfo", r, preview_stdout=True) + stdout = (r.stdout or "").strip() + mediainfo_responded = bool(stdout.replace("|", "").strip()) + hdr_text = stdout.lower() + has_dovi = "dolby vision" in hdr_text + has_hdr10plus = ( + "hdr10+" in hdr_text + or "smpte st 2094" in hdr_text + or "smpte2094" in hdr_text + ) + self._emit_verbose( + f"mediainfo HDR : dovi={has_dovi} hdr10plus={has_hdr10plus} " + f"responded={mediainfo_responded}" + ) + return has_dovi, has_hdr10plus, mediainfo_responded - return has_dovi, has_hdr10plus + except FileNotFoundError: + self._emit_verbose("mediainfo introuvable dans PATH (détection HDR enrichie ignorée).") + return False, False, False + # ------------------------------------------------------------------ + # mediainfo JSON (source 
unifiée pour HDR, frame_count, profil DoVi) + # ------------------------------------------------------------------ + + def _run_mediainfo_json(self, path: Path) -> dict[str, Any] | None: + """ + Lance ``mediainfo --Output=JSON`` une seule fois et retourne le dict parsé. + + Retourne ``None`` si mediainfo est absent, en échec ou si la sortie + n'est pas du JSON valide. Cet appel unique remplace les 3 ``--Inform`` + ciblés (FrameCount, HDR_Format, HDR_Format_Compatibility) et expose en + prime tous les champs HDR riches (mastering display, MaxCLL/FALL, + compatibility profile DoVi, etc.). + """ + cmd = [self._mediainfo, "--Output=JSON", str(path)] + self._emit_command(cmd) + try: + result = subprocess.run( + cmd, + capture_output=True, check=False, **subprocess_text_kwargs(), + ) except FileNotFoundError: - return False, False + self._emit_verbose("mediainfo introuvable dans PATH (JSON ignoré).") + return None + self._emit_process_result("mediainfo", result) + if result.returncode != 0: + return None + try: + return json.loads(result.stdout or "{}") + except json.JSONDecodeError as exc: + self._emit_verbose(f"mediainfo JSON invalide : {exc}") + return None + + @staticmethod + def _mediainfo_video_track(mi_data: dict[str, Any]) -> dict[str, Any] | None: + """Retourne le 1er track ``@type=Video`` du JSON mediainfo, ou None.""" + media = mi_data.get("media") or {} + for track in media.get("track") or []: + if isinstance(track, dict) and track.get("@type") == "Video": + return track + return None + + @staticmethod + def _hdr_flags_from_mi_video(mi_video: dict[str, Any]) -> tuple[bool, bool, bool]: + """ + Retourne ``(has_dovi, has_hdr10plus, mediainfo_responded)`` à partir de + la track Video mediainfo. Pure : aucun sous-process, idéal pour éviter + de relancer mediainfo quand on a déjà le JSON complet. 
+ """ + hdr_format = str(mi_video.get("HDR_Format") or "") + hdr_compat = str(mi_video.get("HDR_Format_Compatibility") or "") + hdr_text = f"{hdr_format}\n{hdr_compat}".lower() + responded = bool(hdr_format.strip() or hdr_compat.strip()) + has_dovi = "dolby vision" in hdr_text + has_hdr10plus = ( + "hdr10+" in hdr_text + or "smpte st 2094" in hdr_text + or "smpte2094" in hdr_text + ) + return has_dovi, has_hdr10plus, responded + + @staticmethod + def _merge_dovi_from_mediainfo( + video: VideoTrack, mi_video: dict[str, Any] + ) -> None: + """ + Complète ``video.dovi_profile`` / ``dovi_compat_id`` depuis mediainfo. + + Mappage : + - ``HDR_Format_Profile`` ``dvheNN`` ou ``dvavNN`` → profile NN. + - ``HDR_Format_Compatibility`` (côté droit du ``/`` après le profil) : + "HDR10" → compat_id 1 + "SDR" → compat_id 2 + "HLG" → compat_id 4 + sinon → 0 (P8.0, fallback DoVi-only). + + N'écrase pas une valeur déjà présente côté ffprobe (qui reste plus + précise quand ``DOVI configuration record`` est exposé). + """ + if video.dovi_profile is None: + hfp = str(mi_video.get("HDR_Format_Profile") or "") + # "dvhe.08 / " → 8 ; "dvav.05 / " → 5 + m = re.search(r"dv(?:he|av)\.?(\d+)", hfp.lower()) + if m: + video.dovi_profile = int(m.group(1)) + + if video.dovi_compat_id is None and video.dovi_profile is not None: + compat = str(mi_video.get("HDR_Format_Compatibility") or "").lower() + # Pour les DoVi, la chaîne contient "HDR10" / "SDR" / "HLG" + # (parfois avec "Profile B" derrière, qu'on ignore). 
+ if "hdr10" in compat: + video.dovi_compat_id = 1 + elif "sdr" in compat: + video.dovi_compat_id = 2 + elif "hlg" in compat: + video.dovi_compat_id = 4 + else: + video.dovi_compat_id = 0 # ------------------------------------------------------------------ # Parsing ffprobe @@ -612,6 +984,15 @@ def _parse_video(self, s: dict[str, Any]) -> VideoTrack: if frame_rate in ("0/0", "0", None): frame_rate = None + # Profil DoVi depuis le side_data DOVI configuration record + dovi_profile: int | None = None + dovi_compat_id: int | None = None + for sd in s.get("side_data_list") or []: + if sd.get("side_data_type") == "DOVI configuration record": + dovi_profile = _int_or_none(sd.get("dv_profile")) + dovi_compat_id = _int_or_none(sd.get("dv_bl_signal_compatibility_id")) + break + return VideoTrack( index = s.get("index", 0), codec = s.get("codec_name", "?"), @@ -624,6 +1005,8 @@ def _parse_video(self, s: dict[str, Any]) -> VideoTrack: color_primaries = s.get("color_primaries"), color_transfer = s.get("color_transfer"), color_matrix = s.get("color_space"), + dovi_profile = dovi_profile, + dovi_compat_id = dovi_compat_id, language = tags.get("language"), title = tags.get("title"), duration_s = _float_or_none(s.get("duration")), @@ -682,53 +1065,36 @@ def _stream_tag_lookup(tags: dict[str, Any], normalized_key: str) -> str | None: return text return None - def _get_mkv_track_data( - self, path: Path + def _extract_mkv_track_data_from_raw( + self, raw: dict[str, Any] ) -> tuple[int, dict[int, str]]: """ - Appelle ``ffprobe`` et retourne : + Extrait depuis le ``raw`` ffprobe déjà parsé : - le nombre de balises MKV globales (int) - - un dict {track_id: language_ietf|language} pour chaque piste + - un dict ``{track_id: language_ietf|language}`` pour chaque piste - ``language-ietf`` est prioritaire ; ``language`` est utilisé en fallback. - Retourne (0, {}) si ffprobe est absent ou si la sortie ne peut pas - être parsée. + ``language-ietf`` est prioritaire, ``language`` en fallback. 
+ Évite un second appel ffprobe (les données sont déjà dans le JSON + retourné par ``_run_ffprobe``). """ - try: - result = subprocess.run( - [ - self._ffprobe, - "-v", "quiet", - "-print_format", "json", - "-show_streams", - "-show_format", - str(path), - ], - capture_output=True, check=False, timeout=15, **subprocess_text_kwargs(), - ) - if result.returncode != 0: - return 0, {} - data = json.loads(result.stdout) - - fmt_tags = (data.get("format") or {}).get("tags") or {} - tag_count = sum( - 1 - for key, value in fmt_tags.items() - if str(value).strip() and str(key).strip().upper() not in _EXCLUDED_SOURCE_TAGS - ) + fmt_tags = (raw.get("format") or {}).get("tags") or {} + tag_count = sum( + 1 + for key, value in fmt_tags.items() + if str(value).strip() and str(key).strip().upper() not in _EXCLUDED_SOURCE_TAGS + ) - lang_map: dict[int, str] = {} - for track in data.get("streams", []): - tid = track.get("index") - tags = track.get("tags", {}) or {} - lang = self._stream_tag_lookup(tags, "language-ietf") - if lang is None: - lang = self._stream_tag_lookup(tags, "language") - if tid is not None and lang: - lang_map[tid] = lang - return tag_count, lang_map - except (FileNotFoundError, json.JSONDecodeError, Exception): - return 0, {} + lang_map: dict[int, str] = {} + for track in raw.get("streams", []): + tid = track.get("index") + tags = track.get("tags", {}) or {} + lang = self._stream_tag_lookup(tags, "language-ietf") + if lang is None: + lang = self._stream_tag_lookup(tags, "language") + if tid is not None and lang: + lang_map[tid] = lang + self._emit_verbose(f"MKV tags depuis raw : tag_count={tag_count} langues={len(lang_map)}") + return tag_count, lang_map # ============================================================================= diff --git a/core/logging.py b/core/logging.py new file mode 100644 index 0000000..91db6cc --- /dev/null +++ b/core/logging.py @@ -0,0 +1,218 @@ +""" +core/logging.py — Moteur de logging applicatif (UI + fichiers verbose). 
+ +Centralise : + - les niveaux de logs applicatifs + - la normalisation des niveaux reçus via signaux + - l'écriture des logs verbose avec rotation + - la reprise de session depuis le dernier fichier existant +""" + +from __future__ import annotations + +import logging as _stdlib_logging +import re +from datetime import datetime +from enum import Enum +from pathlib import Path +from typing import Callable + + +class LogLevel(str, Enum): + INFO = "INFO" + OK = "OK" + WARN = "WARN" + ERROR = "ERROR" + + +def get_logger(name: str) -> _stdlib_logging.Logger: + """Point d'entrée unique pour les loggers stdlib du projet.""" + return _stdlib_logging.getLogger(name) + + +def parse_log_level(value: str, default: LogLevel = LogLevel.INFO) -> LogLevel: + """Convertit une chaîne en LogLevel avec fallback robuste.""" + try: + return LogLevel(str(value).strip().upper()) + except ValueError: + return default + + +VERBOSE_LOG_MAX_BYTES = 50 * 1024 * 1024 +VERBOSE_LOG_MAX_FILES = 3 +_VERBOSE_LOG_FILE_RE = re.compile(r"^mediarecode-verbose-(\d{8}-\d{6})-(\d+)\.log$") + + +def _latest_verbose_log_file( + logs_dir: Path, + *, + max_files: int, +) -> tuple[str, int, Path] | None: + latest: tuple[str, int, Path] | None = None + for path in logs_dir.glob("mediarecode-verbose-*.log"): + match = _VERBOSE_LOG_FILE_RE.match(path.name) + if match is None: + continue + stamp = match.group(1) + try: + index = int(match.group(2)) + except ValueError: + continue + if index < 1 or index > max_files: + continue + candidate = (stamp, index, path) + if latest is None or candidate > latest: + latest = candidate + if latest is None: + return None + stamp, index, path = latest + return stamp, index, path + + +class VerboseFileLogger: + """Gestionnaire de fichier verbose avec rotation circulaire.""" + + def __init__( + self, + *, + app_data_dir: Path, + verbose_log_dir: Path | str | None, + enabled: bool, + on_write_error: Callable[[], None] | None = None, + max_bytes: int | None = None, + max_files: 
int | None = None, + ) -> None: + self._app_data_dir = Path(app_data_dir) + self._configured_verbose_log_dir = ( + Path(verbose_log_dir) if verbose_log_dir not in (None, "") else None + ) + self._enabled = bool(enabled) + self._on_write_error = on_write_error + self._max_bytes = int(max_bytes if max_bytes is not None else VERBOSE_LOG_MAX_BYTES) + self._max_files = max(1, int(max_files if max_files is not None else VERBOSE_LOG_MAX_FILES)) + + self._session_file_path: Path | None = None + self._session_stamp: str | None = None + self._file_index = 1 + self._session_bootstrapped = False + self._error_reported = False + + @property + def enabled(self) -> bool: + return self._enabled + + @property + def session_stamp(self) -> str | None: + return self._session_stamp + + @property + def file_index(self) -> int: + return self._file_index + + def configure( + self, + *, + app_data_dir: Path, + verbose_log_dir: Path | str | None, + enabled: bool, + ) -> None: + previous_dir = self._resolve_logs_dir_path() + self._app_data_dir = Path(app_data_dir) + self._configured_verbose_log_dir = ( + Path(verbose_log_dir) if verbose_log_dir not in (None, "") else None + ) + self._enabled = bool(enabled) + current_dir = self._resolve_logs_dir_path() + if current_dir != previous_dir: + self.reset_session() + self._error_reported = False + + def reset_session(self) -> None: + self._session_file_path = None + self._session_stamp = None + self._file_index = 1 + self._session_bootstrapped = False + + def part_path(self, index: int) -> Path: + logs_dir = self._logs_dir() + if self._session_stamp is None: + self._session_stamp = datetime.now().strftime("%Y%m%d-%H%M%S") + safe_index = max(1, min(self._max_files, int(index))) + return logs_dir / f"mediarecode-verbose-{self._session_stamp}-{safe_index:02d}.log" + + def session_path(self) -> Path: + if self._session_file_path is None: + if not self._session_bootstrapped: + self._session_bootstrapped = True + if self._session_stamp is None: + latest 
= _latest_verbose_log_file(self._logs_dir(), max_files=self._max_files) + if latest is not None: + stamp, index, path = latest + self._session_stamp = stamp + self._file_index = index + self._session_file_path = path + if self._session_file_path is None: + self._session_file_path = self.part_path(self._file_index) + return self._session_file_path + + def prepare_target(self, incoming_bytes: int) -> Path: + path = self.session_path() + if path.exists(): + try: + current_size = path.stat().st_size + except OSError: + current_size = 0 + if current_size + max(0, incoming_bytes) > self._max_bytes: + next_index = self._file_index + 1 + if next_index > self._max_files: + next_index = 1 + self._file_index = next_index + path = self.part_path(self._file_index) + self._session_file_path = path + try: + path.unlink(missing_ok=True) + except OSError: + pass + return path + + def append_application_message(self, message: str, level: LogLevel) -> None: + if not self._enabled: + return + # Keep stdlib logging importable in headless paths such as CLI smoke tests. 
+ from core.i18n import translate_text + + rendered = translate_text(message) + line = f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')} [{level.value}] {rendered}\n" + self._append_raw_line(line) + + def append_tool_output(self, line: str, *, label: str | None = None) -> None: + if not self._enabled: + return + rendered = str(line).rstrip() + if not rendered: + return + prefix = f"[{label}] " if label else "" + entry = f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')} [TOOL] {prefix}{rendered}\n" + self._append_raw_line(entry) + + def _append_raw_line(self, line: str) -> None: + encoded = line.encode("utf-8") + path = self.prepare_target(len(encoded)) + try: + with path.open("a", encoding="utf-8") as handle: + handle.write(line) + except OSError: + if not self._error_reported: + self._error_reported = True + if self._on_write_error is not None: + self._on_write_error() + + def _resolve_logs_dir_path(self) -> Path: + if self._configured_verbose_log_dir is not None: + return Path(self._configured_verbose_log_dir) + return self._app_data_dir / "logs" + + def _logs_dir(self) -> Path: + path = self._resolve_logs_dir_path() + path.mkdir(parents=True, exist_ok=True) + return path diff --git a/core/media_info_fetcher.py b/core/media_info_fetcher.py index 5c73a2f..a555189 100644 --- a/core/media_info_fetcher.py +++ b/core/media_info_fetcher.py @@ -9,7 +9,6 @@ from __future__ import annotations import json -import logging import mimetypes import os import re @@ -21,6 +20,7 @@ from dataclasses import dataclass from pathlib import Path +from core.logging import get_logger from core.version import APP_USER_AGENT @@ -311,7 +311,7 @@ def iso639_2_to_tmdb_lang(code: str) -> str: _URL_TV = "https://www.themoviedb.org/tv/{id}" _URL_IMD = "https://www.imdb.com/title/{imdb_id}/" _TMDB_DEBUG_ENV = "MEDIARECODE_TMDB_DEBUG" -_TMDB_LOGGER = logging.getLogger("mediarecode.tmdb") +_TMDB_LOGGER = get_logger("mediarecode.tmdb") _TMDB_BEARER_TOKEN_ENV = "MEDIARECODE_TMDB_BEARER_TOKEN" 
_TMDB_INSECURE_SSL_ENV = "MEDIARECODE_TMDB_INSECURE_SSL" diff --git a/core/runner.py b/core/runner.py index f40fc6a..c012a7c 100644 --- a/core/runner.py +++ b/core/runner.py @@ -424,7 +424,11 @@ def _run_cmd( if signals is not None and signals._cancel_event.is_set(): proc.kill() raise TaskCancelledError() - chunk_bytes = chunk if isinstance(chunk, bytes) else chunk.encode("utf-8", errors="replace") + chunk_bytes: bytes + if isinstance(chunk, str): + chunk_bytes = chunk.encode("utf-8", errors="replace") + else: + chunk_bytes = bytes(chunk) # Normalise \r\n et \r solitaire en \n pour un split uniforme buf += chunk_bytes.replace(b"\r\n", b"\n").replace(b"\r", b"\n") *complete, buf = buf.split(b"\n") diff --git a/core/version.py b/core/version.py index 5f08550..9e9db17 100644 --- a/core/version.py +++ b/core/version.py @@ -5,7 +5,7 @@ from __future__ import annotations APP_NAME = "Mediarecode" -APP_VERSION = "1.4.2" +APP_VERSION = "2.0.0" APP_VERSION_LABEL = f"v{APP_VERSION}" APP_USER_AGENT = f"{APP_NAME}/{APP_VERSION}" WRITING_APPLICATION_TAG = ( diff --git a/core/workflows/attachment_utils.py b/core/workflows/attachment_utils.py deleted file mode 100644 index 1aa5b82..0000000 --- a/core/workflows/attachment_utils.py +++ /dev/null @@ -1,216 +0,0 @@ -""" -core/workflows/attachment_utils.py — Extraction et gestion des pièces jointes (attachments). 
- -Fournit : - MIME_BY_EXT — extension → type MIME - EXT_BY_MIME — type MIME → extension - mime_for(path) — type MIME d'après l'extension - ext_for_mime(mime) — extension d'après le type MIME - unique_attachment_path — chemin sans collision dans un répertoire - AttachmentPicExtractor — extrait les flux ``attached_pic`` avec RAM → disk fallback -""" - -from __future__ import annotations - -import os -import subprocess -import sys -from pathlib import Path -from typing import Callable - -from core.subprocess_utils import subprocess_text_kwargs - - -# --------------------------------------------------------------------------- -# Tables MIME (union remux_ffmpeg + encode/workflow) -# --------------------------------------------------------------------------- - -MIME_BY_EXT: dict[str, str] = { - ".jpg": "image/jpeg", - ".jpeg": "image/jpeg", - ".png": "image/png", - ".gif": "image/gif", - ".bmp": "image/bmp", - ".webp": "image/webp", - ".tif": "image/tiff", - ".tiff": "image/tiff", - ".ttf": "application/x-truetype-font", - ".otf": "font/otf", - ".woff": "font/woff", - ".woff2": "font/woff2", - ".txt": "text/plain", - ".xml": "application/xml", -} - -EXT_BY_MIME: dict[str, str] = { - "image/jpeg": ".jpg", - "image/png": ".png", - "image/gif": ".gif", - "image/bmp": ".bmp", - "image/webp": ".webp", - "image/tiff": ".tiff", - "application/x-truetype-font": ".ttf", - "font/ttf": ".ttf", - "font/otf": ".otf", - "font/woff": ".woff", - "font/woff2": ".woff2", - "text/plain": ".txt", - "application/xml": ".xml", -} - - -def mime_for(path: Path) -> str: - """Retourne le type MIME d'après l'extension du chemin.""" - return MIME_BY_EXT.get(path.suffix.lower(), "application/octet-stream") - - -def ext_for_mime(mimetype: str, fallback: str = ".bin") -> str: - """Retourne l'extension d'après le type MIME.""" - return EXT_BY_MIME.get((mimetype or "").strip().lower(), fallback) - - -def unique_attachment_path(dest_dir: Path, filename: str) -> Path: - """ - Retourne un chemin unique 
dans ``dest_dir`` sans collision de nom. - - Si ``filename`` est déjà libre, le retourne tel quel ; sinon ajoute - un suffixe numérique croissant (``stem_1.ext``, ``stem_2.ext``, …). - """ - candidate = dest_dir / filename - if not candidate.exists(): - return candidate - stem = Path(filename).stem - suffix = Path(filename).suffix - for idx in range(1, 1000): - alt = dest_dir / f"{stem}_{idx}{suffix}" - if not alt.exists(): - return alt - return dest_dir / f"{stem}_{os.getpid()}{suffix}" - - -# --------------------------------------------------------------------------- -# Extracteur de flux attached_pic -# --------------------------------------------------------------------------- - -class AttachmentPicExtractor: - """ - Extrait les flux ``attached_pic`` (couvertures MKV) vers des fichiers image. - - FFmpeg expose les couvertures MKV comme flux vidéo MJPEG avec - ``disposition.attached_pic=1``. Un simple ``-map … -c:t copy`` ne peut pas - les reconvertir en attachment MKV (le muxeur perd le flag). On extrait une - frame et on la ré-attache via ``-attach``. - - Stratégie d'écriture (RAM → disk) : - 1. ``ram_dir`` (e.g. ``/dev/shm``) si fourni et accessible — aucun I/O disque. - 2. ``fallback_dir`` fourni lors de l'appel à :meth:`extract` — dernier recours. - - Nota : FIFO/named-pipe non applicable — ``ffmpeg -attach`` requiert un - fichier seekable (``fstat`` pour la taille avant écriture de l'en-tête MKV). 
- """ - - def __init__( - self, - ffmpeg_bin: str, - thread_args: list[str], - log_cb: Callable[[str], None] | None = None, - ram_dir: Path | None = None, - ) -> None: - self._ffmpeg = ffmpeg_bin - self._thread_args = list(thread_args) - self._log = log_cb or (lambda _: None) - self._ram_dir = ram_dir - - # ------------------------------------------------------------------ - # API publique - # ------------------------------------------------------------------ - - @staticmethod - def default_ram_dir() -> Path | None: - """ - Retourne le répertoire RAM disponible sur cette plateforme, ou ``None``. - - · Linux / macOS : ``/dev/shm`` (tmpfs) - · Windows : ``None`` (pas d'équivalent standard) - """ - if sys.platform.startswith("win"): - return None - shm = Path("/dev/shm") - if shm.is_dir() and os.access(shm, os.W_OK): - return shm - return None - - def extract( - self, - source: Path, - stream_idx: int, - fallback_dir: Path, - filename_hint: str, - ) -> Path: - """ - Extrait le flux ``attached_pic`` ``stream_idx`` depuis ``source``. - - Tente d'abord ``ram_dir`` (si configuré), puis ``fallback_dir``. - Retourne le chemin du fichier extrait. - - :raises RuntimeError: si l'extraction échoue sur tous les répertoires. - """ - dirs_to_try: list[Path] = [] - if self._ram_dir is not None: - dirs_to_try.append(self._ram_dir) - if fallback_dir not in dirs_to_try: - dirs_to_try.append(fallback_dir) - - last_exc: Exception | None = None - for idx, dest_dir in enumerate(dirs_to_try): - dest = unique_attachment_path(dest_dir, filename_hint) - try: - self._do_extract(source, stream_idx, dest) - if idx > 0: - self._log( - f"Extraction attached_pic : RAM indisponible, " - f"fallback disque utilisé ({dest_dir})." - ) - return dest - except Exception as exc: # noqa: BLE001 - dest.unlink(missing_ok=True) - last_exc = exc - if idx < len(dirs_to_try) - 1: - self._log( - f"Extraction attached_pic vers {dest_dir} échouée " - f"({exc}), tentative suivante." 
- ) - - assert last_exc is not None - raise last_exc - - # ------------------------------------------------------------------ - # Implémentation interne - # ------------------------------------------------------------------ - - def _do_extract(self, source: Path, stream_idx: int, dest: Path) -> None: - """Lance l'extraction FFmpeg d'une seule frame depuis le flux ``stream_idx``.""" - cmd = [ - self._ffmpeg, - "-hide_banner", "-y", - "-i", source.as_posix(), - "-map", f"0:{stream_idx}", - *self._thread_args, - "-c", "copy", - "-frames:v", "1", - dest.as_posix(), - ] - self._log("$ " + " ".join(str(c) for c in cmd)) - result = subprocess.run( - cmd, - capture_output=True, - check=False, - timeout=60, - **subprocess_text_kwargs(), - ) - if result.returncode != 0 or not dest.exists() or dest.stat().st_size == 0: - stderr = (result.stderr or "").strip() - raise RuntimeError( - f"Extraction attached_pic échouée " - f"(source={source.name}, stream={stream_idx}): {stderr}" - ) diff --git a/core/workflows/common/__init__.py b/core/workflows/common/__init__.py new file mode 100644 index 0000000..f748ee2 --- /dev/null +++ b/core/workflows/common/__init__.py @@ -0,0 +1,75 @@ +"""Shared helpers and types for encode/remux workflows.""" + +from core.workflows.common.attachments import ( + ATTACHMENT_EXT_BY_MIME, + ATTACHMENT_MIME_BY_EXT, + attachment_filename_from_meta, + extension_for_mime, + mime_for_path, + sanitize_filename, +) +from core.workflows.common.chapters import ( + ffmeta_escape, + probe_media_duration_seconds, + write_ffmetadata_chapters, +) +from core.workflows.common.ffmpeg_runtime import ( + cli_path, + default_ffmpeg_thread_count, + ffmpeg_progress_args, + normalize_ffmpeg_thread_count, + normalize_max_parallel_video_encodes, +) +from core.workflows.common.metadata import ( + STREAM_SPEC_BY_TRACK_TYPE, + disposition_value, + normalize_track_language, + normalize_track_language_from_track, + resolve_global_tags, +) +from core.workflows.common.timeline_sync 
import ( + append_strict_interleave_mux_flags, + append_sync_inputs, + needs_strict_interleave, + sync_cleanup_paths, +) +from core.workflows.common.track_types import ( + TrackMetaEdit, + TrackMetaPatch, + TrackOffset, + TrackRef, + TrackTimeOffset, + TrackType, +) + +__all__ = [ + "ATTACHMENT_EXT_BY_MIME", + "ATTACHMENT_MIME_BY_EXT", + "STREAM_SPEC_BY_TRACK_TYPE", + "TrackMetaEdit", + "TrackMetaPatch", + "TrackOffset", + "TrackRef", + "TrackTimeOffset", + "TrackType", + "append_strict_interleave_mux_flags", + "append_sync_inputs", + "attachment_filename_from_meta", + "cli_path", + "default_ffmpeg_thread_count", + "disposition_value", + "extension_for_mime", + "ffmeta_escape", + "ffmpeg_progress_args", + "mime_for_path", + "needs_strict_interleave", + "normalize_ffmpeg_thread_count", + "normalize_max_parallel_video_encodes", + "normalize_track_language", + "normalize_track_language_from_track", + "probe_media_duration_seconds", + "resolve_global_tags", + "sanitize_filename", + "sync_cleanup_paths", + "write_ffmetadata_chapters", +] diff --git a/core/workflows/common/attachments.py b/core/workflows/common/attachments.py new file mode 100644 index 0000000..c965cfc --- /dev/null +++ b/core/workflows/common/attachments.py @@ -0,0 +1,59 @@ +from __future__ import annotations + +from pathlib import Path + +ATTACHMENT_MIME_BY_EXT: dict[str, str] = { + ".jpg": "image/jpeg", + ".jpeg": "image/jpeg", + ".png": "image/png", + ".gif": "image/gif", + ".bmp": "image/bmp", + ".webp": "image/webp", + ".tif": "image/tiff", + ".tiff": "image/tiff", + ".ttf": "application/x-truetype-font", + ".otf": "font/otf", + ".woff": "font/woff", + ".woff2": "font/woff2", + ".txt": "text/plain", + ".xml": "application/xml", +} + +ATTACHMENT_EXT_BY_MIME: dict[str, str] = { + "image/jpeg": ".jpg", + "image/png": ".png", + "image/gif": ".gif", + "image/bmp": ".bmp", + "image/webp": ".webp", + "image/tiff": ".tiff", + "application/x-truetype-font": ".ttf", + "font/ttf": ".ttf", + "font/otf": 
".otf", + "font/woff": ".woff", + "font/woff2": ".woff2", + "text/plain": ".txt", + "application/xml": ".xml", +} + + +def mime_for_path(path: Path) -> str: + return ATTACHMENT_MIME_BY_EXT.get(path.suffix.lower(), "application/octet-stream") + + +def extension_for_mime(mime: str, default: str = ".bin") -> str: + return ATTACHMENT_EXT_BY_MIME.get((mime or "").strip().lower(), default) + + +def sanitize_filename(name: str, fallback: str) -> str: + clean = Path((name or "").strip()).name + return clean or fallback + + +def attachment_filename_from_meta(meta: dict[str, object], stream_idx: int) -> str: + raw_name = str(meta.get("filename") or "").strip() + name = Path(raw_name).name if raw_name else f"attachment_{stream_idx}" + if Path(name).suffix: + return name + mime = str(meta.get("mimetype") or "").lower() + suffix = extension_for_mime(mime) + return f"{name}{suffix}" diff --git a/core/workflows/common/chapters.py b/core/workflows/common/chapters.py new file mode 100644 index 0000000..2a6162b --- /dev/null +++ b/core/workflows/common/chapters.py @@ -0,0 +1,79 @@ +from __future__ import annotations + +import json +import subprocess +from pathlib import Path + +from core.subprocess_utils import subprocess_text_kwargs + + +def ffmeta_escape(value: str) -> str: + text = str(value).replace("\\", "\\\\") + text = text.replace("\n", " ") + text = text.replace(";", "\\;").replace("#", "\\#").replace("=", "\\=") + return text + + +def probe_media_duration_seconds(ffprobe_bin: str, source: Path) -> float | None: + cmd = [ + ffprobe_bin, + "-v", "quiet", + "-print_format", "json", + "-show_format", + str(source), + ] + try: + result = subprocess.run( + cmd, + capture_output=True, + check=False, + timeout=20, + **subprocess_text_kwargs(), + ) + except FileNotFoundError: + return None + + if result.returncode != 0: + return None + try: + payload = json.loads(result.stdout or "{}") + raw = (payload.get("format") or {}).get("duration") + if raw is None: + return None + value = 
float(raw) + return value if value > 0 else None + except Exception: + return None + + +def write_ffmetadata_chapters( + entries: list, + out_dir: Path, + duration_s: float | None, +) -> Path: + sorted_entries = sorted(entries, key=lambda e: float(getattr(e, "timecode_s", 0.0))) + + if duration_s is None: + duration_s = max((float(getattr(e, "timecode_s", 0.0)) for e in sorted_entries), default=0.0) + 1.0 + total_ms = max(1, int(round(duration_s * 1000.0))) + + lines: list[str] = [";FFMETADATA1"] + for idx, chapter in enumerate(sorted_entries): + start_ms = max(0, int(round(float(getattr(chapter, "timecode_s", 0.0)) * 1000.0))) + if idx + 1 < len(sorted_entries): + end_ms = max(start_ms + 1, int(round(float(getattr(sorted_entries[idx + 1], "timecode_s", 0.0)) * 1000.0))) + else: + end_ms = max(start_ms + 1, total_ms) + + lines.extend([ + "", + "[CHAPTER]", + "TIMEBASE=1/1000", + f"START={start_ms}", + f"END={end_ms}", + f"title={ffmeta_escape(str(getattr(chapter, 'name', '') or ''))}", + ]) + + ffmeta_path = out_dir / "chapters.ffmetadata" + ffmeta_path.write_text("\n".join(lines) + "\n", encoding="utf-8") + return ffmeta_path diff --git a/core/workflows/common/ffmpeg_runtime.py b/core/workflows/common/ffmpeg_runtime.py new file mode 100644 index 0000000..10fd093 --- /dev/null +++ b/core/workflows/common/ffmpeg_runtime.py @@ -0,0 +1,43 @@ +from __future__ import annotations + +import os +from pathlib import Path + + +def cli_path(path: Path | str) -> str: + if isinstance(path, str): + return path + text = str(path) + if text.startswith("\\\\.\\pipe\\"): + return text + return path.as_posix() + + +def default_ffmpeg_thread_count() -> int: + """Default FFmpeg thread count: logical CPU count x 0.75, rounded up.""" + cpu_count = os.cpu_count() or 1 + return max(1, (cpu_count * 3 + 3) // 4) + + +def normalize_ffmpeg_thread_count(value: int | None) -> int: + """Return a safe FFmpeg thread count, preserving 0 as ffmpeg auto mode.""" + if value is None or value < 0: + 
return default_ffmpeg_thread_count() + return value + + +def normalize_max_parallel_video_encodes(value: int | None) -> int: + """Return a safe per-workflow parallelism value for multi-video preparation.""" + if value is None: + return 1 + return max(1, int(value)) + + +def ffmpeg_progress_args() -> list[str]: + """Force a stable machine-readable ffmpeg progress output.""" + return ["-progress", "pipe:1", "-nostats"] + + +def ffmpeg_thread_args(thread_count: int) -> list[str]: + """Formate `-threads N` (N=0 → ffmpeg auto). Le caller a déjà normalisé `thread_count`.""" + return ["-threads", str(max(0, int(thread_count)))] diff --git a/core/workflows/common/metadata.py b/core/workflows/common/metadata.py new file mode 100644 index 0000000..77eb7a7 --- /dev/null +++ b/core/workflows/common/metadata.py @@ -0,0 +1,98 @@ +from __future__ import annotations + +from collections.abc import Mapping + +from core.lang_tags import Rfc5646LanguageTags as LangTags + +STREAM_SPEC_BY_TRACK_TYPE: dict[str, str] = { + "video": "v", + "audio": "a", + "subtitle": "s", +} + + +def resolve_global_tags( + tag_overrides: Mapping[str, str] | None, + file_title: str = "", +) -> dict[str, str]: + tags: dict[str, str] = {} + if tag_overrides is not None: + for key, value in tag_overrides.items(): + key_s = str(key).strip() + value_s = str(value).strip() + if not key_s or not value_s: + continue + tags[key_s] = value_s + if file_title.strip(): + tags["title"] = file_title.strip() + return tags + + +def normalize_track_language( + language: str, + title: str | None = None, + *, + default_und: bool = False, +) -> str | None: + raw = (language or "").strip() + if not raw: + return "und" if default_und else None + + canonical = LangTags.normalize(raw) or raw + if canonical.lower() == "und": + return "und" + + regional = LangTags.regionalize_track_language(canonical, title) or canonical + if LangTags.is_valid(regional): + return regional + if LangTags.is_valid(canonical): + return canonical + return 
"und" if default_und else None + + +def normalize_track_language_from_track(track) -> str: + normalized = normalize_track_language( + getattr(track, "language", ""), + getattr(track, "title", None), + default_und=True, + ) + return normalized or "und" + + +def disposition_value( + *, + flag_default: bool | None, + flag_forced: bool | None, + flag_hearing_impaired: bool | None, + flag_visual_impaired: bool | None, + flag_original: bool | None, + flag_commentary: bool | None, + allow_partial: bool = False, +) -> str | None: + values = ( + flag_default, + flag_forced, + flag_hearing_impaired, + flag_visual_impaired, + flag_original, + flag_commentary, + ) + if all(v is None for v in values): + return None + if (not allow_partial) and any(v is None for v in values): + return None + + flags: list[str] = [] + if flag_default: + flags.append("default") + if flag_forced: + flags.append("forced") + if flag_hearing_impaired: + flags.append("hearing_impaired") + if flag_visual_impaired: + flags.append("visual_impaired") + if flag_original: + flags.append("original") + if flag_commentary: + flags.append("comment") + return "+".join(flags) if flags else "0" diff --git a/core/workflows/common/remux_postprocess.py b/core/workflows/common/remux_postprocess.py new file mode 100644 index 0000000..27c4684 --- /dev/null +++ b/core/workflows/common/remux_postprocess.py @@ -0,0 +1,50 @@ +"""Shared postprocess helpers formerly borrowed from the remux workflow facade.""" + +from __future__ import annotations + +from pathlib import Path +from typing import Callable + +from core.workflows.common.chapters import ( + probe_media_duration_seconds, + write_ffmetadata_chapters, +) +from core.workflows.remux_mapping import resolve_mapped_tracks +from core.workflows.remux_models import RemuxConfig +from core.workflows.remux_sync import decide_strict_interleave_with_prescan + + +class RemuxPostprocessService: + """Small shared contract used by encode without depending on RemuxWorkflow.""" + + def 
__init__(self, *, ffprobe_bin: str) -> None: + self._ffprobe_bin = ffprobe_bin + + def set_ffprobe_bin(self, ffprobe_bin: str) -> None: + self._ffprobe_bin = ffprobe_bin + + def decide_strict_interleave_with_prescan( + self, + config: RemuxConfig, + *, + log_cb: Callable[[str, str], None], + ) -> bool: + return decide_strict_interleave_with_prescan( + config, + resolve_mapped_tracks=resolve_mapped_tracks, + log_cb=log_cb, + ) + + def probe_duration_seconds(self, source: Path) -> float | None: + return probe_media_duration_seconds(self._ffprobe_bin, source) + + @staticmethod + def write_ffmetadata_chapters( + entries: list, + out_dir: Path, + duration_s: float | None, + ) -> Path: + return write_ffmetadata_chapters(entries, out_dir, duration_s) + + +__all__ = ["RemuxPostprocessService"] diff --git a/core/workflows/common/timeline_sync.py b/core/workflows/common/timeline_sync.py new file mode 100644 index 0000000..250d1dc --- /dev/null +++ b/core/workflows/common/timeline_sync.py @@ -0,0 +1,59 @@ +from __future__ import annotations + +from collections.abc import Sequence +from pathlib import Path + +from core.workflows.common.track_types import TimelineMappedTrack + + +def needs_strict_interleave( + mapped_tracks: Sequence[TimelineMappedTrack], + *, + foreign_track_types: tuple[str, ...] 
= ("audio", "subtitle"), +) -> bool: + if len({int(mapped_track.source_file_index) for mapped_track in mapped_tracks}) < 2: + return False + + has_subtitle_output = any( + str(mapped_track.track.track_type) == "subtitle" + for mapped_track in mapped_tracks + ) + if not has_subtitle_output: + return False + + primary_video = next( + (mapped_track for mapped_track in mapped_tracks if str(mapped_track.track.track_type) == "video"), + None, + ) + if primary_video is None: + return False + + primary_source = int(primary_video.source_file_index) + return any( + str(mapped_track.track.track_type) in foreign_track_types + and int(mapped_track.source_file_index) != primary_source + for mapped_track in mapped_tracks + ) + + +def append_strict_interleave_mux_flags(cmd: list[str]) -> None: + cmd.extend(["-max_interleave_delta", "0"]) + cmd.extend(["-max_muxing_queue_size", "9999"]) + + +def append_sync_inputs( + cmd: list[str], + sync_inputs: list[Path | str], + *, + input_formats: list[str] | None = None, + default_format: str = "matroska", +) -> None: + for index, sync_input in enumerate(sync_inputs): + fmt = default_format + if input_formats is not None and index < len(input_formats): + fmt = str(input_formats[index] or default_format) + cmd.extend(["-f", fmt, "-i", str(sync_input)]) + + +def sync_cleanup_paths(sync_inputs: list[Path | str]) -> list[Path]: + return [path for path in sync_inputs if isinstance(path, Path)] diff --git a/core/workflows/common/track_types.py b/core/workflows/common/track_types.py new file mode 100644 index 0000000..d65711d --- /dev/null +++ b/core/workflows/common/track_types.py @@ -0,0 +1,66 @@ +from __future__ import annotations + +from dataclasses import dataclass +from enum import Enum +from pathlib import Path +from typing import Protocol + + +class TrackType(str, Enum): + VIDEO = "video" + AUDIO = "audio" + SUBTITLE = "subtitle" + ATTACHMENT = "attachment" + + +@dataclass(frozen=True) +class TrackRef: + track_type: str + source_path: Path 
+ stream_index: int + track_entry_id: str | None = None + + +@dataclass +class TrackOffset: + """Décalage temporel appliqué à une piste d'entrée (en millisecondes).""" + + track_type: str + source_path: Path + stream_index: int + offset_ms: int = 0 + + +@dataclass +class TrackMetaPatch: + """Édition de métadonnées/dispositions d'une piste de sortie.""" + + track_order: int + language: str = "" + title: str | None = None + flag_default: bool | None = None + flag_forced: bool | None = None + flag_hearing_impaired: bool | None = None + flag_visual_impaired: bool | None = None + flag_original: bool | None = None + flag_commentary: bool | None = None + + +TrackTimeOffset = TrackOffset +TrackMetaEdit = TrackMetaPatch + + +class TrackTypeCarrier(Protocol): + @property + def track_type(self) -> str: + ... + + +class TimelineMappedTrack(Protocol): + @property + def source_file_index(self) -> int: + ... + + @property + def track(self) -> TrackTypeCarrier: + ... diff --git a/core/workflows/encode/__init__.py b/core/workflows/encode/__init__.py index bb71a7e..f85abee 100644 --- a/core/workflows/encode/__init__.py +++ b/core/workflows/encode/__init__.py @@ -6,8 +6,8 @@ continue to work unchanged. 
""" -from core.workflows.encode.models import ( - QualityMode, +from core.workflows.encode.catalog import ( + AudioCodecSpec, SOFTWARE_VIDEO_CODECS, HARDWARE_VIDEO_CODECS, AUDIO_CODECS, @@ -19,9 +19,25 @@ QSV_PRESETS, AMF_PRESETS, TONEMAP_ALGORITHMS, + VideoCodecFamily, + VideoCodecSpec, + audio_codec_spec, + encoder_badge, + is_h264_video_codec, + is_hardware_video_codec, presets_for_codec, + supports_dynamic_hdr, + supports_force_8bit, + video_codec_family, + video_codec_spec, +) +from core.workflows.encode.models import ( + QualityMode, VideoEncodeSettings, AudioTrackSettings, + VideoTrackEncodePlan, + TrackMetaPatch, + TrackOffset, TrackMetaEdit, TrackTimeOffset, EncodeConfig, @@ -34,6 +50,9 @@ __all__ = [ "QualityMode", + "VideoCodecFamily", + "VideoCodecSpec", + "AudioCodecSpec", "SOFTWARE_VIDEO_CODECS", "HARDWARE_VIDEO_CODECS", "AUDIO_CODECS", @@ -46,8 +65,19 @@ "AMF_PRESETS", "TONEMAP_ALGORITHMS", "presets_for_codec", + "video_codec_spec", + "audio_codec_spec", + "video_codec_family", + "is_h264_video_codec", + "is_hardware_video_codec", + "supports_dynamic_hdr", + "supports_force_8bit", + "encoder_badge", "VideoEncodeSettings", "AudioTrackSettings", + "VideoTrackEncodePlan", + "TrackMetaPatch", + "TrackOffset", "TrackMetaEdit", "TrackTimeOffset", "EncodeConfig", diff --git a/core/workflows/encode/catalog.py b/core/workflows/encode/catalog.py new file mode 100644 index 0000000..1fcea34 --- /dev/null +++ b/core/workflows/encode/catalog.py @@ -0,0 +1,348 @@ +"""Shared codec catalogue for encode workflow, hardware detection and UI.""" + +from __future__ import annotations + +from dataclasses import dataclass +from enum import Enum + +SOFTWARE_VIDEO_CODECS: list[tuple[str, str]] = [ + ("libx265", "x265 — HEVC (logiciel)"), + ("libx264", "x264 — H.264 (logiciel)"), + ("libsvtav1", "SVT-AV1 (logiciel)"), +] + +HARDWARE_VIDEO_CODECS: list[tuple[str, str]] = [ + ("hevc_nvenc", "NVENC — HEVC (NVIDIA)"), + ("hevc_amf", "AMF — HEVC (AMD-WIN)"), + ("hevc_vaapi", "VAAPI 
— HEVC (AMD)"), + ("hevc_qsv", "QSV — HEVC (Intel)"), + ("h264_nvenc", "NVENC — H.264 (NVIDIA)"), + ("h264_amf", "AMF — H.264 (AMD-WIN)"), + ("h264_vaapi", "VAAPI — H.264 (AMD)"), + ("h264_qsv", "QSV — H.264 (Intel)"), + ("av1_nvenc", "NVENC — AV1 (NVIDIA RTX 40+)"), + ("av1_amf", "AMF — AV1 (AMD RX 7000+)"), + ("av1_vaapi", "VAAPI — AV1 (AMD/Intel)"), + ("av1_qsv", "QSV — AV1 (Intel Arc/12e gen+)"), +] + +AUDIO_CODECS: list[tuple[str, str]] = [ + ("copy", "Copie (sans réencodage)"), + ("aac", "AAC"), + ("ac3", "AC-3 (Dolby Digital)"), + ("eac3", "EAC-3 (Dolby Digital+)"), + ("flac", "FLAC (sans perte)"), +] + +X265_PRESETS = [ + "ultrafast", "superfast", "veryfast", "faster", "fast", + "medium", "slow", "slower", "veryslow", "placebo", +] +X264_PRESETS = X265_PRESETS +SVTAV1_PRESETS = [str(i) for i in range(13)] +NVENC_PRESETS = ["p1", "p2", "p3", "p4", "p5", "p6", "p7", "slow", "medium", "fast", "hp", "hq"] +VAAPI_PRESETS = [str(i) for i in range(8)] +QSV_PRESETS = ["veryslow", "slower", "slow", "medium", "fast", "faster", "veryfast"] +AMF_PRESETS = ["quality", "balanced", "speed"] + +TONEMAP_ALGORITHMS = ["hable", "mobius", "reinhard", "gamma", "linear", "clip"] + +NVENC_VIDEO_CODECS: frozenset[str] = frozenset({"hevc_nvenc", "h264_nvenc", "av1_nvenc"}) +AMF_VIDEO_CODECS: frozenset[str] = frozenset({"hevc_amf", "h264_amf", "av1_amf"}) +QSV_VIDEO_CODECS: frozenset[str] = frozenset({"hevc_qsv", "h264_qsv", "av1_qsv"}) +VAAPI_VIDEO_CODECS: frozenset[str] = frozenset({"hevc_vaapi", "h264_vaapi", "av1_vaapi"}) +H264_VIDEO_CODECS: frozenset[str] = frozenset({"libx264", "h264_nvenc", "h264_amf", "h264_qsv", "h264_vaapi"}) +DYNAMIC_HDR_VIDEO_CODECS: frozenset[str] = frozenset({"copy", "libx265", "hevc_nvenc", "hevc_amf", "hevc_qsv", "hevc_vaapi"}) + +VIDEO_ENCODER_BADGES: dict[str, str] = { + "libx265": "x265", + "libx264": "x264", + "libsvtav1": "SVT-AV1", + "hevc_nvenc": "NVENC", + "h264_nvenc": "NVENC", + "av1_nvenc": "NVENC", + "hevc_amf": "AMF", + "h264_amf": 
"AMF", + "av1_amf": "AMF", + "hevc_qsv": "QSV", + "h264_qsv": "QSV", + "av1_qsv": "QSV", + "hevc_vaapi": "VAAPI", + "h264_vaapi": "VAAPI", + "av1_vaapi": "VAAPI", +} +VIDEO_HDR_BADGE_ORDER: tuple[str, ...] = ("HDR", "HLG", "DV", "10+", "SDR") + + +class VideoCodecFamily(str, Enum): + SOFTWARE = "software" + NVENC = "nvenc" + AMF = "amf" + QSV = "qsv" + VAAPI = "vaapi" + OTHER = "other" + + +@dataclass(frozen=True) +class VideoCodecSpec: + codec_id: str + label: str + family: VideoCodecFamily + presets: tuple[str, ...] + encoder_badge: str + supports_dynamic_hdr: bool = False + is_h264: bool = False + supports_force_8bit: bool = False + + @property + def is_hardware(self) -> bool: + return self.family is not VideoCodecFamily.SOFTWARE + + +@dataclass(frozen=True) +class AudioCodecSpec: + codec_id: str + label: str + passthrough: bool = False + lossless: bool = False + supports_bitrate: bool = True + supports_truehd_core_bsf: bool = False + + +VIDEO_CODEC_SPECS: dict[str, VideoCodecSpec] = { + "libx265": VideoCodecSpec( + codec_id="libx265", + label="x265 — HEVC (logiciel)", + family=VideoCodecFamily.SOFTWARE, + presets=tuple(X265_PRESETS), + encoder_badge="x265", + supports_dynamic_hdr=True, + ), + "libx264": VideoCodecSpec( + codec_id="libx264", + label="x264 — H.264 (logiciel)", + family=VideoCodecFamily.SOFTWARE, + presets=tuple(X264_PRESETS), + encoder_badge="x264", + is_h264=True, + supports_force_8bit=True, + ), + "libsvtav1": VideoCodecSpec( + codec_id="libsvtav1", + label="SVT-AV1 (logiciel)", + family=VideoCodecFamily.SOFTWARE, + presets=tuple(SVTAV1_PRESETS), + encoder_badge="SVT-AV1", + ), + "hevc_nvenc": VideoCodecSpec( + codec_id="hevc_nvenc", + label="NVENC — HEVC (NVIDIA)", + family=VideoCodecFamily.NVENC, + presets=tuple(NVENC_PRESETS), + encoder_badge="NVENC", + supports_dynamic_hdr=True, + ), + "hevc_amf": VideoCodecSpec( + codec_id="hevc_amf", + label="AMF — HEVC (AMD-WIN)", + family=VideoCodecFamily.AMF, + presets=tuple(AMF_PRESETS), + 
encoder_badge="AMF", + supports_dynamic_hdr=True, + ), + "hevc_vaapi": VideoCodecSpec( + codec_id="hevc_vaapi", + label="VAAPI — HEVC (AMD)", + family=VideoCodecFamily.VAAPI, + presets=tuple(VAAPI_PRESETS), + encoder_badge="VAAPI", + supports_dynamic_hdr=True, + ), + "hevc_qsv": VideoCodecSpec( + codec_id="hevc_qsv", + label="QSV — HEVC (Intel)", + family=VideoCodecFamily.QSV, + presets=tuple(QSV_PRESETS), + encoder_badge="QSV", + supports_dynamic_hdr=True, + ), + "h264_nvenc": VideoCodecSpec( + codec_id="h264_nvenc", + label="NVENC — H.264 (NVIDIA)", + family=VideoCodecFamily.NVENC, + presets=tuple(NVENC_PRESETS), + encoder_badge="NVENC", + is_h264=True, + supports_force_8bit=True, + ), + "h264_amf": VideoCodecSpec( + codec_id="h264_amf", + label="AMF — H.264 (AMD-WIN)", + family=VideoCodecFamily.AMF, + presets=tuple(AMF_PRESETS), + encoder_badge="AMF", + is_h264=True, + supports_force_8bit=True, + ), + "h264_vaapi": VideoCodecSpec( + codec_id="h264_vaapi", + label="VAAPI — H.264 (AMD)", + family=VideoCodecFamily.VAAPI, + presets=tuple(VAAPI_PRESETS), + encoder_badge="VAAPI", + is_h264=True, + supports_force_8bit=True, + ), + "h264_qsv": VideoCodecSpec( + codec_id="h264_qsv", + label="QSV — H.264 (Intel)", + family=VideoCodecFamily.QSV, + presets=tuple(QSV_PRESETS), + encoder_badge="QSV", + is_h264=True, + supports_force_8bit=True, + ), + "av1_nvenc": VideoCodecSpec( + codec_id="av1_nvenc", + label="NVENC — AV1 (NVIDIA RTX 40+)", + family=VideoCodecFamily.NVENC, + presets=tuple(NVENC_PRESETS), + encoder_badge="NVENC", + ), + "av1_amf": VideoCodecSpec( + codec_id="av1_amf", + label="AMF — AV1 (AMD RX 7000+)", + family=VideoCodecFamily.AMF, + presets=tuple(AMF_PRESETS), + encoder_badge="AMF", + ), + "av1_vaapi": VideoCodecSpec( + codec_id="av1_vaapi", + label="VAAPI — AV1 (AMD/Intel)", + family=VideoCodecFamily.VAAPI, + presets=tuple(VAAPI_PRESETS), + encoder_badge="VAAPI", + ), + "av1_qsv": VideoCodecSpec( + codec_id="av1_qsv", + label="QSV — AV1 (Intel Arc/12e 
gen+)", + family=VideoCodecFamily.QSV, + presets=tuple(QSV_PRESETS), + encoder_badge="QSV", + ), +} + +VIDEO_CODEC_FAMILY_MAP: dict[str, VideoCodecFamily] = { + codec_id: spec.family for codec_id, spec in VIDEO_CODEC_SPECS.items() +} + +AUDIO_CODEC_SPECS: dict[str, AudioCodecSpec] = { + "copy": AudioCodecSpec( + codec_id="copy", + label="Copie (sans réencodage)", + passthrough=True, + supports_bitrate=False, + supports_truehd_core_bsf=True, + ), + "aac": AudioCodecSpec(codec_id="aac", label="AAC"), + "ac3": AudioCodecSpec(codec_id="ac3", label="AC-3 (Dolby Digital)"), + "eac3": AudioCodecSpec(codec_id="eac3", label="EAC-3 (Dolby Digital+)"), + "flac": AudioCodecSpec( + codec_id="flac", + label="FLAC (sans perte)", + lossless=True, + supports_bitrate=False, + ), +} + + +def presets_for_codec(codec: str) -> list[str]: + spec = video_codec_spec(codec) + if spec is not None: + return list(spec.presets) + return X265_PRESETS + + +def is_h264_video_codec(codec: str) -> bool: + spec = video_codec_spec(codec) + return bool(spec is not None and spec.is_h264) + + +def supports_dynamic_hdr(codec: str) -> bool: + spec = video_codec_spec(codec) + if spec is not None: + return spec.supports_dynamic_hdr + return str(codec or "").strip().lower() in DYNAMIC_HDR_VIDEO_CODECS + + +def encoder_badge(codec: str) -> str: + spec = video_codec_spec(codec) + if spec is not None: + return spec.encoder_badge + normalized = str(codec or "").strip().lower() + return VIDEO_ENCODER_BADGES.get(normalized, normalized.upper()) + + +def video_codec_spec(codec: str) -> VideoCodecSpec | None: + normalized = str(codec or "").strip().lower() + return VIDEO_CODEC_SPECS.get(normalized) + + +def audio_codec_spec(codec: str) -> AudioCodecSpec | None: + normalized = str(codec or "").strip().lower() + return AUDIO_CODEC_SPECS.get(normalized) + + +def video_codec_family(codec: str) -> VideoCodecFamily: + spec = video_codec_spec(codec) + if spec is None: + return VideoCodecFamily.OTHER + return spec.family + + 
+def is_hardware_video_codec(codec: str) -> bool: + spec = video_codec_spec(codec) + return bool(spec is not None and spec.is_hardware) + + +def supports_force_8bit(codec: str) -> bool: + spec = video_codec_spec(codec) + return bool(spec is not None and spec.supports_force_8bit) + + +__all__ = [ + "VideoCodecFamily", + "VideoCodecSpec", + "AudioCodecSpec", + "SOFTWARE_VIDEO_CODECS", + "HARDWARE_VIDEO_CODECS", + "AUDIO_CODECS", + "X265_PRESETS", + "X264_PRESETS", + "SVTAV1_PRESETS", + "NVENC_PRESETS", + "VAAPI_PRESETS", + "QSV_PRESETS", + "AMF_PRESETS", + "TONEMAP_ALGORITHMS", + "NVENC_VIDEO_CODECS", + "AMF_VIDEO_CODECS", + "QSV_VIDEO_CODECS", + "VAAPI_VIDEO_CODECS", + "H264_VIDEO_CODECS", + "DYNAMIC_HDR_VIDEO_CODECS", + "VIDEO_ENCODER_BADGES", + "VIDEO_HDR_BADGE_ORDER", + "VIDEO_CODEC_SPECS", + "VIDEO_CODEC_FAMILY_MAP", + "AUDIO_CODEC_SPECS", + "presets_for_codec", + "is_h264_video_codec", + "supports_dynamic_hdr", + "encoder_badge", + "video_codec_spec", + "audio_codec_spec", + "video_codec_family", + "is_hardware_video_codec", + "supports_force_8bit", +] diff --git a/core/workflows/encode/domain/__init__.py b/core/workflows/encode/domain/__init__.py new file mode 100644 index 0000000..6ff2571 --- /dev/null +++ b/core/workflows/encode/domain/__init__.py @@ -0,0 +1,31 @@ +"""Encode domain services.""" + +from .codecs import ( + EncodeCodecDomainCallbacks, + audio_codec_args, + build_encoder_vf, + force_h264_8bit, + h264_8bit_pix_fmt_args, + hardware_input_args, + hdr_meta_args, + needs_ac3_51_downmix, + video_codec_args, + video_codec_args_bitrate, + video_codec_args_crf, + x265_params, +) + +__all__ = [ + "EncodeCodecDomainCallbacks", + "audio_codec_args", + "build_encoder_vf", + "force_h264_8bit", + "h264_8bit_pix_fmt_args", + "hardware_input_args", + "hdr_meta_args", + "needs_ac3_51_downmix", + "video_codec_args", + "video_codec_args_bitrate", + "video_codec_args_crf", + "x265_params", +] diff --git a/core/workflows/encode/domain/codecs.py 
b/core/workflows/encode/domain/codecs.py new file mode 100644 index 0000000..17cd173 --- /dev/null +++ b/core/workflows/encode/domain/codecs.py @@ -0,0 +1,395 @@ +"""Codec domain helpers extracted from EncodeWorkflow.""" + +from __future__ import annotations + +from dataclasses import dataclass + +from core.workflows.encode.catalog import ( + AMF_VIDEO_CODECS, + NVENC_VIDEO_CODECS, + QSV_VIDEO_CODECS, + VAAPI_VIDEO_CODECS, + is_h264_video_codec, +) +from core.workflows.encode.models import ( + AudioTrackSettings, + QualityMode, + VideoEncodeSettings, + normalize_audio_bitrate_kbps, +) + + +@dataclass(frozen=True) +class EncodeCodecDomainCallbacks: + platform: str + vaapi_device: str | None = None + qsv_device: str | None = None + amf_device: str | None = None + nvenc_device: str | None = None + + +def force_h264_8bit(video: VideoEncodeSettings) -> bool: + return bool(getattr(video, "force_8bit", False)) and is_h264_video_codec(video.codec) + + +def h264_8bit_pix_fmt_args(video: VideoEncodeSettings) -> list[str]: + if not force_h264_8bit(video): + return [] + if video.codec == "libx264": + return ["-pix_fmt", "yuv420p"] + return ["-pix_fmt", "nv12"] + + +def x265_params(video: VideoEncodeSettings) -> str: + parts: list[str] = [] + if video.extra_params: + parts.append(video.extra_params.strip(":")) + if video.inject_hdr_meta and not video.tonemap_to_sdr: + if video.master_display: + parts.append(f"master-display={video.master_display}") + if video.max_cll: + parts.append(f"max-cll={video.max_cll}") + return ":".join(part for part in parts if part) + + +def video_codec_args(video: VideoEncodeSettings, bitrate_kbps: int, *, callbacks: EncodeCodecDomainCallbacks) -> list[str]: + if video.quality_mode == QualityMode.CRF: + return video_codec_args_crf(video, callbacks=callbacks) + return video_codec_args_bitrate(video, bitrate_kbps, callbacks=callbacks) + + +def video_codec_args_crf(video: VideoEncodeSettings, *, callbacks: EncodeCodecDomainCallbacks) -> list[str]: + 
match video.codec: + case "copy": + return ["-c:v", "copy"] + case "libx265": + args = ["-c:v", "libx265", "-crf", str(video.crf), "-preset", video.preset] + x265 = x265_params(video) + if x265: + args.extend(["-x265-params", x265]) + return args + case "libx264": + return [ + "-c:v", "libx264", "-crf", str(video.crf), "-preset", video.preset, + *h264_8bit_pix_fmt_args(video), + ] + case "libsvtav1": + args = ["-c:v", "libsvtav1", "-crf", str(video.crf), "-preset", video.preset] + if video.extra_params: + args.extend(["-svtav1-params", video.extra_params]) + return args + case "hevc_nvenc": + return [ + "-c:v", "hevc_nvenc", "-rc:v", "vbr", "-cq:v", str(video.crf), "-preset:v", video.preset, + *nvenc_device_args(callbacks), + ] + case "hevc_amf": + args = ["-c:v", "hevc_amf", "-rc", "cqp", "-qp_p", str(video.crf), "-qp_i", str(video.crf)] + if video.preset: + args.extend(["-quality", video.preset]) + return args + case "hevc_qsv": + args = ["-c:v", "hevc_qsv", "-global_quality", str(video.crf), "-look_ahead", "1", "-async_depth", "4"] + if video.preset: + args.extend(["-preset", video.preset]) + return args + case "hevc_vaapi": + return [ + "-c:v", "hevc_vaapi", "-rc_mode", "CQP", "-qp", str(video.crf), + "-compression_level", (video.preset or "4"), + "-async_depth", "4", + ] + case "h264_nvenc": + return [ + "-c:v", "h264_nvenc", "-rc:v", "vbr", "-cq:v", str(video.crf), "-preset:v", video.preset, + *nvenc_device_args(callbacks), + *h264_8bit_pix_fmt_args(video), + ] + case "h264_amf": + args = ["-c:v", "h264_amf", "-rc", "cqp", "-qp_p", str(video.crf), "-qp_i", str(video.crf)] + if video.preset: + args.extend(["-quality", video.preset]) + args.extend(h264_8bit_pix_fmt_args(video)) + return args + case "h264_qsv": + args = ["-c:v", "h264_qsv", "-global_quality", str(video.crf), "-async_depth", "4"] + if video.preset: + args.extend(["-preset", video.preset]) + args.extend(h264_8bit_pix_fmt_args(video)) + return args + case "h264_vaapi": + return [ + "-c:v", 
"h264_vaapi", "-rc_mode", "CQP", "-qp", str(video.crf), + "-compression_level", (video.preset or "4"), + "-async_depth", "4", + *h264_8bit_pix_fmt_args(video), + ] + case "av1_nvenc": + return [ + "-c:v", "av1_nvenc", "-rc:v", "vbr", "-cq:v", str(video.crf), "-preset:v", video.preset, + *nvenc_device_args(callbacks), + ] + case "av1_amf": + args = ["-c:v", "av1_amf", "-rc", "cqp", "-qp_p", str(video.crf), "-qp_i", str(video.crf)] + if video.preset: + args.extend(["-quality", video.preset]) + return args + case "av1_qsv": + args = ["-c:v", "av1_qsv", "-global_quality", str(video.crf), "-async_depth", "4"] + if video.preset: + args.extend(["-preset", video.preset]) + return args + case "av1_vaapi": + return [ + "-c:v", "av1_vaapi", "-rc_mode", "CQP", "-qp", str(video.crf), + "-compression_level", (video.preset or "4"), + "-async_depth", "4", + ] + case _: + return ["-c:v", video.codec, "-crf", str(video.crf)] + + +def video_codec_args_bitrate( + video: VideoEncodeSettings, + bitrate_kbps: int, + *, + callbacks: EncodeCodecDomainCallbacks, +) -> list[str]: + match video.codec: + case "copy": + return ["-c:v", "copy"] + case "libx265": + args = ["-c:v", "libx265", "-b:v", f"{bitrate_kbps}k", "-preset", video.preset] + x265 = x265_params(video) + if x265: + args.extend(["-x265-params", x265]) + return args + case "libx264": + return [ + "-c:v", "libx264", "-b:v", f"{bitrate_kbps}k", "-preset", video.preset, + *h264_8bit_pix_fmt_args(video), + ] + case "libsvtav1": + args = ["-c:v", "libsvtav1", "-b:v", f"{bitrate_kbps}k", "-preset", video.preset] + if video.extra_params: + args.extend(["-svtav1-params", video.extra_params]) + return args + case "hevc_nvenc": + return [ + "-c:v", "hevc_nvenc", "-b:v", f"{bitrate_kbps}k", "-preset:v", video.preset, + *nvenc_device_args(callbacks), + ] + case "hevc_amf": + args = ["-c:v", "hevc_amf", "-b:v", f"{bitrate_kbps}k"] + if video.preset: + args.extend(["-quality", video.preset]) + return args + case "hevc_qsv": + args = ["-c:v", 
"hevc_qsv", "-b:v", f"{bitrate_kbps}k", "-async_depth", "4"] + if video.preset: + args.extend(["-preset", video.preset]) + return args + case "hevc_vaapi": + return [ + "-c:v", "hevc_vaapi", "-rc_mode", "VBR", "-b:v", f"{bitrate_kbps}k", + "-compression_level", (video.preset or "4"), + "-async_depth", "4", + ] + case "h264_nvenc": + return [ + "-c:v", "h264_nvenc", "-b:v", f"{bitrate_kbps}k", "-preset:v", video.preset, + *nvenc_device_args(callbacks), + *h264_8bit_pix_fmt_args(video), + ] + case "h264_amf": + args = ["-c:v", "h264_amf", "-b:v", f"{bitrate_kbps}k"] + if video.preset: + args.extend(["-quality", video.preset]) + args.extend(h264_8bit_pix_fmt_args(video)) + return args + case "h264_qsv": + args = ["-c:v", "h264_qsv", "-b:v", f"{bitrate_kbps}k", "-async_depth", "4"] + if video.preset: + args.extend(["-preset", video.preset]) + args.extend(h264_8bit_pix_fmt_args(video)) + return args + case "h264_vaapi": + return [ + "-c:v", "h264_vaapi", "-rc_mode", "VBR", "-b:v", f"{bitrate_kbps}k", + "-compression_level", (video.preset or "4"), + "-async_depth", "4", + *h264_8bit_pix_fmt_args(video), + ] + case "av1_nvenc": + return [ + "-c:v", "av1_nvenc", "-b:v", f"{bitrate_kbps}k", "-preset:v", video.preset, + *nvenc_device_args(callbacks), + ] + case "av1_amf": + args = ["-c:v", "av1_amf", "-b:v", f"{bitrate_kbps}k"] + if video.preset: + args.extend(["-quality", video.preset]) + return args + case "av1_qsv": + args = ["-c:v", "av1_qsv", "-b:v", f"{bitrate_kbps}k", "-async_depth", "4"] + if video.preset: + args.extend(["-preset", video.preset]) + return args + case "av1_vaapi": + return [ + "-c:v", "av1_vaapi", "-rc_mode", "VBR", "-b:v", f"{bitrate_kbps}k", + "-compression_level", (video.preset or "4"), + "-async_depth", "4", + ] + case _: + return ["-c:v", video.codec, "-b:v", f"{bitrate_kbps}k"] + + +def build_encoder_vf(video: VideoEncodeSettings, *, callbacks: EncodeCodecDomainCallbacks) -> str: + vf = build_vf(video) + force_8bit = force_h264_8bit(video) + if 
video.codec not in VAAPI_VIDEO_CODECS: + if ( + callbacks.platform == "win32" + and video.codec in AMF_VIDEO_CODECS + and callbacks.amf_device is not None + and (video.tonemap_to_sdr or force_8bit) + ): + amf_upload = "format=nv12,hwupload" + if force_8bit: + if vf and "format=yuv420p" not in {part.strip() for part in vf.split(",")}: + vf = f"{vf},format=yuv420p" + elif not vf: + vf = "format=yuv420p" + return f"{vf},{amf_upload}" if vf else amf_upload + if force_8bit: + force_8bit_filter = "format=yuv420p" + if vf and force_8bit_filter in {part.strip() for part in vf.split(",")}: + return vf + return f"{vf},{force_8bit_filter}" if vf else force_8bit_filter + return vf + if video.tonemap_to_sdr or force_8bit: + vaapi_upload = "format=nv12,hwupload" + return f"{vf},{vaapi_upload}" if vf else vaapi_upload + return vf + + +def build_vf(video: VideoEncodeSettings) -> str: + if not video.tonemap_to_sdr: + return "" + algo = video.tonemap_algorithm or "hable" + return ( + "zscale=transfer=linear:npl=100," + "format=gbrpf32le," + "zscale=primaries=bt709," + f"tonemap=tonemap={algo}:desat=0," + "zscale=transfer=bt709:matrix=bt709:range=tv," + "format=yuv420p" + ) + + +def hardware_input_args(video: VideoEncodeSettings, *, callbacks: EncodeCodecDomainCallbacks) -> list[str]: + args: list[str] = [] + tonemap = bool(video.tonemap_to_sdr) + force_8bit = force_h264_8bit(video) + + if video.codec in VAAPI_VIDEO_CODECS: + if callbacks.vaapi_device: + args.extend(["-vaapi_device", callbacks.vaapi_device]) + if not tonemap and not force_8bit: + args.extend(["-hwaccel", "vaapi", "-hwaccel_output_format", "vaapi"]) + return args + + if video.codec in QSV_VIDEO_CODECS: + if callbacks.qsv_device: + args.extend(["-qsv_device", callbacks.qsv_device]) + if tonemap or force_8bit: + return args + args.extend(["-hwaccel", "qsv", "-hwaccel_output_format", "qsv"]) + return args + + if video.codec in AMF_VIDEO_CODECS and callbacks.platform == "win32": + if callbacks.amf_device: + args.extend([ 
+ "-init_hw_device", f"d3d11va=mre_amf:{callbacks.amf_device}", + "-filter_hw_device", "mre_amf", + ]) + if tonemap or force_8bit: + return args + if callbacks.amf_device: + args.extend([ + "-hwaccel", "d3d11va", + "-hwaccel_device", "mre_amf", + "-hwaccel_output_format", "d3d11", + ]) + else: + args.extend(["-hwaccel", "d3d11va", "-hwaccel_output_format", "d3d11"]) + return args + + if tonemap or force_8bit: + return args + + if video.codec in NVENC_VIDEO_CODECS: + if callbacks.platform == "win32" and callbacks.nvenc_device: + args.extend(["-hwaccel_device", callbacks.nvenc_device]) + args.extend(["-hwaccel", "cuda", "-hwaccel_output_format", "cuda"]) + + return args + + +def nvenc_device_args(callbacks: EncodeCodecDomainCallbacks) -> list[str]: + if callbacks.platform != "win32" or not callbacks.nvenc_device: + return [] + return ["-gpu", callbacks.nvenc_device] + + +def hdr_meta_args(video: VideoEncodeSettings) -> list[str]: + if video.codec in ("copy", "libx264", "h264_nvenc", "h264_amf", "h264_qsv"): + return [] + args = ["-color_primaries", "bt2020", "-color_trc", "smpte2084", "-colorspace", "bt2020nc"] + if video.codec == "hevc_nvenc": + if video.master_display: + args.extend(["-master_display", video.master_display]) + if video.max_cll: + args.extend(["-max_cll", video.max_cll]) + return args + + +def audio_codec_args(out_idx: int, audio: AudioTrackSettings) -> list[str]: + args: list[str] = [] + needs_downmix = needs_ac3_51_downmix(audio) + bitrate_kbps = normalize_audio_bitrate_kbps( + audio.codec, + audio.bitrate_kbps, + audio.input_channels, + None, + audio.input_channel_layout, + ) + match audio.codec: + case "copy": + args.extend([f"-c:a:{out_idx}", "copy"]) + if audio.extract_truehd_core: + args.extend([f"-bsf:a:{out_idx}", "truehd_core"]) + case "aac": + args.extend([f"-c:a:{out_idx}", "aac", f"-b:a:{out_idx}", f"{bitrate_kbps}k"]) + case "ac3": + args.extend([f"-c:a:{out_idx}", "ac3", f"-b:a:{out_idx}", f"{bitrate_kbps}k"]) + case "eac3": + 
args.extend([f"-c:a:{out_idx}", "eac3", f"-b:a:{out_idx}", f"{bitrate_kbps}k"]) + case "flac": + args.extend([f"-c:a:{out_idx}", "flac"]) + case _: + args.extend([f"-c:a:{out_idx}", audio.codec]) + if needs_downmix: + args.extend([f"-ac:a:{out_idx}", "6", f"-channel_layout:a:{out_idx}", "5.1"]) + return args + + +def needs_ac3_51_downmix(audio: AudioTrackSettings) -> bool: + if audio.codec not in {"ac3", "eac3"}: + return False + if (audio.input_channels or 0) >= 8: + return True + layout = (audio.input_channel_layout or "").lower() + return "7.1" in layout + diff --git a/core/workflows/encode/hardware.py b/core/workflows/encode/hardware.py index 3a4a65c..63fc320 100644 --- a/core/workflows/encode/hardware.py +++ b/core/workflows/encode/hardware.py @@ -16,14 +16,23 @@ from pathlib import Path from core.subprocess_utils import subprocess_text_kwargs, subprocess_windows_no_window_kwargs -from core.workflows.encode.models import HARDWARE_VIDEO_CODECS, SOFTWARE_VIDEO_CODECS +from core.workflows.encode.catalog import ( + AMF_VIDEO_CODECS as _AMF_CODECS, + HARDWARE_VIDEO_CODECS, + NVENC_VIDEO_CODECS as _NVENC_CODECS, + QSV_VIDEO_CODECS as _QSV_CODECS, + SOFTWARE_VIDEO_CODECS, + VAAPI_VIDEO_CODECS as _VAAPI_CODECS, +) +from core.workflows.encode.hw_devices import ( + select_linux_hwaccel_device, + select_windows_hwaccel_device, +) _NULLSRC = "nullsrc=s=256x256:r=25:d=0.1" # ≥ 1 frame garantie (25fps × 0.1s) _GENERIC_HW_FILTER = "format=nv12" _VAAPI_FILTER = "format=nv12,hwupload" -_NVENC_CODECS = {"hevc_nvenc", "h264_nvenc", "av1_nvenc"} -_VAAPI_CODECS = {"hevc_vaapi", "h264_vaapi", "av1_vaapi"} class HardwareEncoderDetector: @@ -50,6 +59,7 @@ def __init__(self) -> None: self._system_ffmpeg_cached = False self._vaapi_device_cache: str | None = None self._vaapi_device_cached = False + self._qsv_device_cache: dict[str, str | None] = {} @staticmethod def _resolve_ffmpeg(ffmpeg_bin: str) -> str: @@ -128,6 +138,17 @@ def _cached_vaapi_device(self) -> str | None: 
self._vaapi_device_cached = True return device + def _cached_qsv_device(self, ffmpeg_bin: str) -> str | None: + """Retourne le device QSV avec cache par instance et binaire ffmpeg.""" + resolved = self._resolve_ffmpeg(ffmpeg_bin) + with self._cache_lock: + if resolved in self._qsv_device_cache: + return self._qsv_device_cache[resolved] + device = self._qsv_device(ffmpeg_bin) + with self._cache_lock: + self._qsv_device_cache[resolved] = device + return device + def detect_software(self, ffmpeg_bin: str = "ffmpeg") -> set[str]: """ Retourne les codecs SOFTWARE disponibles. @@ -170,14 +191,13 @@ def detect(self, ffmpeg_bin: str = "ffmpeg") -> tuple[set[str], str]: return set(), ffmpeg_bin resolved = self._resolve_ffmpeg(ff) - vaapi_device = self._cached_vaapi_device() available: set[str] = set() nvenc_compiled = compiled & _NVENC_CODECS if nvenc_compiled: - available |= self._detect_nvenc(resolved, nvenc_compiled, vaapi_device) + available |= self._detect_nvenc(resolved, nvenc_compiled) - available |= self._probe_codecs(resolved, compiled - _NVENC_CODECS, vaapi_device) + available |= self._probe_codecs(resolved, compiled - _NVENC_CODECS) return available, ff @@ -232,7 +252,6 @@ def _detect_nvenc( self, ffmpeg_bin: str, compiled: set[str], - vaapi_device: str | None, ) -> set[str]: """ Garde la logique historique Linux/macOS pour NVIDIA. @@ -247,23 +266,22 @@ def _detect_nvenc( # Windows (.exe) depuis un host non-win32 ; on force alors la logique # "probe FFmpeg réel" sans dépendre de nvidia-smi. 
if sys.platform == "win32" or ffmpeg_bin.lower().endswith(".exe"): - return self._probe_codecs(ffmpeg_bin, compiled, vaapi_device) + return self._probe_codecs(ffmpeg_bin, compiled) if self._nvidia_ok(): return set(compiled) - return self._probe_codecs(ffmpeg_bin, compiled, vaapi_device) + return self._probe_codecs(ffmpeg_bin, compiled) def _probe_codecs( self, ffmpeg_bin: str, codec_ids: set[str], - vaapi_device: str | None, ) -> set[str]: """Probe une liste de codecs et retourne ceux qui sont utilisables.""" jobs: list[tuple[str, list[str]]] = [] for codec_id in codec_ids: - cmd = self._probe_command(ffmpeg_bin, codec_id, vaapi_device) + cmd = self._probe_command(ffmpeg_bin, codec_id) if cmd is None: continue jobs.append((codec_id, cmd)) @@ -315,7 +333,6 @@ def _probe_command( self, ffmpeg_bin: str, codec_id: str, - vaapi_device: str | None, ) -> list[str] | None: """ Construit la commande de probe adaptee a la famille d'encodeur. @@ -331,6 +348,7 @@ def _probe_command( ] if codec_id in _VAAPI_CODECS: + vaapi_device = self._cached_vaapi_device() if vaapi_device is None: return None return [ @@ -343,6 +361,50 @@ def _probe_command( "-f", "null", "-", ] + if codec_id in _QSV_CODECS: + qsv_device = self._cached_qsv_device(ffmpeg_bin) + if qsv_device is None: + return None + return [ + *base_cmd, + "-qsv_device", qsv_device, + "-f", "lavfi", "-i", _NULLSRC, + "-vf", _GENERIC_HW_FILTER, + "-frames:v", "1", + "-c:v", codec_id, + "-f", "null", "-", + ] + + if codec_id in _AMF_CODECS and self._is_windows_runtime(ffmpeg_bin): + amf_device = self._amf_device(ffmpeg_bin, codec_id) + if amf_device is None: + return None + device_name = f"mre_amf_probe{amf_device}" + return [ + *base_cmd, + "-init_hw_device", f"d3d11va={device_name}:{amf_device}", + "-filter_hw_device", device_name, + "-f", "lavfi", "-i", _NULLSRC, + "-vf", _VAAPI_FILTER, + "-frames:v", "1", + "-c:v", codec_id, + "-f", "null", "-", + ] + + if codec_id in _NVENC_CODECS and self._is_windows_runtime(ffmpeg_bin): 
+ nvenc_device = self._nvenc_device(ffmpeg_bin, codec_id) + if nvenc_device is None: + return None + return [ + *base_cmd, + "-f", "lavfi", "-i", _NULLSRC, + "-vf", _GENERIC_HW_FILTER, + "-frames:v", "1", + "-c:v", codec_id, + "-gpu", nvenc_device, + "-f", "null", "-", + ] + return [ *base_cmd, "-f", "lavfi", "-i", _NULLSRC, @@ -353,10 +415,25 @@ def _probe_command( ] @staticmethod - def _vaapi_device() -> str | None: - """Retourne le chemin du premier device VAAPI disponible, ou None.""" - for i in range(8): - node = Path(f"/dev/dri/renderD{128 + i}") - if node.exists(): - return str(node) - return None + def _is_windows_runtime(ffmpeg_bin: str) -> bool: + return sys.platform == "win32" or ffmpeg_bin.lower().endswith(".exe") + + def _vaapi_device(self) -> str | None: + """Retourne le render node Linux ciblé pour VAAPI, ou None.""" + return select_linux_hwaccel_device("hevc_vaapi") + + def _qsv_device(self, ffmpeg_bin: str, codec_id: str = "hevc_qsv") -> str | None: + """Retourne le device QSV ciblé selon l'OS, ou None.""" + if self._is_windows_runtime(ffmpeg_bin): + return select_windows_hwaccel_device(codec_id, ffmpeg_bin=self._resolve_ffmpeg(ffmpeg_bin)) + return select_linux_hwaccel_device("hevc_qsv") + + def _amf_device(self, ffmpeg_bin: str, codec_id: str = "hevc_amf") -> str | None: + if not self._is_windows_runtime(ffmpeg_bin): + return None + return select_windows_hwaccel_device(codec_id, ffmpeg_bin=self._resolve_ffmpeg(ffmpeg_bin)) + + def _nvenc_device(self, ffmpeg_bin: str, codec_id: str = "hevc_nvenc") -> str | None: + if not self._is_windows_runtime(ffmpeg_bin): + return None + return select_windows_hwaccel_device(codec_id, ffmpeg_bin=self._resolve_ffmpeg(ffmpeg_bin)) diff --git a/core/workflows/encode/hw_devices.py b/core/workflows/encode/hw_devices.py new file mode 100644 index 0000000..5a0329a --- /dev/null +++ b/core/workflows/encode/hw_devices.py @@ -0,0 +1,233 @@ +""" +Helpers pour sélectionner explicitement un device hardware par famille de 
codec. +""" + +from __future__ import annotations + +import subprocess +import sys +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path +from typing import Iterable + +from core.subprocess_utils import subprocess_windows_no_window_kwargs +from core.workflows.encode.catalog import ( + AMF_VIDEO_CODECS as _AMF_CODECS, + NVENC_VIDEO_CODECS as _NVENC_CODECS, + QSV_VIDEO_CODECS as _QSV_CODECS, + VAAPI_VIDEO_CODECS as _VAAPI_CODECS, +) + +_AMD_VENDOR_IDS = {"0x1002"} +_INTEL_VENDOR_IDS = {"0x8086"} +_NVIDIA_VENDOR_IDS = {"0x10de"} + +_AMD_DRIVERS = {"amdgpu", "radeon"} +_INTEL_DRIVERS = {"i915", "xe"} +_NVIDIA_DRIVERS = {"nvidia", "nouveau"} + +_NULLSRC = "nullsrc=s=256x256:r=25:d=0.1" +_GENERIC_HW_FILTER = "format=nv12" +_HWUPLOAD_FILTER = "format=nv12,hwupload" + + +@dataclass(frozen=True) +class LinuxRenderNodeInfo: + path: str + vendor_id: str | None = None + driver: str | None = None + + +def _read_text(path: Path) -> str | None: + try: + value = path.read_text(encoding="utf-8").strip() + except OSError: + return None + return value or None + + +def _driver_name(path: Path) -> str | None: + try: + return path.resolve().name.strip() or None + except OSError: + return None + + +def _vendor_kind(node: LinuxRenderNodeInfo) -> str: + vendor_id = str(node.vendor_id or "").strip().lower() + driver = str(node.driver or "").strip().lower() + + if vendor_id in _AMD_VENDOR_IDS or driver in _AMD_DRIVERS: + return "amd" + if vendor_id in _INTEL_VENDOR_IDS or driver in _INTEL_DRIVERS: + return "intel" + if vendor_id in _NVIDIA_VENDOR_IDS or driver in _NVIDIA_DRIVERS: + return "nvidia" + if vendor_id or driver: + return "other" + return "unknown" + + +def _first_node(nodes: Iterable[LinuxRenderNodeInfo], *kinds: str) -> LinuxRenderNodeInfo | None: + wanted = set(kinds) + for node in nodes: + if _vendor_kind(node) in wanted: + return node + return None + + +@lru_cache(maxsize=1) +def detect_linux_render_nodes() -> tuple[LinuxRenderNodeInfo, 
...]: + if not sys.platform.startswith("linux"): + return () + + sysfs_root = Path("/sys/class/drm") + dev_root = Path("/dev/dri") + nodes: list[LinuxRenderNodeInfo] = [] + + for render_node in sorted(sysfs_root.glob("renderD*")): + device_path = dev_root / render_node.name + if not device_path.exists(): + continue + nodes.append( + LinuxRenderNodeInfo( + path=str(device_path), + vendor_id=_read_text(render_node / "device" / "vendor"), + driver=_driver_name(render_node / "device" / "driver"), + ) + ) + + return tuple(nodes) + + +def select_linux_hwaccel_device( + codec_id: str, + *, + nodes: tuple[LinuxRenderNodeInfo, ...] | None = None, +) -> str | None: + """ + Retourne le render node DRM à utiliser pour `codec_id`, ou None. + + Politique : + - QSV -> Intel uniquement. + - VAAPI -> AMD en priorité, puis Intel, puis tout GPU non-NVIDIA. + Cela évite de tomber sur un render node NVIDIA en multi-GPU quand + l'utilisateur attend en pratique un backend VAAPI Mesa/Intel. + """ + current_nodes = detect_linux_render_nodes() if nodes is None else tuple(nodes) + if not current_nodes: + return None + + if codec_id in _QSV_CODECS: + node = _first_node(current_nodes, "intel") + return node.path if node is not None else None + + if codec_id in _VAAPI_CODECS: + node = ( + _first_node(current_nodes, "amd") + or _first_node(current_nodes, "intel") + or _first_node(current_nodes, "other") + ) + if node is not None: + return node.path + if len(current_nodes) == 1: + return current_nodes[0].path + return None + + return None + + +def _is_windows_runtime(ffmpeg_bin: str) -> bool: + return sys.platform == "win32" or ffmpeg_bin.lower().endswith(".exe") + + +def _probe_windows_adapter(cmd: list[str]) -> bool: + try: + result = subprocess.run( + cmd, + capture_output=True, + check=False, + timeout=5, + **subprocess_windows_no_window_kwargs(), + ) + except (FileNotFoundError, subprocess.TimeoutExpired, OSError): + return False + return result.returncode == 0 + + +def 
_windows_probe_command(ffmpeg_bin: str, codec_id: str, adapter_index: int) -> list[str] | None: + base_cmd = [ffmpeg_bin, "-hide_banner", "-loglevel", "error"] + + if codec_id in _NVENC_CODECS: + return [ + *base_cmd, + "-f", "lavfi", "-i", _NULLSRC, + "-vf", _GENERIC_HW_FILTER, + "-frames:v", "1", + "-c:v", codec_id, + "-gpu", str(adapter_index), + "-f", "null", "-", + ] + + if codec_id in _QSV_CODECS: + return [ + *base_cmd, + "-qsv_device", str(adapter_index), + "-f", "lavfi", "-i", _NULLSRC, + "-vf", _GENERIC_HW_FILTER, + "-frames:v", "1", + "-c:v", codec_id, + "-f", "null", "-", + ] + + if codec_id in _AMF_CODECS: + device_name = f"mre_amf_probe{adapter_index}" + return [ + *base_cmd, + "-init_hw_device", f"d3d11va={device_name}:{adapter_index}", + "-filter_hw_device", device_name, + "-f", "lavfi", "-i", _NULLSRC, + "-vf", _HWUPLOAD_FILTER, + "-frames:v", "1", + "-c:v", codec_id, + "-f", "null", "-", + ] + + return None + + +@lru_cache(maxsize=64) +def select_windows_hwaccel_device( + codec_id: str, + *, + ffmpeg_bin: str = "ffmpeg", + max_adapters: int = 8, +) -> str | None: + """ + Retourne l'index d'adaptateur Windows a utiliser pour `codec_id`, ou None. + + La resolution est basee sur des probes FFmpeg reels, ce qui evite de + supposer que l'ordre WMI/DirectX/CUDA est identique. 
+ """ + if not _is_windows_runtime(ffmpeg_bin): + return None + + if codec_id not in (_NVENC_CODECS | _QSV_CODECS | _AMF_CODECS): + return None + + for adapter_index in range(max(1, int(max_adapters))): + cmd = _windows_probe_command(ffmpeg_bin, codec_id, adapter_index) + if cmd is None: + return None + if _probe_windows_adapter(cmd): + return str(adapter_index) + return None + + +__all__ = [ + "LinuxRenderNodeInfo", + "detect_linux_render_nodes", + "select_linux_hwaccel_device", + "select_windows_hwaccel_device", +] diff --git a/core/workflows/encode/models.py b/core/workflows/encode/models.py index 22a6161..ebba918 100644 --- a/core/workflows/encode/models.py +++ b/core/workflows/encode/models.py @@ -17,6 +17,32 @@ from dataclasses import dataclass, field from enum import Enum from pathlib import Path +from typing import Protocol + +from core.workflows.encode.catalog import ( + AMF_PRESETS, + AUDIO_CODECS, + HARDWARE_VIDEO_CODECS, + NVENC_PRESETS, + QSV_PRESETS, + SOFTWARE_VIDEO_CODECS, + SVTAV1_PRESETS, + TONEMAP_ALGORITHMS, + VAAPI_PRESETS, + X264_PRESETS, + X265_PRESETS, + presets_for_codec, +) +from core.workflows.common.track_types import TrackMetaEdit, TrackMetaPatch, TrackTimeOffset, TrackOffset + + +class ChapterEntryLike(Protocol): + timecode_s: float + name: str + + +SubtitleTrackRef = tuple[Path, int] +AttachmentStreamRef = tuple[Path, int] # ============================================================================= @@ -31,36 +57,6 @@ class QualityMode(str, Enum): def label(self) -> str: return {"crf": "CRF", "bitrate": "Débit (kbps)", "size": "Taille cible (Mo)"}[self.value] - -SOFTWARE_VIDEO_CODECS: list[tuple[str, str]] = [ - ("libx265", "x265 — HEVC (logiciel)"), - ("libx264", "x264 — H.264 (logiciel)"), - ("libsvtav1", "SVT-AV1 (logiciel)"), -] - -HARDWARE_VIDEO_CODECS: list[tuple[str, str]] = [ - ("hevc_nvenc", "NVENC — HEVC (NVIDIA)"), - ("hevc_amf", "AMF — HEVC (AMD-WIN)"), - ("hevc_vaapi", "VAAPI — HEVC (AMD)"), - ("hevc_qsv", "QSV — HEVC 
(Intel)"), - ("h264_nvenc", "NVENC — H.264 (NVIDIA)"), - ("h264_amf", "AMF — H.264 (AMD-WIN)"), - ("h264_vaapi", "VAAPI — H.264 (AMD)"), - ("h264_qsv", "QSV — H.264 (Intel)"), - ("av1_nvenc", "NVENC — AV1 (NVIDIA RTX 40+)"), - ("av1_amf", "AMF — AV1 (AMD RX 7000+)"), - ("av1_vaapi", "VAAPI — AV1 (AMD/Intel)"), - ("av1_qsv", "QSV — AV1 (Intel Arc/12e gen+)"), -] - -AUDIO_CODECS: list[tuple[str, str]] = [ - ("copy", "Copie (sans réencodage)"), - ("aac", "AAC"), - ("ac3", "AC-3 (Dolby Digital)"), - ("eac3", "EAC-3 (Dolby Digital+)"), - ("flac", "FLAC (sans perte)"), -] - AC3_STANDARD_BITRATES_KBPS: list[int] = [ 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384, 448, 512, 576, 640, @@ -176,37 +172,6 @@ def normalize_audio_bitrate_kbps( return choices[-1] return min(choices, key=lambda choice: abs(choice - bitrate)) -X265_PRESETS = ["ultrafast", "superfast", "veryfast", "faster", "fast", - "medium", "slow", "slower", "veryslow", "placebo"] -X264_PRESETS = X265_PRESETS -SVTAV1_PRESETS = [str(i) for i in range(13)] # 0 = qualité max, 12 = vitesse max -NVENC_PRESETS = ["p1", "p2", "p3", "p4", "p5", "p6", "p7", - "slow", "medium", "fast", "hp", "hq"] -# VAAPI compression_level : 0 = meilleure qualité, 7 = plus rapide -VAAPI_PRESETS = [str(i) for i in range(8)] -# QSV preset : noms équivalents aux x264 presets -QSV_PRESETS = ["veryslow", "slower", "slow", "medium", "fast", "faster", "veryfast"] -# AMF quality : balanced est un bon compromis -AMF_PRESETS = ["quality", "balanced", "speed"] - -TONEMAP_ALGORITHMS = ["hable", "mobius", "reinhard", "gamma", "linear", "clip"] - - -def presets_for_codec(codec: str) -> list[str]: - """Retourne la liste de presets appropriée pour le codec donné.""" - if codec == "libsvtav1": - return SVTAV1_PRESETS - if codec in ("hevc_nvenc", "h264_nvenc", "av1_nvenc"): - return NVENC_PRESETS - if codec in ("hevc_vaapi", "h264_vaapi", "av1_vaapi"): - return VAAPI_PRESETS - if codec in ("hevc_qsv", "h264_qsv", "av1_qsv"): - return 
QSV_PRESETS - if codec in ("hevc_amf", "h264_amf", "av1_amf"): - return AMF_PRESETS - return X265_PRESETS # libx265, libx264 - - # ============================================================================= # Dataclasses # ============================================================================= @@ -224,10 +189,17 @@ class VideoEncodeSettings: target_size_mb: int = 4000 preset: str = "slow" extra_params: str = "" # x265-params / svtav1-params passthrough + # Précheck UI: forcer une sortie 8-bit pour les encodeurs H.264 + # quand la source est > 8-bit (appliqué piste par piste). + force_8bit: bool = False # HDR statique inject_hdr_meta: bool = False master_display: str = "" # ex. "G(8500,39850)B(6550,2300)R(35400,14600)WP(15635,16450)L(40000000,50)" max_cll: str = "" # ex. "1000,400" + # HDR dynamique + copy_dv: bool = False + copy_hdr10plus: bool = False + dovi_profile: str = "0" # Tone mapping tonemap_to_sdr: bool = False tonemap_algorithm: str = "hable" @@ -246,35 +218,14 @@ class AudioTrackSettings: track_entry_id: str | None = None # GUID de l'objet TrackEntry synchronisé entre panels -@dataclass -class TrackTimeOffset: - """Décalage temporel appliqué à une piste d'entrée (en millisecondes).""" - track_type: str # "video" | "audio" | "subtitle" - source_path: Path - stream_index: int - offset_ms: int = 0 - - -@dataclass -class TrackMetaEdit: - """ - Édition de métadonnées d'une piste de sortie, appliquée via post-process FFmpeg. - - track_order : numéro de piste 1-based dans le fichier de sortie (sélecteur @N). - language : balise IETF BCP-47 à écrire via `language=`, ou "" pour ne pas toucher. - title : nom de la piste à écrire, ou None pour ne pas toucher - (chaîne vide "" = effacer le titre existant). - flag_* : flags de disposition Matroska. None = ne pas toucher. 
- """ - track_order: int - language: str = "" - title: str | None = None - flag_default: bool | None = None - flag_forced: bool | None = None - flag_hearing_impaired: bool | None = None - flag_visual_impaired: bool | None = None - flag_original: bool | None = None - flag_commentary: bool | None = None +@dataclass(frozen=True) +class VideoTrackEncodePlan: + """Résumé UI d'un plan d'encodage vidéo pour une piste remux.""" + track_entry_id: str + codec_summary: str + target_codec: str = "copy" + hdr_badges: tuple[str, ...] = () + is_modified: bool = False @dataclass @@ -282,44 +233,67 @@ class EncodeConfig: """Configuration complète d'un encodage.""" source: Path output: Path - video: VideoEncodeSettings - audio_tracks: list[AudioTrackSettings] + video: VideoEncodeSettings | None = None + video_tracks: list[VideoEncodeSettings] = field(default_factory=list) + audio_tracks: list[AudioTrackSettings] = field(default_factory=list) copy_subtitles: bool = True # Pistes de sous-titres multi-sources : (chemin_source, stream_index_ffprobe) # Si non vide, remplace le copy_subtitles générique. - subtitle_tracks: list = field(default_factory=list) # list[tuple[Path, int]] + subtitle_tracks: list[SubtitleTrackRef] = field(default_factory=list) keep_chapters: bool = True #: Chapitres personnalisés à appliquer en post-traitement FFmpeg. #: None → comportement keep_chapters (copie depuis la source ou rien). #: list → écrase les chapitres existants avec ces entrées. - chapter_overrides: list | None = None # list[ChapterEntry] | None + chapter_overrides: list[ChapterEntryLike] | None = None # Flux d'attachements à copier : (chemin_source, stream_index_ffprobe) # Sélection individuelle — remplace l'ancien attachment_sources global. - attachment_streams: list = field(default_factory=list) # list[tuple[Path, int]] + attachment_streams: list[AttachmentStreamRef] = field(default_factory=list) # Fichiers externes à attacher (ajout manuel, via -attach ffmpeg). 
- extra_attachments: list = field(default_factory=list) # list[Path] + extra_attachments: list[Path] = field(default_factory=list) # Sources dont on copie les tags globaux via post-traitement FFmpeg. - tag_sources: list = field(default_factory=list) # list[Path] + tag_sources: list[Path] = field(default_factory=list) #: Balises MKV globales à écrire directement (prioritaire sur tag_sources). #: None → utiliser tag_sources si présents. #: dict → écrire ces balises et ignorer tag_sources. #: {} → supprimer toutes les balises existantes. tag_overrides: dict | None = None # dict[str, str] | None # Éditions de métadonnées de pistes (langue, titre) appliquées via FFmpeg. - track_meta_edits: list = field(default_factory=list) # list[TrackMetaEdit] + track_meta_edits: list[TrackMetaEdit] = field(default_factory=list) # Décalages temporels par piste (ms), appliqués directement au runtime encode. - track_time_offsets: list = field(default_factory=list) # list[TrackTimeOffset] + track_time_offsets: list[TrackTimeOffset] = field(default_factory=list) file_title: str = "" # balise Title du segment de sortie duration_s: float | None = None # requis pour le mode taille cible # Passthrough métadonnées dynamiques (HEVC uniquement) - copy_dv: bool = False # injecter RPU Dolby Vision via dovi_tool - copy_hdr10plus: bool = False # injecter HDR10+ SEI via hdr10plus_tool - dovi_profile: str = "0" # flag -m dovi_tool : "0"=conserver, "2"=normaliser P8.1 + copy_dv: bool = False # compat legacy : miroir de la vidéo primaire + copy_hdr10plus: bool = False # compat legacy : miroir de la vidéo primaire + dovi_profile: str = "0" # compat legacy : miroir de la vidéo primaire work_dir: Path | None = None # dossier de travail (passlog, fichiers temp) #: Cover TMDB à télécharger juste avant l'encodage : (url, filename). #: None → pas de cover TMDB en attente. 
tmdb_cover: tuple[str, str] | None = None + def __post_init__(self) -> None: + if not self.video_tracks and self.video is not None: + primary = self.video + if not primary.copy_dv and self.copy_dv: + primary.copy_dv = self.copy_dv + if not primary.copy_hdr10plus and self.copy_hdr10plus: + primary.copy_hdr10plus = self.copy_hdr10plus + if primary.dovi_profile == "0" and self.dovi_profile != "0": + primary.dovi_profile = self.dovi_profile + self.video_tracks = [primary] + elif self.video_tracks and self.video is None: + self.video = self.video_tracks[0] + + if self.video is None: + raise ValueError("EncodeConfig nécessite au moins une piste vidéo.") + + # La première piste reste l'accesseur de compatibilité pour l'ancien code. + self.video = self.video_tracks[0] + self.copy_dv = bool(self.video.copy_dv) + self.copy_hdr10plus = bool(self.video.copy_hdr10plus) + self.dovi_profile = str(self.video.dovi_profile or "0") + @dataclass class EncodePreset: diff --git a/core/workflows/encode/planning/__init__.py b/core/workflows/encode/planning/__init__.py new file mode 100644 index 0000000..d07deb1 --- /dev/null +++ b/core/workflows/encode/planning/__init__.py @@ -0,0 +1,90 @@ +"""Planning helpers for the encode workflow.""" + +from .command_plan import build_encode_command_selection +from .metadata_plan import ( + PreparedContainerMetadataInputs, + append_container_metadata_args, + build_container_metadata_plan, + container_chapter_map_value, + container_metadata_map_value, + materialize_container_metadata_inputs, + prepare_container_metadata_inputs, +) +from .encode_plan import build_encode_plan +from .offsets import ( + build_offset_specs, + offset_seconds, + track_offset_ms, + track_time_offset_lookup, + video_map_arg, +) +from .preview import format_preview_command, format_preview_commands +from .preview import format_preview_selection +from .plan_models import ( + ContainerMetadataPlan, + EncodeCommandSelection, + EncodePlan, + MaterializedContainerMetadataPlan, + 
PlannedVideoTrack, + ResolvedTrackAssembly, + ResolvedSubtitleTracks, + SyncAnalysisPlan, + SyncMappedTrackPlan, + SourceLayout, + TrackMapping, +) +from .sources import resolve_source_layout, source_input_index_map +from .subtitles import probe_stream_indices, resolve_subtitle_tracks_for_encode +from .sync_plan import ( + build_sync_analysis_plan, + build_probe_remux_config, + build_sync_mapped_tracks, + needs_strict_interleave_for_encode, + requires_file_sync_fallback_for_offsets, +) +from .track_assembly import build_track_input_paths, resolve_track_assembly +from .validation import is_dir_writable, validate_encode_config + +__all__ = [ + "EncodePlan", + "EncodeCommandSelection", + "ContainerMetadataPlan", + "PreparedContainerMetadataInputs", + "MaterializedContainerMetadataPlan", + "PlannedVideoTrack", + "ResolvedTrackAssembly", + "ResolvedSubtitleTracks", + "SourceLayout", + "SyncAnalysisPlan", + "SyncMappedTrackPlan", + "TrackMapping", + "append_container_metadata_args", + "build_encode_command_selection", + "build_container_metadata_plan", + "build_encode_plan", + "build_sync_analysis_plan", + "build_probe_remux_config", + "build_sync_mapped_tracks", + "build_track_input_paths", + "build_offset_specs", + "container_chapter_map_value", + "container_metadata_map_value", + "format_preview_command", + "format_preview_commands", + "format_preview_selection", + "is_dir_writable", + "materialize_container_metadata_inputs", + "needs_strict_interleave_for_encode", + "offset_seconds", + "prepare_container_metadata_inputs", + "probe_stream_indices", + "resolve_source_layout", + "resolve_subtitle_tracks_for_encode", + "resolve_track_assembly", + "requires_file_sync_fallback_for_offsets", + "source_input_index_map", + "track_offset_ms", + "track_time_offset_lookup", + "validate_encode_config", + "video_map_arg", +] diff --git a/core/workflows/encode/planning/command_plan.py b/core/workflows/encode/planning/command_plan.py new file mode 100644 index 0000000..3234475 --- 
def build_encode_command_selection(
    config: EncodeConfig,
    *,
    plan: EncodePlan,
    is_multi_video,
    uses_two_pass,
    build_two_pass,
    build_single_pass,
    build_multi_video_preview,
    chapter_materialize_dir=None,
) -> EncodeCommandSelection:
    """Select which ffmpeg command list to run and which one to preview.

    The concrete command builders are injected as callables so this planner
    stays decoupled from the workflow module that owns them.  The returned
    selection carries the full command tuple plus the preview index:
    multi-video previews show the last command, two-pass previews show the
    second pass.
    """
    if is_multi_video(config):
        raw = build_multi_video_preview(config, plan=plan)
        commands = tuple(tuple(command) for command in raw if command)
        last = len(commands) - 1 if commands else 0
        return EncodeCommandSelection(
            commands=commands,
            preview_index=max(0, last),
            is_multi_video=True,
            is_two_pass=False,
        )

    if uses_two_pass(config):
        raw = build_two_pass(
            config,
            chapter_materialize_dir=chapter_materialize_dir,
            plan=plan,
        )
        commands = tuple(tuple(command) for command in raw if command)
        return EncodeCommandSelection(
            commands=commands,
            preview_index=0 if len(commands) <= 1 else 1,
            is_multi_video=False,
            is_two_pass=True,
        )

    single = build_single_pass(
        config,
        chapter_materialize_dir=chapter_materialize_dir,
        plan=plan,
    )
    return EncodeCommandSelection(
        commands=(tuple(single),) if single else (),
        preview_index=0,
        is_multi_video=False,
        is_two_pass=False,
    )
def build_planned_video_tracks(
    config: EncodeConfig,
    *,
    video_tracks,
    video_source_from_settings,
) -> tuple[PlannedVideoTrack, ...]:
    """Snapshot every selected video track into an immutable plan entry.

    ``video_tracks`` and ``video_source_from_settings`` are injected callables
    so the planner stays decoupled from the workflow module that owns them.
    """
    planned: list[PlannedVideoTrack] = []
    for video in video_tracks(config):
        # NOTE(review): non-VideoEncodeSettings entries are replaced by a
        # default settings object, discarding `video` itself — confirm intended.
        settings = video if isinstance(video, VideoEncodeSettings) else VideoEncodeSettings()
        planned.append(
            PlannedVideoTrack(
                source=Path(video_source_from_settings(config, settings)),
                # stream_index may be absent or None on legacy settings objects.
                stream_index=int(getattr(settings, "stream_index", 0) or 0),
                codec=str(settings.codec),
                quality_mode=settings.quality_mode,
                inject_hdr_meta=bool(settings.inject_hdr_meta),
                tonemap_to_sdr=bool(settings.tonemap_to_sdr),
                copy_dv=bool(settings.copy_dv),
                copy_hdr10plus=bool(settings.copy_hdr10plus),
                master_display=str(settings.master_display or ""),
                max_cll=str(settings.max_cll or ""),
            )
        )
    return tuple(planned)


def build_encode_plan(
    config: EncodeConfig,
    *,
    probe_subtitle_indices: Callable[[Path, str], list[int] | None] | None = None,
    resolve_subtitle_tracks=None,
    resolve_global_tags,
    video_tracks,
    video_source_from_settings,
    video_source_path,
    video_stream_index,
    video_map_key,
) -> EncodePlan:
    """Assemble the full immutable plan for one encode run.

    Resolves the source layout, subtitle tracks, per-track time offsets,
    container metadata and sync-analysis strategy, then freezes everything
    into an :class:`EncodePlan`.  Raises ``ValueError`` when neither
    ``resolve_subtitle_tracks`` nor ``probe_subtitle_indices`` is provided.
    """
    source_layout = resolve_source_layout(config)
    all_sources = list(source_layout.sources)
    source_idx = dict(source_layout.source_idx)
    offset_lookup = track_time_offset_lookup(config)
    if resolve_subtitle_tracks is not None:
        # Caller-provided resolver wins over probing.
        subtitle_tracks, subtitles_resolved = resolve_subtitle_tracks(config, all_sources)
        resolved_subtitles = tuple((Path(source), int(stream_index)) for source, stream_index in subtitle_tracks)
    else:
        if probe_subtitle_indices is None:
            raise ValueError("probe_subtitle_indices callback is required")
        resolved = resolve_subtitle_tracks_for_encode(
            config,
            all_sources,
            probe_indices=probe_subtitle_indices,
        )
        resolved_subtitles = tuple(resolved.tracks)
        subtitles_resolved = bool(resolved.complete)
    source = Path(video_source_path(config))
    stream_index = int(video_stream_index(config))
    # Fall back to the primary source's input index when the video source
    # is not among the registered inputs.
    input_idx = source_idx.get(source, source_idx.get(config.source, 0))
    container_metadata = build_container_metadata_plan(
        config,
        resolve_global_tags=resolve_global_tags,
    )
    planned_video_tracks = build_planned_video_tracks(
        config,
        video_tracks=video_tracks,
        video_source_from_settings=video_source_from_settings,
    )
    sync_analysis = build_sync_analysis_plan(
        config,
        all_sources,
        source_idx,
        list(resolved_subtitles),
        subtitles_resolved=bool(subtitles_resolved),
        offset_lookup=offset_lookup,
    )

    return EncodePlan(
        all_sources=tuple(all_sources),
        source_idx=EncodePlan.freeze_mapping(source_idx),
        offset_lookup=EncodePlan.freeze_mapping(offset_lookup),
        resolved_subtitle_tracks=tuple(resolved_subtitles),
        subtitles_resolved=bool(subtitles_resolved),
        video_source=source,
        video_stream=stream_index,
        video_key=video_map_key(config),
        video_input_idx=int(input_idx),
        video_default_map=(int(input_idx), int(stream_index)),
        video_tracks=planned_video_tracks,
        sync_analysis=sync_analysis,
        container_metadata=container_metadata,
    )


def build_container_metadata_plan(
    config: EncodeConfig,
    *,
    resolve_global_tags: Callable[[EncodeConfig], dict[str, str]],
) -> ContainerMetadataPlan:
    """Summarize how container-level tags and chapters will be handled."""
    tag_source: Path | None = None
    # Only fall back to the last tag source when no explicit overrides exist.
    if config.tag_overrides is None and config.tag_sources:
        tag_source = Path(config.tag_sources[-1])
    return ContainerMetadataPlan(
        tag_source=tag_source,
        tag_overrides_defined=config.tag_overrides is not None,
        chapter_overrides_defined=config.chapter_overrides is not None,
        chapter_overrides_nonempty=bool(config.chapter_overrides),
        has_track_meta_edits=bool(config.track_meta_edits),
        global_tags=resolve_global_tags(config),
    )


def prepare_container_metadata_inputs(
    cmd: list[str],
    config: EncodeConfig,
    *,
    source_idx: dict[Path, int],
    next_input_index: int,
    container_metadata_plan: ContainerMetadataPlan | None = None,
    chapter_materialize_dir: Path | None = None,
    chapter_probe_source: Path | None = None,
    probe_duration_seconds: Callable[[Path], float | None],
    write_ffmetadata_chapters: Callable[[list, Path, float | None], Path],
) -> PreparedContainerMetadataInputs:
    """Append the extra ``-i`` inputs for metadata to *cmd* (mutated in place)."""
    materialized = materialize_container_metadata_inputs(
        config,
        source_idx=source_idx,
        next_input_index=next_input_index,
        container_metadata_plan=container_metadata_plan,
        chapter_materialize_dir=chapter_materialize_dir,
        chapter_probe_source=chapter_probe_source,
        probe_duration_seconds=probe_duration_seconds,
        write_ffmetadata_chapters=write_ffmetadata_chapters,
    )
    cmd.extend(materialized.input_args)
    return PreparedContainerMetadataInputs(
        next_input_index=materialized.next_input_index,
        chapter_input_index=materialized.chapter_input_index,
        tag_input_index=materialized.tag_input_index,
    )
def materialize_container_metadata_inputs(
    config: EncodeConfig,
    *,
    source_idx: dict[Path, int],
    next_input_index: int,
    container_metadata_plan: ContainerMetadataPlan | None = None,
    chapter_materialize_dir: Path | None = None,
    chapter_probe_source: Path | None = None,
    probe_duration_seconds: Callable[[Path], float | None],
    write_ffmetadata_chapters: Callable[[list[ChapterEntryLike], Path, float | None], Path],
) -> MaterializedContainerMetadataPlan:
    """Compute the extra ffmpeg inputs needed for chapter/tag overrides.

    Returns the next free input index together with the indices assigned to
    the chapter and tag inputs (``None`` when not used) and the raw ``-i``
    argument list to append.
    """
    metadata_plan = container_metadata_plan or build_container_metadata_plan(
        config,
        resolve_global_tags=lambda current: {},
    )
    input_args: list[str] = []
    chapter_input_index: int | None = None
    if metadata_plan.chapter_overrides_nonempty:
        if chapter_materialize_dir is not None:
            duration_s = probe_duration_seconds(chapter_probe_source or config.source)
            assert config.chapter_overrides is not None
            chapter_file = write_ffmetadata_chapters(
                config.chapter_overrides,
                chapter_materialize_dir,
                duration_s,
            )
            chapter_ref = str(chapter_file)
        else:
            # NOTE(review): without a materialize dir an empty "-i" path is
            # emitted — presumably a preview-only placeholder; confirm.
            chapter_ref = ""
        chapter_input_index = next_input_index
        next_input_index += 1
        input_args.extend(["-i", chapter_ref])

    tag_input_index: int | None = None
    if metadata_plan.tag_source is not None:
        # Reuse an already-registered source instead of adding a duplicate input.
        mapped_idx = source_idx.get(metadata_plan.tag_source)
        if mapped_idx is not None:
            tag_input_index = mapped_idx
        else:
            tag_input_index = next_input_index
            next_input_index += 1
            input_args.extend(["-i", str(metadata_plan.tag_source)])
    return MaterializedContainerMetadataPlan(
        next_input_index=next_input_index,
        chapter_input_index=chapter_input_index,
        tag_input_index=tag_input_index,
        input_args=tuple(input_args),
    )


def container_chapter_map_value(
    config: EncodeConfig,
    *,
    default_chapter_input_index: int,
    chapter_input_index: int | None,
    container_metadata_plan: ContainerMetadataPlan | None = None,
) -> str:
    """Return the ``-map_chapters`` value ("-1" drops chapters entirely)."""
    metadata_plan = container_metadata_plan
    chapter_overrides_defined = (
        metadata_plan.chapter_overrides_defined
        if metadata_plan is not None
        else config.chapter_overrides is not None
    )
    chapter_overrides_nonempty = (
        metadata_plan.chapter_overrides_nonempty
        if metadata_plan is not None
        else bool(config.chapter_overrides)
    )
    if chapter_overrides_defined:
        # Explicit overrides: map the materialized chapter input, or drop.
        if chapter_overrides_nonempty and chapter_input_index is not None:
            return str(chapter_input_index)
        return "-1"
    return str(default_chapter_input_index) if config.keep_chapters else "-1"


def container_metadata_map_value(
    config: EncodeConfig,
    *,
    default_metadata_input_index: int,
    chapter_input_index: int | None,
    tag_input_index: int | None,
    include_copy_video_stream_passthrough: bool,
    is_video_passthrough: Callable[[EncodeConfig], bool],
    chapter_map: str | None = None,
    container_metadata_plan: ContainerMetadataPlan | None = None,
) -> str | None:
    """Return the ``-map_metadata`` value, or ``None`` to omit the flag.

    Precedence: explicit tag overrides > dedicated tag input > passthrough /
    edit fallbacks on the default input.
    """
    metadata_plan = container_metadata_plan
    tag_overrides_defined = (
        metadata_plan.tag_overrides_defined
        if metadata_plan is not None
        else config.tag_overrides is not None
    )
    chapter_overrides_defined = (
        metadata_plan.chapter_overrides_defined
        if metadata_plan is not None
        else config.chapter_overrides is not None
    )
    has_track_meta_edits = (
        metadata_plan.has_track_meta_edits
        if metadata_plan is not None
        else bool(config.track_meta_edits)
    )
    if tag_overrides_defined:
        if chapter_input_index is not None:
            return str(chapter_input_index)
        if chapter_map is not None and chapter_map not in {"-1", ""}:
            return chapter_map
        return "-1"
    if tag_input_index is not None:
        return str(tag_input_index)
    if include_copy_video_stream_passthrough and is_video_passthrough(config):
        return str(default_metadata_input_index)
    if chapter_overrides_defined or has_track_meta_edits:
        return str(default_metadata_input_index)
    return None


def append_container_metadata_args(
    cmd: list[str],
    config: EncodeConfig,
    *,
    default_metadata_input_index: int,
    default_chapter_input_index: int,
    chapter_input_index: int | None,
    tag_input_index: int | None,
    include_copy_video_stream_passthrough: bool,
    is_video_passthrough: Callable[[EncodeConfig], bool],
    resolve_global_tags: Callable[[EncodeConfig], dict[str, str]],
    build_track_meta_args: Callable[[EncodeConfig], list[str]],
    container_metadata_plan: ContainerMetadataPlan | None = None,
) -> None:
    """Append all container-level metadata flags to *cmd* (mutated in place).

    Emission order matters for ffmpeg: map_metadata, per-stream passthrough,
    map_chapters, then the individual ``-metadata`` key/value pairs.
    """
    metadata_plan = container_metadata_plan or build_container_metadata_plan(
        config,
        resolve_global_tags=resolve_global_tags,
    )
    chapter_map = container_chapter_map_value(
        config,
        default_chapter_input_index=default_chapter_input_index,
        chapter_input_index=chapter_input_index,
        container_metadata_plan=metadata_plan,
    )
    metadata_map = container_metadata_map_value(
        config,
        default_metadata_input_index=default_metadata_input_index,
        chapter_input_index=chapter_input_index,
        tag_input_index=tag_input_index,
        include_copy_video_stream_passthrough=include_copy_video_stream_passthrough,
        is_video_passthrough=is_video_passthrough,
        chapter_map=chapter_map,
        container_metadata_plan=metadata_plan,
    )
    if metadata_map is not None:
        cmd.extend(["-map_metadata", metadata_map])
    if include_copy_video_stream_passthrough and is_video_passthrough(config):
        cmd.extend([
            "-map_metadata:s:v:0",
            f"{default_metadata_input_index}:s:v:0",
        ])

    cmd.extend(["-map_chapters", chapter_map])

    global_tags = dict(metadata_plan.global_tags)
    # Title is emitted first, separately from the remaining tags.
    title_value = global_tags.pop("title", None)
    if title_value is not None:
        cmd.extend(["-metadata", f"title={title_value}"])

    # Blank out encoder/creation_time so ffmpeg does not stamp its own.
    cmd.extend(["-metadata", "encoder=", "-metadata", "creation_time="])
    for key, value in global_tags.items():
        cmd.extend(["-metadata", f"{key}={value}"])
    cmd.extend(build_track_meta_args(config))


def offset_seconds(offset_ms: int) -> str:
    """Format a millisecond offset as a seconds string with 3 decimals.

    NOTE(review): returns the magnitude only — the sign appears to be
    handled by the caller; confirm.
    """
    return f"{abs(int(offset_ms)) / 1000.0:.3f}"
+def track_time_offset_lookup(config: EncodeConfig) -> dict[tuple[str, Path, int], int]: + lookup: dict[tuple[str, Path, int], int] = {} + for raw in config.track_time_offsets: + if not isinstance(raw, TrackTimeOffset): + continue + track_type = str(raw.track_type or "").strip().lower() + if track_type not in {"video", "audio", "subtitle"}: + continue + lookup[(track_type, Path(raw.source_path), int(raw.stream_index))] = int(raw.offset_ms) + return lookup + + +def track_offset_ms( + lookup: dict[tuple[str, Path, int], int], + *, + track_type: str, + source_path: Path, + stream_index: int, + allow_single_video_source_fallback: bool = True, +) -> int: + key = (str(track_type).strip().lower(), Path(source_path), int(stream_index)) + if key in lookup: + return int(lookup[key]) + if allow_single_video_source_fallback and key[0] == "video": + matches = [ + int(value) + for (candidate_type, candidate_source, _candidate_stream), value in lookup.items() + if candidate_type == "video" and candidate_source == key[1] + ] + if len(matches) == 1: + return matches[0] + return 0 + + +def build_offset_specs( + config: EncodeConfig, + *, + track_mappings: list[TrackMapping], + offset_lookup: dict[tuple[str, Path, int], int] | None = None, +) -> list[EncodeOffsetInputSpec]: + lookup = offset_lookup if offset_lookup is not None else track_time_offset_lookup(config) + specs: list[EncodeOffsetInputSpec] = [] + for map_key, input_path, input_stream_index in track_mappings: + source_path, source_stream_index, track_type = map_key + offset_ms = track_offset_ms( + lookup, + track_type=track_type, + source_path=source_path, + stream_index=source_stream_index, + ) + if offset_ms == 0: + continue + if track_type == "video" and offset_ms < 0: + raise EncodeError( + "Décalage vidéo négatif interdit : " + f"source={source_path}, stream={source_stream_index}, offset={offset_ms} ms" + ) + specs.append( + EncodeOffsetInputSpec( + map_key=(Path(source_path), int(source_stream_index), 
str(track_type)), + input_path=input_path, + input_stream_index=int(input_stream_index), + offset_ms=int(offset_ms), + ) + ) + return specs + + +def video_map_arg( + default_map: tuple[int, int], + *, + offset_remap: dict[MapKey, tuple[int, int]], + map_key: MapKey, +) -> str: + remapped = offset_remap.get(map_key) + if remapped is None: + stream_index = int(default_map[1]) + if stream_index == 0: + return f"{int(default_map[0])}:v:0" + return f"{int(default_map[0])}:{stream_index}" + return f"{int(remapped[0])}:{int(remapped[1])}" diff --git a/core/workflows/encode/planning/plan_models.py b/core/workflows/encode/planning/plan_models.py new file mode 100644 index 0000000..a9cf6d0 --- /dev/null +++ b/core/workflows/encode/planning/plan_models.py @@ -0,0 +1,131 @@ +from __future__ import annotations + +from dataclasses import dataclass +from pathlib import Path +from types import MappingProxyType +from typing import Mapping, TypeVar + +from core.workflows.encode.models import QualityMode +from core.workflows.remux_models import RemuxConfig + +_TKey = TypeVar("_TKey") +_TValue = TypeVar("_TValue") + + +MapKey = tuple[Path, int, str] +TrackMapping = tuple[MapKey, Path | str, int] + + +@dataclass(frozen=True) +class SourceLayout: + sources: tuple[Path, ...] + source_idx: dict[Path, int] + + +@dataclass(frozen=True) +class ResolvedSubtitleTracks: + tracks: tuple[tuple[Path, int], ...] + complete: bool + + +@dataclass(frozen=True) +class PreparedContainerMetadataInputs: + next_input_index: int + chapter_input_index: int | None + tag_input_index: int | None + + +@dataclass(frozen=True) +class MaterializedContainerMetadataPlan: + next_input_index: int + chapter_input_index: int | None + tag_input_index: int | None + input_args: tuple[str, ...] 
@dataclass(frozen=True)
class ContainerMetadataPlan:
    """How container-level tags and chapters will be handled for this encode."""

    tag_source: Path | None
    tag_overrides_defined: bool
    chapter_overrides_defined: bool
    chapter_overrides_nonempty: bool
    has_track_meta_edits: bool
    global_tags: Mapping[str, str]


@dataclass(frozen=True)
class PlannedVideoTrack:
    """Immutable snapshot of one video track's encode settings."""

    source: Path
    stream_index: int
    codec: str
    quality_mode: QualityMode
    inject_hdr_meta: bool
    tonemap_to_sdr: bool
    copy_dv: bool
    copy_hdr10plus: bool
    master_display: str
    max_cll: str


@dataclass(frozen=True)
class SyncMappedTrackPlan:
    """One mapped track as seen by sync analysis."""

    source_file_index: int
    stream_index: int
    track_type: str

    @property
    def track(self) -> "SyncMappedTrackPlan":
        """Compatibility shim: older callers reach the track via ``.track``."""
        return self


@dataclass(frozen=True)
class SyncAnalysisPlan:
    """Decision record for whether/how cross-file sync analysis runs."""

    enabled: bool
    mapped_tracks: tuple[SyncMappedTrackPlan, ...]
    offset_requires_file_fallback: bool
    needs_subtitle_prescan: bool
    strict_interleave_without_prescan: bool
    allow_live_sync: bool
    probe_remux_config: RemuxConfig | None


@dataclass(frozen=True)
class ResolvedTrackAssembly:
    """Final track-to-input mappings plus the primary video map."""

    track_mappings: tuple[TrackMapping, ...]
    video_map: tuple[int, int]


@dataclass(frozen=True)
class EncodeCommandSelection:
    """The command list chosen for an encode plus preview bookkeeping."""

    commands: tuple[tuple[str, ...], ...]
    preview_index: int
    is_multi_video: bool
    is_two_pass: bool

    @property
    def preview_command(self) -> tuple[str, ...]:
        """The command shown in previews; the index is clamped to range."""
        if not self.commands:
            return ()
        clamped = min(max(int(self.preview_index), 0), len(self.commands) - 1)
        return self.commands[clamped]


@dataclass(frozen=True)
class EncodePlan:
    """Frozen, fully-resolved plan for one encode run."""

    all_sources: tuple[Path, ...]
    source_idx: Mapping[Path, int]
    offset_lookup: Mapping[tuple[str, Path, int], int]
    resolved_subtitle_tracks: tuple[tuple[Path, int], ...]
    subtitles_resolved: bool
    video_source: Path
    video_stream: int
    video_key: MapKey
    video_input_idx: int
    video_default_map: tuple[int, int]
    video_tracks: tuple[PlannedVideoTrack, ...]
    sync_analysis: SyncAnalysisPlan
    container_metadata: ContainerMetadataPlan

    @staticmethod
    def freeze_mapping(mapping: dict[_TKey, _TValue]) -> Mapping[_TKey, _TValue]:
        """Return a read-only copy of *mapping* (defensive snapshot)."""
        return MappingProxyType(dict(mapping))


def format_preview_command(cmd: list[str], *, prefix: str = "") -> str:
    """Pretty-print one command as a backslash-continued shell block.

    A flag followed by a non-flag token is kept on the same line as its value.
    """
    if not cmd:
        return ""
    rendered = [cmd[0]]
    position = 1
    total = len(cmd)
    while position < total:
        token = cmd[position]
        takes_value = (
            token.startswith("-")
            and position + 1 < total
            and not cmd[position + 1].startswith("-")
        )
        if takes_value:
            rendered.append(f" {token} {cmd[position + 1]}")
            position += 2
        else:
            rendered.append(f" {token}")
            position += 1
    return prefix + " \\\n".join(rendered)


def format_preview_commands(commands: list[list[str]]) -> str:
    """Join several command previews, numbering each non-empty block."""
    blocks = [
        f"# Commande {number}\n" + format_preview_command(command)
        for number, command in enumerate(commands, start=1)
        if command
    ]
    return "\n\n".join(blocks)


def format_preview_selection(selection: EncodeCommandSelection) -> str:
    """Render the preview text for a command selection."""
    if selection.is_multi_video:
        return format_preview_commands([list(command) for command in selection.commands])

    cmd = list(selection.preview_command)
    if not cmd:
        return ""
    header = (
        "# Mode taille cible : passe 1 omise de cet aperçu\n"
        if selection.is_two_pass
        else ""
    )
    return format_preview_command(cmd, prefix=header)
def source_input_index_map(sources: list[Path], *, start_index: int = 0) -> dict[Path, int]:
    """Map each source path to its ffmpeg input index, starting at *start_index*."""
    return {path: position for position, path in enumerate(sources, start=start_index)}


def resolve_source_layout(config: EncodeConfig) -> SourceLayout:
    """Collect every distinct input file, primary source first.

    Video, audio, subtitle and attachment sources are appended in that order,
    each only once, preserving first-seen order.
    """
    ordered: list[Path] = [config.source]

    def _register(path: Path) -> None:
        if path not in ordered:
            ordered.append(path)

    videos = list(config.video_tracks)
    if not videos and config.video is not None:
        videos = [config.video]

    for video in videos:
        _register(Path(video.source_path or config.source))
    for audio in config.audio_tracks:
        _register(Path(audio.source_path or config.source))
    for subtitle_source, _stream_index in config.subtitle_tracks:
        _register(Path(subtitle_source))
    for attachment_source, _stream_index in config.attachment_streams:
        _register(Path(attachment_source))

    return SourceLayout(
        sources=tuple(ordered),
        source_idx=source_input_index_map(ordered),
    )


def probe_stream_indices(
    source: Path,
    codec_type: str,
    *,
    ffprobe_streams_payload: Callable[[Path], dict[str, object] | None],
    ffprobe_stream_dicts: Callable[[dict[str, object]], list[dict[str, object]]],
) -> list[int] | None:
    """Return the sorted, de-duplicated stream indices of *codec_type*.

    Returns ``None`` when the probe payload could not be obtained at all.
    """
    payload = ffprobe_streams_payload(source)
    if payload is None:
        return None
    found: set[int] = set()
    for stream in ffprobe_stream_dicts(payload):
        if stream.get("codec_type") != codec_type:
            continue
        raw = stream.get("index")
        if not isinstance(raw, (int, str)):
            continue
        try:
            found.add(int(raw))
        except (TypeError, ValueError):
            continue
    return sorted(found)


def resolve_subtitle_tracks_for_encode(
    config: EncodeConfig,
    all_sources: list[Path],
    *,
    probe_indices: Callable[[Path, str], list[int] | None],
) -> ResolvedSubtitleTracks:
    """Resolve the subtitle tracks to carry into the encode.

    Explicit selections win; otherwise all subtitle streams of every source
    are probed when ``copy_subtitles`` is set.  ``complete`` is False when a
    probe failed, meaning the layout cannot be trusted.
    """

    def _dedupe(pairs) -> list[tuple[Path, int]]:
        unique: list[tuple[Path, int]] = []
        seen: set[tuple[Path, int]] = set()
        for source_path, stream_index in pairs:
            key = (Path(source_path), int(stream_index))
            if key not in seen:
                seen.add(key)
                unique.append(key)
        return unique

    if config.subtitle_tracks:
        return ResolvedSubtitleTracks(tuple(_dedupe(config.subtitle_tracks)), True)

    if not config.copy_subtitles:
        return ResolvedSubtitleTracks((), True)

    collected: list[tuple[Path, int]] = []
    for source_path in all_sources:
        subtitle_indices = probe_indices(source_path, "subtitle")
        if subtitle_indices is None:
            return ResolvedSubtitleTracks((), False)
        collected.extend((source_path, stream_index) for stream_index in subtitle_indices)
    return ResolvedSubtitleTracks(tuple(_dedupe(collected)), True)
_SyncMappedTrackLike(Protocol): + @property + def source_file_index(self) -> int: + ... + + @property + def stream_index(self) -> int: + ... + + +def _track_type_of(mapped_track) -> str: + direct = getattr(mapped_track, "track_type", None) + if direct is not None: + return str(direct) + return str(getattr(getattr(mapped_track, "track", None), "track_type", "")) + + +def build_probe_remux_config( + config: EncodeConfig, + all_sources: list[Path], + source_idx_local: dict[Path, int], + resolved_subtitle_tracks: list[tuple[Path, int]], +) -> RemuxConfig: + tracks_by_source: dict[int, list[TrackEntry]] = {i: [] for i in range(len(all_sources))} + + def _push_track(src_idx: int, stream_idx: int, track_type: str) -> None: + bucket = tracks_by_source.setdefault(src_idx, []) + if any(track.mkv_tid == stream_idx and track.track_type == track_type for track in bucket): + return + bucket.append( + TrackEntry( + mkv_tid=stream_idx, + track_type=track_type, + codec="COPY", + display_info="", + language="", + title="", + ) + ) + + _push_track(0, 0, "video") + for audio in config.audio_tracks: + source = Path(audio.source_path or config.source) + src_idx = source_idx_local.get(source, 0) + _push_track(src_idx, int(audio.stream_index), "audio") + for source, stream_idx in resolved_subtitle_tracks: + src_idx = source_idx_local.get(source) + if src_idx is None: + continue + _push_track(src_idx, int(stream_idx), "subtitle") + + sources: list[SourceInput] = [] + track_order: list[tuple[int, int] | tuple[int, int, str]] = [] + for index, source in enumerate(all_sources): + source_tracks = tracks_by_source.get(index, []) + sources.append(SourceInput(path=source, file_index=index, tracks=source_tracks)) + for track in source_tracks: + track_order.append((index, int(track.mkv_tid))) + + return RemuxConfig( + sources=sources, + output=config.output, + track_order=track_order, + keep_chapters=config.keep_chapters, + ) + + +def build_sync_mapped_tracks( + config: EncodeConfig, + 
source_idx_local: dict[Path, int], + resolved_subtitle_tracks: list[tuple[Path, int]], +) -> list[SyncMappedTrackPlan]: + mapped: list[SyncMappedTrackPlan] = [ + SyncMappedTrackPlan( + source_file_index=0, + stream_index=0, + track_type="video", + ) + ] + for audio in config.audio_tracks: + source = Path(audio.source_path or config.source) + src_idx = source_idx_local.get(source, 0) + mapped.append( + SyncMappedTrackPlan( + source_file_index=src_idx, + stream_index=int(audio.stream_index), + track_type="audio", + ) + ) + for source, stream_idx in resolved_subtitle_tracks: + src_idx = source_idx_local.get(source) + if src_idx is None: + continue + mapped.append( + SyncMappedTrackPlan( + source_file_index=src_idx, + stream_index=int(stream_idx), + track_type="subtitle", + ) + ) + return mapped + + +def needs_strict_interleave_for_encode( + mapped_tracks: Sequence[_SyncMappedTrackLike], +) -> bool: + if len({int(track.source_file_index) for track in mapped_tracks}) < 2: + return False + if not any(_track_type_of(track) == "subtitle" for track in mapped_tracks): + return False + primary_video = next((track for track in mapped_tracks if _track_type_of(track) == "video"), None) + if primary_video is None: + return False + primary_source = int(primary_video.source_file_index) + return any( + _track_type_of(track) == "audio" + and int(track.source_file_index) != primary_source + for track in mapped_tracks + ) + + +def requires_file_sync_fallback_for_offsets( + config: EncodeConfig, + mapped_tracks: Sequence[_SyncMappedTrackLike], + source_by_index: dict[int, Path], + *, + track_offset_ms=None, + offset_lookup: dict[tuple[str, Path, int], int], +) -> bool: + primary_video = next((track for track in mapped_tracks if _track_type_of(track) == "video"), None) + if primary_video is None: + return False + + for mapped_track in mapped_tracks: + track_type = _track_type_of(mapped_track) + if track_type not in {"audio", "subtitle"}: + continue + if mapped_track.source_file_index == 
primary_video.source_file_index: + continue + source_path = source_by_index.get(mapped_track.source_file_index) + if source_path is None: + continue + resolver = track_offset_ms or globals()["track_offset_ms"] + offset_ms = resolver( + offset_lookup, + track_type=track_type, + source_path=source_path, + stream_index=mapped_track.stream_index, + ) + if offset_ms != 0: + return True + return False + + +def build_sync_analysis_plan( + config: EncodeConfig, + all_sources: list[Path], + source_idx_local: dict[Path, int], + resolved_subtitle_tracks: list[tuple[Path, int]], + *, + subtitles_resolved: bool, + offset_lookup: dict[tuple[str, Path, int], int], +) -> SyncAnalysisPlan: + if len(source_idx_local) < 2: + return SyncAnalysisPlan( + enabled=False, + mapped_tracks=(), + offset_requires_file_fallback=False, + needs_subtitle_prescan=False, + strict_interleave_without_prescan=False, + allow_live_sync=True, + probe_remux_config=None, + ) + + mapped_tracks = tuple( + build_sync_mapped_tracks( + config, + source_idx_local, + resolved_subtitle_tracks, + ) + ) + path_by_source_idx = {idx: path for path, idx in source_idx_local.items()} + offset_requires_sync = requires_file_sync_fallback_for_offsets( + config, + mapped_tracks, + path_by_source_idx, + offset_lookup=offset_lookup, + ) + if offset_requires_sync: + return SyncAnalysisPlan( + enabled=True, + mapped_tracks=mapped_tracks, + offset_requires_file_fallback=True, + needs_subtitle_prescan=False, + strict_interleave_without_prescan=True, + allow_live_sync=False, + probe_remux_config=None, + ) + + if subtitles_resolved: + return SyncAnalysisPlan( + enabled=True, + mapped_tracks=mapped_tracks, + offset_requires_file_fallback=False, + needs_subtitle_prescan=True, + strict_interleave_without_prescan=False, + allow_live_sync=True, + probe_remux_config=build_probe_remux_config( + config, + all_sources, + source_idx_local, + resolved_subtitle_tracks, + ), + ) + + strict_without_prescan = bool(config.copy_subtitles and 
needs_strict_interleave_for_encode(mapped_tracks)) + return SyncAnalysisPlan( + enabled=True, + mapped_tracks=mapped_tracks, + offset_requires_file_fallback=False, + needs_subtitle_prescan=False, + strict_interleave_without_prescan=strict_without_prescan, + allow_live_sync=True, + probe_remux_config=None, + ) diff --git a/core/workflows/encode/planning/track_assembly.py b/core/workflows/encode/planning/track_assembly.py new file mode 100644 index 0000000..8278b1a --- /dev/null +++ b/core/workflows/encode/planning/track_assembly.py @@ -0,0 +1,96 @@ +from __future__ import annotations + +from collections.abc import Sequence +from pathlib import Path +from typing import Mapping + +from core.workflows.encode.models import EncodeConfig + +from .plan_models import EncodePlan, ResolvedTrackAssembly + + +def build_track_input_paths( + *, + leading_inputs: Sequence[Path | str] = (), + all_sources: Sequence[Path] = (), + sync_inputs: Sequence[Path | str] = (), +) -> tuple[Path | str, ...]: + return tuple([*leading_inputs, *all_sources, *sync_inputs]) + + +def resolve_track_assembly( + config: EncodeConfig, + plan: EncodePlan, + *, + source_idx: Mapping[Path, int], + track_input_paths: Sequence[Path | str], + sync_remap: Mapping[tuple[Path, int, str], tuple[int, int]] | None = None, + video_default_map: tuple[int, int] | None = None, + video_fallback_input: Path | str | None = None, + include_video: bool = True, +) -> ResolvedTrackAssembly: + sync_remap = sync_remap or {} + resolved_subtitle_tracks = list(plan.resolved_subtitle_tracks) + track_inputs = tuple(track_input_paths) + + def _input_path(idx: int, fallback: Path | str) -> Path | str: + if 0 <= idx < len(track_inputs): + return track_inputs[idx] + return fallback + + video_base_map = sync_remap.get(plan.video_key, video_default_map or plan.video_default_map) + video_fallback = ( + video_fallback_input + if video_fallback_input is not None + else plan.all_sources[plan.video_input_idx] + ) + track_mappings: 
list[tuple[tuple[Path, int, str], Path | str, int]] = [] + if include_video: + track_mappings.append( + ( + plan.video_key, + _input_path(int(video_base_map[0]), video_fallback), + int(video_base_map[1]), + ) + ) + + for audio in config.audio_tracks: + src_path = Path(audio.source_path or config.source) + remapped = sync_remap.get((src_path, int(audio.stream_index), "audio")) + if remapped is not None: + inp, stream_idx = remapped + else: + inp = source_idx.get(src_path) + if inp is None: + inp = source_idx.get(config.source, 0) + stream_idx = int(audio.stream_index) + track_mappings.append( + ( + (src_path, int(audio.stream_index), "audio"), + _input_path(int(inp), src_path), + int(stream_idx), + ) + ) + + for src_path_raw, stream_idx_raw in resolved_subtitle_tracks: + src_path = Path(src_path_raw) + remapped = sync_remap.get((src_path, int(stream_idx_raw), "subtitle")) + if remapped is not None: + inp, stream_idx = remapped + else: + inp = source_idx.get(src_path) + if inp is None: + continue + stream_idx = int(stream_idx_raw) + track_mappings.append( + ( + (src_path, int(stream_idx_raw), "subtitle"), + _input_path(int(inp), src_path), + int(stream_idx), + ) + ) + + return ResolvedTrackAssembly( + track_mappings=tuple(track_mappings), + video_map=(int(video_base_map[0]), int(video_base_map[1])), + ) diff --git a/core/workflows/encode/planning/validation.py b/core/workflows/encode/planning/validation.py new file mode 100644 index 0000000..9d6cccf --- /dev/null +++ b/core/workflows/encode/planning/validation.py @@ -0,0 +1,112 @@ +from __future__ import annotations + +import re +import tempfile +from pathlib import Path +from typing import Callable + +from core.workflows.common.track_types import TrackTimeOffset +from core.workflows.encode.models import EncodeConfig, QualityMode, VideoEncodeSettings +from core.workflows.encode.planning.plan_models import PlannedVideoTrack + +_DYNAMIC_HDR_ALLOWED_CODECS = {"copy", "libx265", "hevc_nvenc", "hevc_amf", "hevc_qsv", 
"hevc_vaapi"} +_MASTER_DISPLAY_RE = re.compile(r"^G\(\d+,\d+\)B\(\d+,\d+\)R\(\d+,\d+\)WP\(\d+,\d+\)L\(\d+,\d+\)$") +_MAX_CLL_RE = re.compile(r"^\d+,\d+$") + + +def is_dir_writable(path: Path) -> bool: + try: + with tempfile.NamedTemporaryFile( + mode="wb", + dir=path, + prefix="mrecode_write_probe_", + delete=True, + ): + pass + return True + except OSError: + return False + + +def validate_encode_config( + config: EncodeConfig, + *, + planned_video_tracks: tuple[PlannedVideoTrack, ...] | None = None, + video_tracks: list[VideoEncodeSettings] | None = None, + video_source_from_settings: Callable[[EncodeConfig, VideoEncodeSettings], Path] | None = None, + dir_writable: Callable[[Path], bool] = is_dir_writable, +) -> list[str]: + errors: list[str] = [] + if not config.source.is_file(): + errors.append(f"Fichier source introuvable : {config.source}") + if planned_video_tracks is None: + if video_tracks is None or video_source_from_settings is None: + raise ValueError("planned_video_tracks or (video_tracks + video_source_from_settings) is required") + planned_video_tracks = tuple( + PlannedVideoTrack( + source=Path(video_source_from_settings(config, video)), + stream_index=int(getattr(video, "stream_index", 0) or 0), + codec=str(video.codec), + quality_mode=video.quality_mode, + inject_hdr_meta=bool(video.inject_hdr_meta), + tonemap_to_sdr=bool(video.tonemap_to_sdr), + copy_dv=bool(video.copy_dv), + copy_hdr10plus=bool(video.copy_hdr10plus), + master_display=str(video.master_display or ""), + max_cll=str(video.max_cll or ""), + ) + for video in video_tracks + ) + if not planned_video_tracks: + errors.append("Aucune piste vidéo sélectionnée.") + return errors + + for index, video in enumerate(planned_video_tracks, start=1): + source = video.source + if not source.is_file(): + errors.append(f"Piste vidéo #{index} — source introuvable : {source}") + if video.codec == "copy" and (video.inject_hdr_meta or video.tonemap_to_sdr): + errors.append( + f"Piste vidéo #{index} — 
codec copy incompatible avec HDR statique ou tone-mapping." + ) + if (video.copy_dv or video.copy_hdr10plus) and video.codec not in _DYNAMIC_HDR_ALLOWED_CODECS: + errors.append( + f"Piste vidéo #{index} — DoVi/HDR10+ exige une sortie HEVC ou copy." + ) + + output_dir = config.output.parent + if not output_dir.exists(): + errors.append(f"Dossier de sortie inexistant : {output_dir}") + elif not dir_writable(output_dir): + errors.append( + "Dossier de sortie non inscriptible : " + f"{output_dir} (vérifiez les protections Windows sur les dossiers Bibliothèques)." + ) + if config.source == config.output: + errors.append("Le fichier de sortie doit être différent du fichier source.") + if any(video.quality_mode == QualityMode.SIZE and video.codec != "copy" for video in planned_video_tracks) and not (config.duration_s or 0) > 0: + errors.append("Durée du fichier source inconnue — mode taille cible impossible.") + + for index, video in enumerate(planned_video_tracks, start=1): + if video.inject_hdr_meta and not video.tonemap_to_sdr: + if video.master_display and not _MASTER_DISPLAY_RE.match(video.master_display.strip()): + errors.append( + f"Piste vidéo #{index} — format master_display invalide. " + "Attendu : G(x,y)B(x,y)R(x,y)WP(x,y)L(max,min)" + ) + if video.max_cll and not _MAX_CLL_RE.match(video.max_cll.strip()): + errors.append( + f"Piste vidéo #{index} — format MaxCLL invalide. Attendu : MaxCLL,MaxFALL ex. 
1000,400" + ) + + for raw in config.track_time_offsets: + if not isinstance(raw, TrackTimeOffset): + continue + track_type = str(raw.track_type or "").strip().lower() + if track_type == "video" and int(raw.offset_ms) < 0: + errors.append( + "Décalage vidéo négatif interdit : " + f"source={Path(raw.source_path)}, stream={int(raw.stream_index)}, " + f"offset={int(raw.offset_ms)} ms" + ) + return errors diff --git a/core/workflows/encode/remux_bridge.py b/core/workflows/encode/remux_bridge.py new file mode 100644 index 0000000..c25bf64 --- /dev/null +++ b/core/workflows/encode/remux_bridge.py @@ -0,0 +1,190 @@ +"""Bridge helpers between remux and encode workflow configurations.""" + +from __future__ import annotations + +from pathlib import Path + +from core.workflows.common.track_types import TrackMetaPatch, TrackOffset, TrackType +from core.workflows.encode.models import EncodeConfig +from core.workflows.remux_models import RemuxConfig, TrackEntry + + +def merge_remux_into_encode_config( + encode_cfg: EncodeConfig, + remux_cfg: RemuxConfig, +) -> EncodeConfig: + """Enrichit l'encode config avec les informations issues du remux panel.""" + sub_tracks: list[tuple[Path, int]] = [] + attachment_streams: list[tuple[Path, int]] = [] + tag_sources: list[Path] = [] + + source_by_index = {src.file_index: src for src in remux_cfg.sources} + remux_track_map: dict[tuple[Path, int], TrackEntry] = {} + remux_track_map_by_id: dict[str, TrackEntry] = {} + + for src in remux_cfg.sources: + for track in src.tracks: + remux_track_map[(src.path, track.mkv_tid)] = track + remux_track_map_by_id[track.entry_id] = track + for att in src.selected_attachments: + attachment_streams.append((src.path, att.index)) + if src.copy_tags: + tag_sources.append(src.path) + + ordered_tracks: list[tuple[Path, TrackEntry]] = [] + for item in remux_cfg.track_order: + file_index = int(item[0]) + mkv_tid = int(item[1]) + entry_id = str(item[2]).strip() if len(item) > 2 else "" + src = 
source_by_index.get(file_index) + if src is None: + continue + track = remux_track_map_by_id.get(entry_id) if entry_id else remux_track_map.get((src.path, mkv_tid)) + if track is None: + continue + ordered_tracks.append((src.path, track)) + + sub_tracks = [ + (src_path, track.mkv_tid) + for src_path, track in ordered_tracks + if track.track_type == "subtitle" + ] + + tag_overrides = remux_cfg.tag_overrides + track_meta_edits: list[TrackMetaPatch] = [] + track_time_offsets: list[TrackOffset] = [] + video_tracks = encode_cfg.video_tracks or ([encode_cfg.video] if encode_cfg.video is not None else []) + + def _make_edit(track_order: int, track: TrackEntry) -> TrackMetaPatch | None: + lang = (track.language or "").strip() + orig_lang = (track.orig_language or "").strip() + title = (track.title or "").strip() + if not lang and orig_lang and lang != orig_lang: + lang = "und" + has_flag_state = any(( + track.flag_default, + track.flag_forced, + track.flag_hearing_impaired, + track.flag_visual_impaired, + track.flag_original, + track.flag_commentary, + )) + has_flag_change = any(( + track.flag_default != track.orig_flag_default, + track.flag_forced != track.orig_flag_forced, + track.flag_hearing_impaired != track.orig_flag_hearing_impaired, + track.flag_visual_impaired != track.orig_flag_visual_impaired, + track.flag_original != track.orig_flag_original, + track.flag_commentary != track.orig_flag_commentary, + )) + if not lang and not title and not has_flag_state and not has_flag_change: + return None + return TrackMetaPatch( + track_order=track_order, + language=lang, + title=title if title else None, + flag_default=track.flag_default, + flag_forced=track.flag_forced, + flag_hearing_impaired=track.flag_hearing_impaired, + flag_visual_impaired=track.flag_visual_impaired, + flag_original=track.flag_original, + flag_commentary=track.flag_commentary, + ) + + def _append_track_offset(track_type: str, src_path: Path, stream_index: int, track: TrackEntry | None) -> None: + if 
track is None: + return + offset_ms = int(getattr(track, "time_shift_ms", 0) or 0) + if offset_ms == 0: + return + track_time_offsets.append(TrackOffset( + track_type=track_type, + source_path=src_path, + stream_index=int(stream_index), + offset_ms=offset_ms, + )) + + def _find_track(src_path: Path, stream_index: int, track_type: str) -> TrackEntry | None: + track = remux_track_map.get((src_path, stream_index)) + if track is not None: + return track + for entry in remux_track_map.values(): + if entry.mkv_tid == stream_index and entry.track_type == track_type: + return entry + return None + + for video_order, video_settings in enumerate(video_tracks, start=1): + video_src = Path(video_settings.source_path or encode_cfg.source) + if video_settings.track_entry_id: + video_entry = remux_track_map_by_id.get(video_settings.track_entry_id) + else: + video_entry = _find_track(video_src, int(video_settings.stream_index), "video") + if video_entry is None: + continue + edit = _make_edit(video_order, video_entry) + if edit: + track_meta_edits.append(edit) + _append_track_offset(TrackType.VIDEO.value, video_src, int(video_settings.stream_index), video_entry) + + audio_offset = len(video_tracks) + 1 + for audio_order, audio_settings in enumerate(encode_cfg.audio_tracks): + src_path = audio_settings.source_path or encode_cfg.source + if audio_settings.track_entry_id: + track = remux_track_map_by_id.get(audio_settings.track_entry_id) + else: + track = _find_track(src_path, audio_settings.stream_index, "audio") + if track is None: + continue + edit = _make_edit(audio_offset + audio_order, track) + if edit: + track_meta_edits.append(edit) + _append_track_offset(TrackType.AUDIO.value, src_path, audio_settings.stream_index, track) + + sub_offset = audio_offset + len(encode_cfg.audio_tracks) + used_sub_tracks = sub_tracks or encode_cfg.subtitle_tracks + for sub_order, (sub_path, sub_sid) in enumerate(used_sub_tracks): + track = remux_track_map.get((sub_path, sub_sid)) + if track is 
None: + continue + edit = _make_edit(sub_offset + sub_order, track) + if edit: + track_meta_edits.append(edit) + _append_track_offset(TrackType.SUBTITLE.value, sub_path, sub_sid, track) + + chapter_overrides = remux_cfg.chapter_overrides + if ( + not sub_tracks + and not attachment_streams + and not tag_sources + and tag_overrides is None + and not track_meta_edits + and not track_time_offsets + and encode_cfg.keep_chapters == remux_cfg.keep_chapters + and remux_cfg.chapter_overrides is None + ): + return encode_cfg + + return EncodeConfig( + source=encode_cfg.source, + output=encode_cfg.output, + video=encode_cfg.video, + video_tracks=video_tracks, + audio_tracks=encode_cfg.audio_tracks, + copy_subtitles=encode_cfg.copy_subtitles if not sub_tracks else False, + subtitle_tracks=sub_tracks or encode_cfg.subtitle_tracks, + keep_chapters=remux_cfg.keep_chapters, + chapter_overrides=chapter_overrides, + attachment_streams=attachment_streams, + tag_sources=[] if tag_overrides is not None else tag_sources, + tag_overrides=tag_overrides, + track_meta_edits=track_meta_edits, + track_time_offsets=track_time_offsets, + duration_s=encode_cfg.duration_s, + copy_dv=encode_cfg.copy_dv, + copy_hdr10plus=encode_cfg.copy_hdr10plus, + dovi_profile=encode_cfg.dovi_profile, + work_dir=encode_cfg.work_dir, + file_title=encode_cfg.file_title, + extra_attachments=encode_cfg.extra_attachments, + tmdb_cover=encode_cfg.tmdb_cover, + ) diff --git a/core/workflows/encode/runtime/__init__.py b/core/workflows/encode/runtime/__init__.py new file mode 100644 index 0000000..e30a867 --- /dev/null +++ b/core/workflows/encode/runtime/__init__.py @@ -0,0 +1,76 @@ +"""Runtime runners for the encode workflow. + +## Conventions runtime + +### Plage `log_step` +- `1` a `4` restent reserves a la preparation de haut niveau dans + `EncodeWorkflow._run_with_preparation`. +- Les runners runtime commencent a `5` et ne doivent pas reutiliser `1..4`. 
+ +### Ranges par runner +- `DirectOutputRunner`: + - `5`: construction de commande + - `6`: preparation sync/remap + - `7`: execution ffmpeg (single-pass ou two-pass) +- `MetadataInjectRunner`: + - `5`: extraction metadonnees dynamiques (DoVi/HDR10+) + - `6`: encodage video seule + - `7`: injection HDR10+ / DoVi + - `8`: encapsulation timeline de la video injectee + - `9`: reconstruction finale MKV +- `MultiVideoPipelineRunner`: + - preparation des pistes faite via logs/progress dedies + - `5`: reconstruction finale multi-pistes video + +### Contrats callbacks +- `check_cancelled(signals)` doit lever `TaskCancelledError` quand necessaire. +- `build_encode_plan(config)` doit retourner un plan stable reutilisable sur tout + le pipeline du runner. +- `run_cmd(...)` est la seule porte d'execution process et doit relayer la + progression vers `signals.progress`. +- Les callbacks de mapping/sync (`prepare_multisource_sync`, + `append_sync_inputs`, `append_offset_aux_inputs`) doivent preserver les + index d'inputs pour garder les `-map` deterministes. 
+""" + +from .attachment_preparation import ( + AttachmentPreparationService, + AttachmentPreparationServiceCallbacks, + default_attachment_filename, + extract_attached_pic, + probe_attachment_stream, + unique_attachment_path, +) +from .bindings import SignalBindingService, SignalBindingServiceCallbacks +from .direct_output import DirectOutputRunner, DirectOutputRunnerCallbacks +from .metadata_inject import MetadataInjectRunner, MetadataInjectRunnerCallbacks +from .multi_video import MultiVideoPipelineRunner, MultiVideoPipelineRunnerCallbacks +from .storage_guard import ( + ensure_inject_storage_available, + estimate_duration_seconds, + estimate_inject_storage_requirements, + estimate_inject_video_bytes, + format_bytes, +) + +__all__ = [ + "AttachmentPreparationService", + "AttachmentPreparationServiceCallbacks", + "SignalBindingService", + "SignalBindingServiceCallbacks", + "DirectOutputRunner", + "DirectOutputRunnerCallbacks", + "MetadataInjectRunner", + "MetadataInjectRunnerCallbacks", + "MultiVideoPipelineRunner", + "MultiVideoPipelineRunnerCallbacks", + "default_attachment_filename", + "ensure_inject_storage_available", + "estimate_duration_seconds", + "estimate_inject_storage_requirements", + "estimate_inject_video_bytes", + "extract_attached_pic", + "format_bytes", + "probe_attachment_stream", + "unique_attachment_path", +] diff --git a/core/workflows/encode/runtime/attachment_preparation.py b/core/workflows/encode/runtime/attachment_preparation.py new file mode 100644 index 0000000..aa04984 --- /dev/null +++ b/core/workflows/encode/runtime/attachment_preparation.py @@ -0,0 +1,207 @@ +from __future__ import annotations + +import json +import os +import shutil +import subprocess +import tempfile +from dataclasses import dataclass, replace +from pathlib import Path +from typing import Callable + +from core.runner import TaskCancelledError, TaskSignals +from core.subprocess_utils import subprocess_text_kwargs +from core.workflows.common.attachments import 
attachment_filename_from_meta +from core.workflows.encode.models import EncodeConfig, EncodeError + + +def probe_attachment_stream( + source: Path, + stream_idx: int, + *, + ffprobe_bin: str, + subprocess_run: Callable[..., subprocess.CompletedProcess[str]] = subprocess.run, + text_kwargs_factory: Callable[[], dict[str, object]] = subprocess_text_kwargs, +) -> dict[str, object]: + """Retourne les métadonnées minimales d'un stream potentiellement attachment.""" + cmd = [ + ffprobe_bin, + "-v", + "quiet", + "-print_format", + "json", + "-show_streams", + str(source), + ] + try: + result = subprocess_run( + cmd, + capture_output=True, + check=False, + timeout=30, + **text_kwargs_factory(), + ) + except FileNotFoundError: + return { + "is_attached_pic": False, + "filename": f"attachment_{stream_idx}.bin", + "mimetype": "application/octet-stream", + } + + if result.returncode != 0: + return { + "is_attached_pic": False, + "filename": f"attachment_{stream_idx}.bin", + "mimetype": "application/octet-stream", + } + + try: + payload = json.loads(result.stdout or "{}") + except json.JSONDecodeError: + payload = {} + + for stream in payload.get("streams", []): + if int(stream.get("index", -1)) != int(stream_idx): + continue + tags = stream.get("tags", {}) or {} + disposition = stream.get("disposition", {}) or {} + return { + "is_attached_pic": bool(disposition.get("attached_pic", 0)), + "filename": tags.get("filename") or f"attachment_{stream_idx}.bin", + "mimetype": tags.get("mimetype") or "application/octet-stream", + } + + return { + "is_attached_pic": False, + "filename": f"attachment_{stream_idx}.bin", + "mimetype": "application/octet-stream", + } + + +def unique_attachment_path(tmp_dir: Path, filename: str) -> Path: + """Retourne un chemin unique dans ``tmp_dir`` pour éviter les collisions.""" + candidate = tmp_dir / filename + if not candidate.exists(): + return candidate + stem = candidate.stem + suffix = candidate.suffix + for idx in range(1, 1000): + alt = tmp_dir / 
f"{stem}_{idx}{suffix}" + if not alt.exists(): + return alt + return tmp_dir / f"{stem}_{os.getpid()}{suffix}" + + +def extract_attached_pic( + source: Path, + stream_idx: int, + dest: Path, + *, + ffmpeg_bin: str, + ffmpeg_thread_args: Callable[[], list[str]], + check_cancelled: Callable[[TaskSignals | None], None], + log_info: Callable[[str], None], + run_cmd: Callable[[list[str], str, TaskSignals | None], object], + signals: TaskSignals | None = None, +) -> None: + """Extrait un ``attached_pic`` vers un vrai fichier image.""" + check_cancelled(signals) + cmd = [ + ffmpeg_bin, + "-hide_banner", + "-y", + "-i", + str(source), + "-map", + f"0:{stream_idx}", + *ffmpeg_thread_args(), + "-c", + "copy", + "-frames:v", + "1", + str(dest), + ] + log_info("$ " + " ".join(cmd)) + try: + run_cmd(cmd, "extract-attached-pic", signals) + except TaskCancelledError: + dest.unlink(missing_ok=True) + raise + except Exception as exc: + dest.unlink(missing_ok=True) + stderr = str(exc).strip() + raise EncodeError( + f"Extraction attachment échouée pour le stream {stream_idx} de {source.name}: {stderr}" + ) from exc + check_cancelled(signals) + if not dest.exists(): + raise EncodeError( + f"Extraction attachment échouée pour le stream {stream_idx} de {source.name}: fichier absent" + ) + + +@dataclass(frozen=True) +class AttachmentPreparationServiceCallbacks: + check_cancelled: Callable[[TaskSignals | None], None] + describe_attachment_stream: Callable[[Path, int], dict[str, object]] + attachment_filename: Callable[[dict[str, object], int], str] + unique_attachment_path: Callable[[Path, str], Path] + extract_attached_pic: Callable[[Path, int, Path, TaskSignals | None], None] + + +class AttachmentPreparationService: + """Prépare les attachments avant le runtime encode.""" + + def __init__(self, callbacks: AttachmentPreparationServiceCallbacks) -> None: + self._callbacks = callbacks + + def prepare( + self, + config: EncodeConfig, + *, + work_dir: Path, + signals: TaskSignals | None = 
None, + ) -> tuple[EncodeConfig, Path | None]: + cb = self._callbacks + if not config.attachment_streams: + return config, None + + tmp_dir = Path(tempfile.mkdtemp(prefix="enc_attachments_", dir=str(work_dir))) + direct_streams: list = [] + extracted_files: list[Path] = [] + created_any = False + + try: + for selection in config.attachment_streams: + cb.check_cancelled(signals) + src_path, stream_idx = selection[:2] + meta = cb.describe_attachment_stream(src_path, stream_idx) + cb.check_cancelled(signals) + if not meta["is_attached_pic"]: + direct_streams.append(selection) + continue + + created_any = True + filename = cb.attachment_filename(meta, stream_idx) + dest = cb.unique_attachment_path(tmp_dir, filename) + cb.extract_attached_pic(src_path, stream_idx, dest, signals) + cb.check_cancelled(signals) + extracted_files.append(dest) + + if not created_any: + shutil.rmtree(tmp_dir, ignore_errors=True) + return config, None + + prepared = replace( + config, + attachment_streams=direct_streams, + extra_attachments=[*extracted_files, *config.extra_attachments], + ) + return prepared, tmp_dir + except Exception: + shutil.rmtree(tmp_dir, ignore_errors=True) + raise + + +def default_attachment_filename(meta: dict[str, object], stream_idx: int) -> str: + return attachment_filename_from_meta(meta, stream_idx) diff --git a/core/workflows/encode/runtime/bindings.py b/core/workflows/encode/runtime/bindings.py new file mode 100644 index 0000000..de84954 --- /dev/null +++ b/core/workflows/encode/runtime/bindings.py @@ -0,0 +1,95 @@ +from __future__ import annotations + +from dataclasses import dataclass +from pathlib import Path +from typing import Callable + +from core.runner import TaskSignals +from core.workdir import remove_path +from core.workflows.remux_timeline_sync import LiveSyncSession + + +@dataclass(frozen=True) +class SignalBindingServiceCallbacks: + muxing_bind_on_success: Callable[[TaskSignals, Path], None] + language_bind_on_success: Callable[[TaskSignals, 
Path], None] + write_nfo: Callable[[Path], None] + remove_path: Callable[[Path], None] = remove_path + + +class SignalBindingService: + """Centralise les bindings de cleanup et post-actions runtime.""" + + def __init__(self, callbacks: SignalBindingServiceCallbacks) -> None: + self._callbacks = callbacks + + def bind_live_sync_cleanup( + self, + signals: TaskSignals, + session: LiveSyncSession | None, + ) -> None: + if session is None: + return + + done = {"closed": False} + for proc in session.processes: + signals._register_proc(proc) + + def _cleanup(*_args) -> None: + if done["closed"]: + return + done["closed"] = True + for proc in session.processes: + signals._unregister_proc(proc) + session.close() + + signals.finished.connect(_cleanup) + signals.failed.connect(_cleanup) + signals.cancelled.connect(_cleanup) + + def bind_temp_cleanup(self, signals: TaskSignals, cleanup_paths: list[Path]) -> None: + if not cleanup_paths: + return + + done = {"cleaned": False} + + def _cleanup(*_args) -> None: + if done["cleaned"]: + return + done["cleaned"] = True + for path in cleanup_paths: + try: + self._callbacks.remove_path(path) + except OSError: + pass + + signals.finished.connect(_cleanup) + signals.failed.connect(_cleanup) + signals.cancelled.connect(_cleanup) + + def bind_matroska_segment_muxing_patch(self, signals: TaskSignals, output: Path) -> None: + self._callbacks.muxing_bind_on_success(signals, output) + self._callbacks.language_bind_on_success(signals, output) + + def bind_nfo_write(self, signals: TaskSignals, output: Path) -> None: + def _write(*_args) -> None: + self._callbacks.write_nfo(output) + + signals.finished.connect(_write) + + def bind_output_hooks( + self, + signals: TaskSignals, + *, + output: Path, + cleanup_paths: list[Path] | None = None, + include_temp_cleanup: bool = True, + include_segment_patch: bool = True, + include_nfo: bool = True, + ) -> None: + if include_temp_cleanup and cleanup_paths is not None: + self.bind_temp_cleanup(signals, 
cleanup_paths) + if include_segment_patch: + self.bind_matroska_segment_muxing_patch(signals, output) + if include_nfo: + self.bind_nfo_write(signals, output) diff --git a/core/workflows/encode/runtime/command_builders.py b/core/workflows/encode/runtime/command_builders.py new file mode 100644 index 0000000..45e1f7e --- /dev/null +++ b/core/workflows/encode/runtime/command_builders.py @@ -0,0 +1,386 @@ +from __future__ import annotations + +import os +from dataclasses import dataclass +from pathlib import Path +from typing import Callable + +from core.runner import TaskSignals +from core.workflows.common.timeline_sync import sync_cleanup_paths as _common_sync_cleanup_paths +from core.workflows.encode.domain import ( + EncodeCodecDomainCallbacks as _EncodeCodecDomainCallbacks, + build_encoder_vf as _build_encoder_vf_domain, + hardware_input_args as _hardware_input_args_domain, +) +from core.workflows.encode.models import EncodeConfig, VideoEncodeSettings +from core.workflows.encode.planning.offsets import build_offset_specs as _build_offset_specs_plan +from core.workflows.encode.planning.plan_models import ( + EncodePlan as _EncodePlan, + MaterializedContainerMetadataPlan as _MaterializedContainerMetadataPlan, + ResolvedTrackAssembly as _ResolvedTrackAssembly, +) +from core.workflows.encode.planning.track_assembly import build_track_input_paths as _build_track_input_paths_plan +from core.workflows.remux_timeline_sync import LiveSyncSession + + +@dataclass(frozen=True) +class EncodeCommandBuilderCallbacks: + ffmpeg_bin: str + ffmpeg_progress_args: Callable[[], list[str]] + ffmpeg_thread_args: Callable[[int | None], list[str]] + primary_video_settings: Callable[[EncodeConfig], VideoEncodeSettings] + build_encode_plan: Callable[[EncodeConfig], _EncodePlan] + size_to_bitrate_kbps: Callable[[EncodeConfig], int] + codec_domain_callbacks: Callable[[], _EncodeCodecDomainCallbacks] + materialize_container_metadata_inputs: Callable[..., _MaterializedContainerMetadataPlan] + 
resolve_track_assembly_and_offset_remap: Callable[..., tuple[_ResolvedTrackAssembly, dict[tuple[Path, int, str], tuple[int, int]]]] + append_primary_video_map_and_codec: Callable[..., None] + append_common_streams_and_metadata: Callable[..., None] + prepare_multisource_sync: Callable[..., tuple[dict[tuple[Path, int, str], tuple[int, int]], list[Path | str], LiveSyncSession | None, bool]] + append_sync_inputs: Callable[[list[str], list[Path | str]], None] + append_offset_aux_inputs: Callable[..., tuple[int, dict[tuple[Path, int, str], tuple[int, int]]]] + video_track_mapping: Callable[..., tuple[tuple[Path, int, str], Path | str, int]] + + +def build_single_pass( + callbacks: EncodeCommandBuilderCallbacks, + config: EncodeConfig, + *, + chapter_materialize_dir: Path | None = None, + plan: _EncodePlan | None = None, +) -> list[str]: + plan = plan or callbacks.build_encode_plan(config) + video = callbacks.primary_video_settings(config) + all_sources = list(plan.all_sources) + source_idx = dict(plan.source_idx) + + cmd: list[str] = [callbacks.ffmpeg_bin, "-hide_banner", "-y"] + cmd.extend(callbacks.ffmpeg_progress_args()) + cmd.extend(_hardware_input_args_domain(video, callbacks=callbacks.codec_domain_callbacks())) + for src in all_sources: + cmd.extend(["-i", str(src)]) + metadata_inputs = callbacks.materialize_container_metadata_inputs( + config, + source_idx=source_idx, + next_input_index=len(all_sources), + plan=plan, + chapter_materialize_dir=chapter_materialize_dir, + chapter_probe_source=config.source, + ) + cmd.extend(metadata_inputs.input_args) + + vf = _build_encoder_vf_domain(video, callbacks=callbacks.codec_domain_callbacks()) + if vf: + cmd.extend(["-vf", vf]) + + cmd.extend(callbacks.ffmpeg_thread_args(None)) + track_assembly, offset_remap = callbacks.resolve_track_assembly_and_offset_remap( + cmd=cmd, + config=config, + plan=plan, + source_idx=source_idx, + track_input_paths=_build_track_input_paths_plan(all_sources=all_sources), + 
start_input_index=metadata_inputs.next_input_index, + ) + + callbacks.append_primary_video_map_and_codec( + cmd, + plan=plan, + video_map=track_assembly.video_map, + offset_remap=offset_remap, + video=video, + ) + callbacks.append_common_streams_and_metadata( + cmd, + config=config, + source_idx=source_idx, + all_sources_count=len(all_sources), + plan=plan, + metadata_inputs=metadata_inputs, + offset_remap=offset_remap, + ) + cmd.append(str(config.output)) + return cmd + + +def build_two_pass( + callbacks: EncodeCommandBuilderCallbacks, + config: EncodeConfig, + *, + chapter_materialize_dir: Path | None = None, + plan: _EncodePlan | None = None, +) -> list[list[str]]: + video = callbacks.primary_video_settings(config) + bitrate = callbacks.size_to_bitrate_kbps(config) + vf = _build_encoder_vf_domain(video, callbacks=callbacks.codec_domain_callbacks()) + plan = plan or callbacks.build_encode_plan(config) + all_sources = list(plan.all_sources) + source_idx = dict(plan.source_idx) + + def _base() -> list[str]: + c = [callbacks.ffmpeg_bin, "-hide_banner", "-y"] + c.extend(callbacks.ffmpeg_progress_args()) + c.extend(_hardware_input_args_domain(video, callbacks=callbacks.codec_domain_callbacks())) + for src in all_sources: + c.extend(["-i", str(src)]) + if vf: + c.extend(["-vf", vf]) + c.extend(callbacks.ffmpeg_thread_args(None)) + return c + + pass1 = _base() + _next1, pass1_offset_remap = callbacks.append_offset_aux_inputs( + pass1, + _build_offset_specs_plan( + config, + track_mappings=[callbacks.video_track_mapping(config, all_sources[plan.video_input_idx])], + offset_lookup=dict(plan.offset_lookup), + ), + start_input_index=len(all_sources), + ) + _ = _next1 + callbacks.append_primary_video_map_and_codec( + pass1, + plan=plan, + video_map=plan.video_default_map, + offset_remap=pass1_offset_remap, + video=video, + bitrate_kbps=bitrate, + include_hdr_meta=False, + ) + pass1.extend(["-pass", "1", "-an", "-f", "null", os.devnull]) + + pass2 = _base() + metadata_inputs 
= callbacks.materialize_container_metadata_inputs( + config, + source_idx=source_idx, + next_input_index=len(all_sources), + plan=plan, + chapter_materialize_dir=chapter_materialize_dir, + chapter_probe_source=config.source, + ) + pass2.extend(metadata_inputs.input_args) + + track_assembly, pass2_offset_remap = callbacks.resolve_track_assembly_and_offset_remap( + cmd=pass2, + config=config, + plan=plan, + source_idx=source_idx, + track_input_paths=_build_track_input_paths_plan(all_sources=all_sources), + start_input_index=metadata_inputs.next_input_index, + ) + + callbacks.append_primary_video_map_and_codec( + pass2, + plan=plan, + video_map=track_assembly.video_map, + offset_remap=pass2_offset_remap, + video=video, + bitrate_kbps=bitrate, + ) + pass2.extend(["-pass", "2"]) + callbacks.append_common_streams_and_metadata( + pass2, + config=config, + source_idx=source_idx, + all_sources_count=len(all_sources), + plan=plan, + metadata_inputs=metadata_inputs, + offset_remap=pass2_offset_remap, + ) + pass2.append(str(config.output)) + return [pass1, pass2] + + +def build_runtime_single_pass_with_sync( + callbacks: EncodeCommandBuilderCallbacks, + config: EncodeConfig, + *, + chapter_materialize_dir: Path | None = None, + signals: TaskSignals | None = None, + plan: _EncodePlan | None = None, +) -> tuple[list[str], LiveSyncSession | None, list[Path]]: + plan = plan or callbacks.build_encode_plan(config) + video = callbacks.primary_video_settings(config) + all_sources = list(plan.all_sources) + source_idx = dict(plan.source_idx) + work_dir = config.work_dir or config.source.parent + + sync_remap, sync_inputs, live_session, strict_interleave = callbacks.prepare_multisource_sync( + config=config, + all_sources=all_sources, + sync_base_input_idx=len(all_sources), + work_dir=work_dir, + signals=signals, + allow_live=True, + plan=plan, + ) + + cmd: list[str] = [callbacks.ffmpeg_bin, "-hide_banner", "-y"] + cmd.extend(callbacks.ffmpeg_progress_args()) + 
cmd.extend(_hardware_input_args_domain(video, callbacks=callbacks.codec_domain_callbacks())) + for src in all_sources: + cmd.extend(["-i", str(src)]) + callbacks.append_sync_inputs(cmd, sync_inputs) + + metadata_inputs = callbacks.materialize_container_metadata_inputs( + config, + source_idx=source_idx, + next_input_index=len(all_sources) + len(sync_inputs), + plan=plan, + chapter_materialize_dir=chapter_materialize_dir, + chapter_probe_source=config.source, + ) + cmd.extend(metadata_inputs.input_args) + + vf = _build_encoder_vf_domain(video, callbacks=callbacks.codec_domain_callbacks()) + if vf: + cmd.extend(["-vf", vf]) + + cmd.extend(callbacks.ffmpeg_thread_args(None)) + track_assembly, offset_remap = callbacks.resolve_track_assembly_and_offset_remap( + cmd=cmd, + config=config, + plan=plan, + source_idx=source_idx, + track_input_paths=_build_track_input_paths_plan( + all_sources=all_sources, + sync_inputs=sync_inputs, + ), + start_input_index=metadata_inputs.next_input_index, + sync_remap=sync_remap, + video_fallback_input=plan.video_source, + ) + callbacks.append_primary_video_map_and_codec( + cmd, + plan=plan, + video_map=track_assembly.video_map, + offset_remap=offset_remap, + video=video, + ) + callbacks.append_common_streams_and_metadata( + cmd, + config=config, + source_idx=source_idx, + all_sources_count=len(all_sources), + plan=plan, + metadata_inputs=metadata_inputs, + offset_remap=offset_remap, + sync_remap=sync_remap, + strict_interleave=strict_interleave, + ) + cmd.append(str(config.output)) + return cmd, live_session, _common_sync_cleanup_paths(sync_inputs) + + +def build_runtime_two_pass_with_sync( + callbacks: EncodeCommandBuilderCallbacks, + config: EncodeConfig, + *, + chapter_materialize_dir: Path | None = None, + signals: TaskSignals | None = None, + plan: _EncodePlan | None = None, +) -> tuple[list[list[str]], LiveSyncSession | None, list[Path]]: + video = callbacks.primary_video_settings(config) + bitrate = 
callbacks.size_to_bitrate_kbps(config) + vf = _build_encoder_vf_domain(video, callbacks=callbacks.codec_domain_callbacks()) + plan = plan or callbacks.build_encode_plan(config) + all_sources = list(plan.all_sources) + source_idx = dict(plan.source_idx) + work_dir = config.work_dir or config.source.parent + + sync_remap, sync_inputs, live_session, strict_interleave = callbacks.prepare_multisource_sync( + config=config, + all_sources=all_sources, + sync_base_input_idx=len(all_sources), + work_dir=work_dir, + signals=signals, + allow_live=False, + plan=plan, + ) + + def _base(include_sync_inputs: bool) -> list[str]: + c = [callbacks.ffmpeg_bin, "-hide_banner", "-y"] + c.extend(callbacks.ffmpeg_progress_args()) + c.extend(_hardware_input_args_domain(video, callbacks=callbacks.codec_domain_callbacks())) + for src in all_sources: + c.extend(["-i", str(src)]) + if include_sync_inputs: + callbacks.append_sync_inputs(c, sync_inputs) + if vf: + c.extend(["-vf", vf]) + c.extend(callbacks.ffmpeg_thread_args(None)) + return c + + video_key = plan.video_key + video_default_map = plan.video_default_map + + pass1 = _base(False) + _next1, pass1_offset_remap = callbacks.append_offset_aux_inputs( + pass1, + _build_offset_specs_plan( + config, + track_mappings=[callbacks.video_track_mapping(config, plan.video_source)], + offset_lookup=dict(plan.offset_lookup), + ), + start_input_index=len(all_sources), + ) + _ = _next1 + callbacks.append_primary_video_map_and_codec( + pass1, + plan=plan, + video_map=video_default_map, + offset_remap=pass1_offset_remap, + video=video, + bitrate_kbps=bitrate, + include_hdr_meta=False, + ) + pass1.extend(["-pass", "1", "-an", "-f", "null", os.devnull]) + + pass2 = _base(True) + metadata_inputs = callbacks.materialize_container_metadata_inputs( + config, + source_idx=source_idx, + next_input_index=len(all_sources) + len(sync_inputs), + plan=plan, + chapter_materialize_dir=chapter_materialize_dir, + chapter_probe_source=config.source, + ) + 
pass2.extend(metadata_inputs.input_args) + + track_assembly, pass2_offset_remap = callbacks.resolve_track_assembly_and_offset_remap( + cmd=pass2, + config=config, + plan=plan, + source_idx=source_idx, + track_input_paths=_build_track_input_paths_plan( + all_sources=all_sources, + sync_inputs=sync_inputs, + ), + start_input_index=metadata_inputs.next_input_index, + sync_remap=sync_remap, + video_default_map=sync_remap.get(video_key, video_default_map), + video_fallback_input=plan.video_source, + ) + callbacks.append_primary_video_map_and_codec( + pass2, + plan=plan, + video_map=track_assembly.video_map, + offset_remap=pass2_offset_remap, + video=video, + bitrate_kbps=bitrate, + ) + pass2.extend(["-pass", "2"]) + callbacks.append_common_streams_and_metadata( + pass2, + config=config, + source_idx=source_idx, + all_sources_count=len(all_sources), + plan=plan, + metadata_inputs=metadata_inputs, + offset_remap=pass2_offset_remap, + sync_remap=sync_remap, + strict_interleave=strict_interleave, + ) + pass2.append(str(config.output)) + return [pass1, pass2], live_session, _common_sync_cleanup_paths(sync_inputs) diff --git a/core/workflows/encode/runtime/direct_output.py b/core/workflows/encode/runtime/direct_output.py new file mode 100644 index 0000000..a9a95e7 --- /dev/null +++ b/core/workflows/encode/runtime/direct_output.py @@ -0,0 +1,233 @@ +from __future__ import annotations + +import tempfile +from concurrent.futures import ThreadPoolExecutor +from dataclasses import dataclass +from pathlib import Path +from typing import Callable + +from core.runner import TaskCancelledError, TaskSignals +from core.workflows.encode.models import EncodeConfig +from core.workflows.encode.planning.plan_models import EncodePlan +from core.workflows.remux_timeline_sync import LiveSyncSession + + +@dataclass(frozen=True) +class DirectOutputRunnerCallbacks: + check_cancelled: Callable[[TaskSignals | None], None] + log_step: Callable[[int, str], None] + log_info: Callable[[str], None] + 
uses_two_pass: Callable[[EncodeConfig], bool] + build_encode_plan: Callable[[EncodeConfig], EncodePlan] + build_runtime_two_pass_with_sync: Callable[ + ..., + tuple[list[list[str]], LiveSyncSession | None, list[Path]], + ] + build_runtime_single_pass_with_sync: Callable[ + ..., + tuple[list[str], LiveSyncSession | None, list[Path]], + ] + run_two_pass: Callable[[list[list[str]], Path | None, TaskSignals | None], TaskSignals] + run_cmd: Callable[[list[str], Path | None, str, Callable[[str], None], TaskSignals], str] + run_tool: Callable[[list[str], Path | None, str], TaskSignals] + bind_live_sync_cleanup: Callable[[TaskSignals, LiveSyncSession | None], None] + cleanup_two_pass_logs: Callable[[Path | None], None] + + +class DirectOutputRunner: + """Runner dédié au pipeline encode en sortie directe.""" + + def __init__(self, callbacks: DirectOutputRunnerCallbacks) -> None: + self._callbacks = callbacks + + def run( + self, + *, + config: EncodeConfig, + cleanup_paths: list[Path], + cwd: Path, + prep_signals: TaskSignals | None = None, + plan: EncodePlan | None = None, + ) -> TaskSignals: + cb = self._callbacks + cb.check_cancelled(prep_signals) + cb.log_step(5, "Construction de la commande ffmpeg (sortie directe)") + encode_plan = plan or cb.build_encode_plan(config) + + if len(encode_plan.all_sources) > 1: + return self.run_multisource_async( + config=config, + cleanup_paths=cleanup_paths, + cwd=cwd, + prep_signals=prep_signals, + plan=encode_plan, + ) + + chapter_dir: Path | None = None + if config.chapter_overrides: + chapter_dir = Path( + tempfile.mkdtemp( + prefix="enc_chapters_", + dir=str(config.work_dir) if config.work_dir else None, + ) + ) + cleanup_paths.append(chapter_dir) + + cb.check_cancelled(prep_signals) + if cb.uses_two_pass(config): + cb.log_step(6, "Préparation sync/remap + commandes ffmpeg (2 passes)") + cmds: list[list[str]] + live_sync_session: LiveSyncSession | None = None + sync_cleanup_paths: list[Path] = [] + try: + cmds, live_sync_session, 
sync_cleanup_paths = cb.build_runtime_two_pass_with_sync( + config, + chapter_materialize_dir=chapter_dir, + signals=prep_signals, + plan=encode_plan, + ) + cleanup_paths.extend(sync_cleanup_paths) + cb.check_cancelled(prep_signals) + cb.log_step(7, "Exécution ffmpeg en 2 passes (sortie directe)") + signals = cb.run_two_pass(cmds, cwd, prep_signals) + except Exception: + if live_sync_session is not None: + live_sync_session.close() + raise + cb.bind_live_sync_cleanup(signals, live_sync_session) + return signals + + cb.log_step(6, "Préparation sync/remap + commande ffmpeg (single pass)") + cmd: list[str] + live_sync_session = None + sync_cleanup_paths: list[Path] = [] + try: + cmd, live_sync_session, sync_cleanup_paths = cb.build_runtime_single_pass_with_sync( + config, + chapter_materialize_dir=chapter_dir, + signals=prep_signals, + plan=encode_plan, + ) + cleanup_paths.extend(sync_cleanup_paths) + cb.check_cancelled(prep_signals) + cb.log_step(7, "Exécution ffmpeg en single pass (sortie directe)") + if prep_signals is not None: + output = cb.run_cmd( + cmd, + cwd, + "ffmpeg", + lambda line: prep_signals.progress.emit(line), + prep_signals, + ) + prep_signals.finished.emit(output) + signals = prep_signals + else: + signals = cb.run_tool(cmd, cwd, "ffmpeg") + except Exception: + if live_sync_session is not None: + live_sync_session.close() + raise + cb.bind_live_sync_cleanup(signals, live_sync_session) + return signals + + def run_multisource_async( + self, + *, + config: EncodeConfig, + cleanup_paths: list[Path], + cwd: Path, + prep_signals: TaskSignals | None = None, + plan: EncodePlan | None = None, + ) -> TaskSignals: + cb = self._callbacks + signals = prep_signals or TaskSignals() + executor = ThreadPoolExecutor(max_workers=1) + + def _task() -> None: + chapter_dir: Path | None = None + live_sync_session: LiveSyncSession | None = None + sync_cleanup_paths: list[Path] = [] + is_two_pass = cb.uses_two_pass(config) + encode_plan = plan or 
cb.build_encode_plan(config) + + try: + cb.check_cancelled(signals) + if config.chapter_overrides: + chapter_dir = Path( + tempfile.mkdtemp( + prefix="enc_chapters_", + dir=str(config.work_dir) if config.work_dir else None, + ) + ) + cleanup_paths.append(chapter_dir) + + if is_two_pass: + cb.log_step(6, "Préparation sync/remap + commandes ffmpeg (2 passes)") + cmds, live_sync_session, sync_cleanup_paths = cb.build_runtime_two_pass_with_sync( + config, + chapter_materialize_dir=chapter_dir, + signals=signals, + plan=encode_plan, + ) + cleanup_paths.extend(sync_cleanup_paths) + if live_sync_session is not None: + for proc in live_sync_session.processes: + signals._register_proc(proc) + cb.check_cancelled(signals) + cb.log_step(7, "Exécution ffmpeg en 2 passes (sortie directe)") + + cb.log_info("Passe 1/2 (analyse)…") + cb.run_cmd( + cmds[0], + cwd, + "ffmpeg-pass1", + lambda line: signals.progress.emit(line), + signals, + ) + cb.check_cancelled(signals) + cb.log_info("Passe 2/2 (encodage)…") + output = cb.run_cmd( + cmds[1], + cwd, + "ffmpeg-pass2", + lambda line: signals.progress.emit(line), + signals, + ) + signals.finished.emit(output) + else: + cb.log_step(6, "Préparation sync/remap + commande ffmpeg (single pass)") + cmd, live_sync_session, sync_cleanup_paths = cb.build_runtime_single_pass_with_sync( + config, + chapter_materialize_dir=chapter_dir, + signals=signals, + plan=encode_plan, + ) + cleanup_paths.extend(sync_cleanup_paths) + if live_sync_session is not None: + for proc in live_sync_session.processes: + signals._register_proc(proc) + cb.check_cancelled(signals) + cb.log_step(7, "Exécution ffmpeg en single pass (sortie directe)") + output = cb.run_cmd( + cmd, + cwd, + "ffmpeg", + lambda line: signals.progress.emit(line), + signals, + ) + signals.finished.emit(output) + except TaskCancelledError: + signals.cancelled.emit() + except Exception as exc: + signals.failed.emit(str(exc), exc) + finally: + if live_sync_session is not None: + for proc in 
live_sync_session.processes: + signals._unregister_proc(proc) + live_sync_session.close() + if is_two_pass: + cb.cleanup_two_pass_logs(cwd) + executor.shutdown(wait=False) + + executor.submit(_task) + return signals diff --git a/core/workflows/encode/runtime/metadata_inject.py b/core/workflows/encode/runtime/metadata_inject.py new file mode 100644 index 0000000..5a56d49 --- /dev/null +++ b/core/workflows/encode/runtime/metadata_inject.py @@ -0,0 +1,333 @@ +from __future__ import annotations + +import shutil +import tempfile +from concurrent.futures import ThreadPoolExecutor +from dataclasses import dataclass +from pathlib import Path +from typing import Callable + +from core.runner import TaskCancelledError, TaskSignals +from core.workdir import remove_path +from core.workflows.encode.models import EncodeConfig, QualityMode +from core.workflows.encode.planning.track_assembly import build_track_input_paths, resolve_track_assembly +from core.workflows.encode.planning.plan_models import EncodePlan +from core.workflows.remux_timeline_sync import LiveSyncSession + + +@dataclass(frozen=True) +class MetadataInjectRunnerCallbacks: + ffmpeg_bin: str + bins: dict[str, str] + log_step: Callable[[int, str], None] + log_info: Callable[[str], None] + check_cancelled: Callable[[TaskSignals | None], None] + video_source_path: Callable[[EncodeConfig], Path] + build_video_only_two_pass: Callable[[EncodeConfig, Path], list[list[str]]] + build_video_only_cmd: Callable[[EncodeConfig, Path], list[str]] + wrap_injected_hevc_for_reconstruction: Callable[..., list[str]] + build_encode_plan: Callable[[EncodeConfig], EncodePlan] + source_input_index_map: Callable[[list[Path]], dict[Path, int]] + prepare_multisource_sync: Callable[..., tuple[dict[tuple[Path, int, str], tuple[int, int]], list[Path | str], LiveSyncSession | None, bool]] + sync_cleanup_paths: Callable[[list[Path | str]], list[Path]] + append_sync_inputs: Callable[[list[str], list[Path | str]], None] + 
prepare_container_metadata_inputs: Callable[..., tuple[int, int | None, int | None]] + ffmpeg_thread_args: Callable[[], list[str]] + ffmpeg_progress_args: Callable[[], list[str]] + video_map_key: Callable[[EncodeConfig], tuple[Path, int, str]] + append_offset_aux_inputs: Callable[..., tuple[int, dict[tuple[Path, int, str], tuple[int, int]]]] + build_offset_specs: Callable[..., object] + video_map_arg: Callable[..., str] + append_stream_maps_and_attachments: Callable[..., None] + append_strict_interleave_mux_flags: Callable[[list[str]], None] + append_container_metadata_args: Callable[..., None] + run_cmd: Callable[[list[str], TaskSignals, Path | None, Callable[[str], None] | None], str] + bind_matroska_segment_muxing_patch: Callable[[TaskSignals, Path], None] + bind_nfo_write: Callable[[TaskSignals, Path], None] + + +class MetadataInjectRunner: + """Runner dédié au pipeline d'injection DoVi/HDR10+.""" + + def __init__(self, callbacks: MetadataInjectRunnerCallbacks) -> None: + self._callbacks = callbacks + + def run( + self, + config: EncodeConfig, + *, + prep_signals: TaskSignals | None = None, + plan: EncodePlan | None = None, + ) -> TaskSignals: + cb = self._callbacks + signals = prep_signals or TaskSignals() + executor = None if prep_signals is not None else ThreadPoolExecutor(max_workers=1) + + def _task() -> None: + work = config.work_dir or Path(tempfile.gettempdir()) + work.mkdir(parents=True, exist_ok=True) + tmp_dir = tempfile.mkdtemp( + prefix="mediarecode_encode_", + dir=str(work), + ) + tmp = Path(tmp_dir) + ext_files: list[Path] = [] + live_sync_session: LiveSyncSession | None = None + sync_cleanup_paths: list[Path] = [] + + def _run(cmd: list[str]) -> str: + return cb.run_cmd( + cmd, + signals, + tmp, + lambda line: signals.progress.emit(line), + ) + + def _check() -> None: + if signals._cancel_event.is_set(): + raise TaskCancelledError() + + def _alloc(name: str, ref_size: int) -> Path: + _ = ref_size + return tmp / name + + def _free(path: Path) -> 
None: + try: + path.unlink(missing_ok=True) + except OSError: + pass + try: + ext_files.remove(path) + except ValueError: + pass + + try: + src_size_est = config.source.stat().st_size + video = config.video + if video is None: + raise ValueError("EncodeConfig.video is required for metadata injection") + + cb.log_step(5, "Extraction des métadonnées dynamiques (DoVi/HDR10+)") + rpu_bin = tmp / "rpu.bin" + if video.copy_dv: + signals.progress.emit("Extraction RPU Dolby Vision…") + _run([ + cb.bins["dovi_tool"], "extract-rpu", + "-i", str(cb.video_source_path(config)), "-o", str(rpu_bin), + ]) + _check() + + hdr10p_json = tmp / "hdr10p.json" + if video.copy_hdr10plus: + signals.progress.emit("Extraction métadonnées HDR10+…") + _run([ + cb.bins["hdr10plus_tool"], "extract", + str(cb.video_source_path(config)), "-o", str(hdr10p_json), + ]) + _check() + + cb.log_step(6, "Encodage vidéo seule (HEVC brut)") + enc_hevc = _alloc("enc.hevc", src_size_est) + signals.progress.emit("Encodage vidéo…") + if video.quality_mode == QualityMode.SIZE: + v_cmds = cb.build_video_only_two_pass(config, enc_hevc) + cb.log_info("Passe 1/2 (analyse)…") + _run(v_cmds[0]) + _check() + cb.log_info("Passe 2/2 (encodage)…") + _run(v_cmds[1]) + else: + _run(cb.build_video_only_cmd(config, enc_hevc)) + _check() + current_hevc = enc_hevc + + cb.log_step(7, "Injection HDR10+ puis DoVi (si demandé)") + if video.copy_hdr10plus and hdr10p_json.exists(): + cur_size = current_hevc.stat().st_size + out_hdr10p = _alloc("enc_hdr10p.hevc", cur_size) + signals.progress.emit("Injection métadonnées HDR10+…") + _run([ + cb.bins["hdr10plus_tool"], "inject", + "-i", str(current_hevc), + "-j", str(hdr10p_json), + "-o", str(out_hdr10p), + ]) + _free(current_hevc) + current_hevc = out_hdr10p + _check() + + if video.copy_dv and rpu_bin.exists(): + cur_size = current_hevc.stat().st_size + out_dv = _alloc("enc_dv.hevc", cur_size) + signals.progress.emit("Injection RPU Dolby Vision…") + _run([ + cb.bins["dovi_tool"], + 
"-m", video.dovi_profile, + "inject-rpu", + "-i", str(current_hevc), + "-r", str(rpu_bin), + "-o", str(out_dv), + ]) + _free(current_hevc) + current_hevc = out_dv + _check() + + cb.log_step(8, "Encapsulation timeline vidéo injectée") + wrapped_video = _alloc("enc_wrapped.mkv", current_hevc.stat().st_size) + signals.progress.emit("Encapsulation vidéo injectée…") + _run( + cb.wrap_injected_hevc_for_reconstruction( + source=config.source, + hevc_input=current_hevc, + mkv_output=wrapped_video, + ) + ) + _free(current_hevc) + current_video_input = wrapped_video + _check() + + cb.log_step(9, "Reconstruction finale du conteneur MKV") + signals.progress.emit("Reconstitution finale…") + encode_plan = plan or cb.build_encode_plan(config) + all_sources = list(encode_plan.all_sources) + extra_sources = all_sources[1:] + recon_source_idx = cb.source_input_index_map(all_sources) + sync_remap: dict[tuple[Path, int, str], tuple[int, int]] = {} + sync_inputs: list[Path | str] = [] + strict_interleave = False + + recon_cmd = [ + cb.ffmpeg_bin, + "-hide_banner", + "-y", + *cb.ffmpeg_progress_args(), + "-i", + str(current_video_input), + "-i", + str(config.source), + ] + for sp in extra_sources: + recon_cmd.extend(["-i", str(sp)]) + + sync_remap, sync_inputs, live_sync_session, strict_interleave = cb.prepare_multisource_sync( + config=config, + all_sources=all_sources, + sync_base_input_idx=2 + len(extra_sources), + work_dir=tmp, + signals=signals, + allow_live=True, + plan=encode_plan, + ) + sync_cleanup_paths = cb.sync_cleanup_paths(sync_inputs) + cb.append_sync_inputs(recon_cmd, sync_inputs) + if live_sync_session is not None: + for proc in live_sync_session.processes: + signals._register_proc(proc) + + next_input_index, chapter_input_index, tag_input_index = cb.prepare_container_metadata_inputs( + recon_cmd, + config, + source_idx=recon_source_idx, + next_input_index=2 + len(extra_sources) + len(sync_inputs), + plan=encode_plan, + chapter_materialize_dir=tmp, + 
chapter_probe_source=config.source, + ) + + recon_cmd.extend(cb.ffmpeg_thread_args()) + resolved_subtitle_tracks = list(encode_plan.resolved_subtitle_tracks) + video_key = cb.video_map_key(config) + video_default_map = (0, 0) + track_assembly = resolve_track_assembly( + config, + encode_plan, + source_idx=recon_source_idx, + track_input_paths=build_track_input_paths( + leading_inputs=[current_video_input], + all_sources=all_sources, + sync_inputs=sync_inputs, + ), + sync_remap=sync_remap, + video_default_map=video_default_map, + video_fallback_input=current_video_input, + ) + + next_input_index, offset_remap = cb.append_offset_aux_inputs( + recon_cmd, + cb.build_offset_specs( + config, + track_mappings=list(track_assembly.track_mappings), + offset_lookup=dict(encode_plan.offset_lookup), + ), + start_input_index=next_input_index, + ) + _ = next_input_index + + recon_cmd.extend([ + "-map", + cb.video_map_arg( + track_assembly.video_map, + offset_remap=offset_remap, + map_key=video_key, + ), + "-c:v", + "copy", + ]) + cb.append_stream_maps_and_attachments( + recon_cmd, + config, + source_idx=recon_source_idx, + subtitle_copy_input_indices=list(range(1, 2 + len(extra_sources))), + sync_remap=sync_remap, + offset_remap=offset_remap, + subtitle_tracks_override=resolved_subtitle_tracks, + force_copy_subtitles_wildcard=(config.copy_subtitles and not encode_plan.subtitles_resolved), + ) + if strict_interleave: + cb.append_strict_interleave_mux_flags(recon_cmd) + + cb.append_container_metadata_args( + recon_cmd, + config, + default_metadata_input_index=0, + default_chapter_input_index=1, + chapter_input_index=chapter_input_index, + tag_input_index=tag_input_index, + plan=encode_plan, + ) + recon_cmd.append(str(config.output)) + _run(recon_cmd) + + signals.finished.emit(f"Encodage terminé → {config.output.name}") + + except TaskCancelledError: + signals.cancelled.emit() + except Exception as exc: + signals.failed.emit(str(exc), exc) + finally: + if executor is not None: + 
executor.shutdown(wait=False) + if live_sync_session is not None: + for proc in live_sync_session.processes: + signals._unregister_proc(proc) + live_sync_session.close() + for path in sync_cleanup_paths: + try: + remove_path(path) + except OSError: + pass + for p in list(ext_files): + try: + p.unlink(missing_ok=True) + except OSError: + pass + shutil.rmtree(tmp_dir, ignore_errors=True) + + if prep_signals is None: + cb.bind_matroska_segment_muxing_patch(signals, config.output) + cb.bind_nfo_write(signals, config.output) + assert executor is not None + executor.submit(_task) + else: + _task() + return signals diff --git a/core/workflows/encode/runtime/multi_video.py b/core/workflows/encode/runtime/multi_video.py new file mode 100644 index 0000000..77b6d9b --- /dev/null +++ b/core/workflows/encode/runtime/multi_video.py @@ -0,0 +1,510 @@ +from __future__ import annotations + +import tempfile +from concurrent.futures import ThreadPoolExecutor +from dataclasses import dataclass +from pathlib import Path +from typing import Callable, cast + +from core.runner import TaskCancelledError, TaskSignals +from core.workdir import remove_path +from core.workflows.encode.models import EncodeConfig, EncodeError, QualityMode, VideoEncodeSettings +from core.workflows.encode.planning.plan_models import EncodePlan +from core.workflows.encode.planning.track_assembly import build_track_input_paths, resolve_track_assembly +from core.workflows.encode.runtime_helpers import ( + VideoTrackPreparationOrchestrator, + VideoTrackPrepSpec, + VideoTrackPrepTask, +) +from core.workflows.remux_timeline_sync import LiveSyncSession + + +@dataclass(frozen=True) +class PreparedVideoInput: + input_args: list[str] + path: Path | str + map_arg: str + + +@dataclass(frozen=True) +class MultiVideoPipelineRunnerCallbacks: + ffmpeg_bin: str + bins: dict[str, str] + max_parallel_video_encodes: int + check_cancelled: Callable[[TaskSignals | None], None] + build_encode_plan: Callable[[EncodeConfig], EncodePlan] + 
video_tracks: Callable[[EncodeConfig], list[VideoEncodeSettings]] + video_source_from_settings: Callable[[EncodeConfig, VideoEncodeSettings], Path] + video_stream_from_settings: Callable[[VideoEncodeSettings], int] + track_offset_ms: Callable[..., int] + offset_input_args: Callable[[int], list[str]] + parallel_video_worker_thread_count: Callable[..., int | None] + video_encode_resource_key: Callable[[VideoEncodeSettings], str] + parallel_video_min_available_ram_bytes: Callable[[], int] + video_prep_estimated_ram_bytes: Callable[[VideoTrackPrepSpec], int] + format_bytes: Callable[[int], str] + available_ram_bytes: Callable[[], int] + source_input_index_map: Callable[[list[Path], int], dict[Path, int]] + prepare_multisource_sync: Callable[..., tuple[dict[tuple[Path, int, str], tuple[int, int]], list[Path | str], LiveSyncSession | None, bool]] + sync_cleanup_paths: Callable[[list[Path | str]], list[Path]] + append_sync_inputs: Callable[[list[str], list[Path | str]], None] + prepare_container_metadata_inputs: Callable[..., tuple[int, int | None, int | None]] + ffmpeg_thread_args: Callable[[], list[str]] + append_offset_aux_inputs: Callable[..., tuple[int, dict[tuple[Path, int, str], tuple[int, int]]]] + build_offset_specs: Callable[..., object] + append_stream_maps_and_attachments: Callable[..., None] + append_strict_interleave_mux_flags: Callable[[list[str]], None] + append_container_metadata_args: Callable[..., None] + ffmpeg_progress_args: Callable[[], list[str]] + run_cmd: Callable[[list[str], Path | None, str, Callable[[str], None], TaskSignals], str] + log_step: Callable[[int, str], None] + log_info: Callable[[str], None] + ui_encode_progress_message: Callable[..., str] + build_video_only_two_pass_for_track: Callable[..., list[list[str]]] + cleanup_two_pass_logs_for_prefix: Callable[[Path], None] + build_video_only_cmd_for_track: Callable[..., list[str]] + wrap_injected_hevc_for_reconstruction: Callable[..., list[str]] + build_multi_video_track_encode_commands: 
Callable[..., list[list[str]]] + two_pass_log_prefix: Callable[[Path, str], Path] + + +class MultiVideoPipelineRunner: + """Runner dédié au pipeline multi-pistes vidéo.""" + + def __init__(self, callbacks: MultiVideoPipelineRunnerCallbacks) -> None: + self._callbacks = callbacks + + def prepare_multi_video_track( + self, + *, + config: EncodeConfig, + spec: VideoTrackPrepSpec, + work_dir: Path, + total_tracks: int, + thread_count: int | None, + signals: TaskSignals, + run_cmd: Callable[[list[str], str], str], + ) -> tuple[PreparedVideoInput, list[Path]]: + cb = self._callbacks + order = spec.order + video = spec.video + source = spec.source + offset_ms = spec.offset_ms + index = order + 1 + local_cleanup: list[Path] = [] + + cb.check_cancelled(signals) + cb.log_info(f"Préparation vidéo {index}/{total_tracks}…") + + if video.copy_dv or video.copy_hdr10plus: + rpu_bin = work_dir / f"video_{index}.rpu.bin" + hdr10p_json = work_dir / f"video_{index}.hdr10plus.json" + current_hevc = work_dir / f"video_{index}.enc.hevc" + if video.copy_dv: + run_cmd([ + cb.bins["dovi_tool"], "extract-rpu", + "-i", str(source), "-o", str(rpu_bin), + ], f"dovi-extract-{index}") + local_cleanup.append(rpu_bin) + if video.copy_hdr10plus: + run_cmd([ + cb.bins["hdr10plus_tool"], "extract", + str(source), "-o", str(hdr10p_json), + ], f"hdr10plus-extract-{index}") + local_cleanup.append(hdr10p_json) + + if video.quality_mode == QualityMode.SIZE: + passlog_prefix = cb.two_pass_log_prefix(work_dir, f"video_{index}") + try: + for pass_index, cmd in enumerate( + cb.build_video_only_two_pass_for_track( + config, + video, + source, + current_hevc, + offset_ms=offset_ms, + passlog_prefix=passlog_prefix, + thread_count=thread_count, + ), + start=1, + ): + run_cmd(cmd, f"ffmpeg-video-{index}-pass{pass_index}") + finally: + cb.cleanup_two_pass_logs_for_prefix(passlog_prefix) + else: + run_cmd( + cb.build_video_only_cmd_for_track( + config, + video, + source, + current_hevc, + offset_ms=offset_ms, + 
thread_count=thread_count, + ), + f"ffmpeg-video-{index}", + ) + local_cleanup.append(current_hevc) + + if video.copy_hdr10plus and hdr10p_json.exists(): + hdr10_out = work_dir / f"video_{index}.hdr10plus.hevc" + run_cmd([ + cb.bins["hdr10plus_tool"], "inject", + "-i", str(current_hevc), + "-j", str(hdr10p_json), + "-o", str(hdr10_out), + ], f"hdr10plus-inject-{index}") + local_cleanup.append(hdr10_out) + current_hevc = hdr10_out + if video.copy_dv and rpu_bin.exists(): + dovi_out = work_dir / f"video_{index}.dovi.hevc" + run_cmd([ + cb.bins["dovi_tool"], + "-m", video.dovi_profile, + "inject-rpu", + "-i", str(current_hevc), + "-r", str(rpu_bin), + "-o", str(dovi_out), + ], f"dovi-inject-{index}") + local_cleanup.append(dovi_out) + current_hevc = dovi_out + + wrapped = work_dir / f"video_{index}.wrapped.mkv" + run_cmd( + cb.wrap_injected_hevc_for_reconstruction( + source=source, + hevc_input=current_hevc, + mkv_output=wrapped, + ), + f"ffmpeg-wrap-video-{index}", + ) + local_cleanup.append(wrapped) + return PreparedVideoInput( + input_args=[], + path=wrapped, + map_arg=f"{order}:v:0", + ), local_cleanup + + output_path = work_dir / f"video_{index}.mkv" + passlog_prefix = ( + cb.two_pass_log_prefix(work_dir, f"video_{index}") + if video.quality_mode == QualityMode.SIZE + else None + ) + commands = cb.build_multi_video_track_encode_commands( + config, + video, + source, + output_path, + offset_ms=offset_ms, + passlog_prefix=passlog_prefix, + thread_count=thread_count, + ) + try: + for pass_index, cmd in enumerate(commands, start=1): + label = ( + f"ffmpeg-video-{index}" + if len(commands) == 1 + else f"ffmpeg-video-{index}-pass{pass_index}" + ) + run_cmd(cmd, label) + finally: + if passlog_prefix is not None: + cb.cleanup_two_pass_logs_for_prefix(passlog_prefix) + local_cleanup.append(output_path) + return PreparedVideoInput( + input_args=[], + path=output_path, + map_arg=f"{order}:v:0", + ), local_cleanup + + def run( + self, + config: EncodeConfig, + cleanup_paths: 
list[Path], + *, + prep_signals: TaskSignals | None = None, + plan: EncodePlan | None = None, + ) -> TaskSignals: + cb = self._callbacks + signals = prep_signals or TaskSignals() + executor = None if prep_signals is not None else ThreadPoolExecutor(max_workers=1) + + def _run_pipeline() -> None: + work_dir = config.work_dir or config.source.parent + encode_plan = plan or cb.build_encode_plan(config) + chapter_dir: Path | None = None + prepared_inputs: list[PreparedVideoInput | None] = [] + live_sync_session: LiveSyncSession | None = None + sync_cleanup_paths: list[Path] = [] + + def _run(cmd: list[str], *, cwd: Path | None = work_dir, label: str = "ffmpeg") -> str: + is_multi_video_ffmpeg = label.startswith("ffmpeg-video-") + + def _progress(line: str) -> None: + if is_multi_video_ffmpeg and not line.startswith("$ "): + signals.progress.emit( + cb.ui_encode_progress_message(label=label, event="line", line=line) + ) + return + signals.progress.emit(line) + + output = cb.run_cmd( + cmd, + cwd, + label, + _progress, + signals, + ) + if is_multi_video_ffmpeg: + signals.progress.emit( + cb.ui_encode_progress_message(label=label, event="done") + ) + return output + + try: + if config.chapter_overrides: + chapter_dir = Path( + tempfile.mkdtemp( + prefix="enc_multi_video_chapters_", + dir=str(config.work_dir) if config.work_dir else None, + ) + ) + cleanup_paths.append(chapter_dir) + + video_tracks = cb.video_tracks(config) + if not video_tracks: + raise EncodeError("Aucune piste vidéo configurée pour le pipeline multi-pistes.") + prepared_inputs = [None] * len(video_tracks) + + track_specs: list[VideoTrackPrepSpec] = [] + for order, video in enumerate(video_tracks): + source = cb.video_source_from_settings(config, video) + stream_index = cb.video_stream_from_settings(video) + offset_ms = cb.track_offset_ms( + dict(encode_plan.offset_lookup), + track_type="video", + source_path=source, + stream_index=stream_index, + allow_single_video_source_fallback=False, + ) + 
track_specs.append( + VideoTrackPrepSpec( + order=order, + video=video, + source=source, + stream_index=stream_index, + offset_ms=offset_ms, + ) + ) + + transcode_specs: list[VideoTrackPrepSpec] = [] + for spec in track_specs: + if spec.video.codec == "copy": + order = spec.order + prepared_inputs[order] = PreparedVideoInput( + input_args=cb.offset_input_args(spec.offset_ms), + path=spec.source, + map_arg=f"{order}:{spec.stream_index}", + ) + continue + transcode_specs.append(spec) + + max_parallel = max(1, int(cb.max_parallel_video_encodes)) + prep_thread_count = cb.parallel_video_worker_thread_count( + resource_keys=[ + cb.video_encode_resource_key(spec.video) + for spec in transcode_specs + ], + max_parallel=max_parallel, + ) + if transcode_specs and max_parallel > 1: + cb.log_info( + f"Préparation vidéo parallèle activée ({min(max_parallel, len(transcode_specs))} piste(s) max)." + ) + if prep_thread_count is not None: + cb.log_info( + "Répartition threads FFmpeg sur la préparation vidéo parallèle: " + f"{prep_thread_count} thread(s) par worker." + ) + + if transcode_specs: + min_available_ram = cb.parallel_video_min_available_ram_bytes() + if max_parallel > 1 and min_available_ram > 0: + cb.log_info( + "Garde-fou RAM parallèle actif: réserve minimale " + f"{cb.format_bytes(min_available_ram)}." + ) + + ram_wait_notified: set[int] = set() + + def _on_ram_wait(order: int, required: int, available: int) -> None: + if order in ram_wait_notified: + return + ram_wait_notified.add(order) + cb.log_info( + "Préparation vidéo différée par le garde-fou RAM " + f"(piste #{order + 1}, libre={cb.format_bytes(available)}, " + f"requis={cb.format_bytes(required)})." 
+ ) + + orchestrator = VideoTrackPreparationOrchestrator( + max_parallel=max_parallel, + cancel_cb=lambda: cb.check_cancelled(signals), + on_worker_failure=signals.cancel, + min_available_ram_bytes=min_available_ram, + available_ram_cb=cb.available_ram_bytes, + on_ram_wait=_on_ram_wait, + ) + + tasks = [ + VideoTrackPrepTask( + order=spec.order, + resource_key=cb.video_encode_resource_key(spec.video), + estimated_ram_bytes=cb.video_prep_estimated_ram_bytes(spec), + run=cast( + Callable[[], tuple[dict[str, object], list[Path]]], + lambda spec=spec: self.prepare_multi_video_track( + config=config, + spec=spec, + work_dir=work_dir, + total_tracks=len(video_tracks), + thread_count=prep_thread_count, + signals=signals, + run_cmd=lambda cmd, label: _run(cmd, label=label), + ), + ), + ) + for spec in transcode_specs + ] + + for order, prepared, local_cleanup in orchestrator.execute(tasks): + prepared_inputs[order] = cast(PreparedVideoInput, prepared) + cleanup_paths.extend(local_cleanup) + + if any(spec is None for spec in prepared_inputs): + raise EncodeError("Préparation vidéo incomplète: au moins une piste n'a pas été préparée.") + prepared_inputs_ready: list[PreparedVideoInput] = [ + cast(PreparedVideoInput, spec) + for spec in prepared_inputs + if spec is not None + ] + + cb.log_step(5, "Reconstruction finale multi-pistes vidéo") + all_sources = list(encode_plan.all_sources) + source_idx = cb.source_input_index_map(all_sources, len(prepared_inputs_ready)) + sync_remap: dict[tuple[Path, int, str], tuple[int, int]] = {} + sync_inputs: list[Path | str] = [] + strict_interleave = False + + final_cmd: list[str] = [cb.ffmpeg_bin, "-hide_banner", "-y"] + final_cmd.extend(cb.ffmpeg_progress_args()) + for spec in prepared_inputs_ready: + final_cmd.extend([*spec.input_args, "-i", str(spec.path)]) + for src in all_sources: + final_cmd.extend(["-i", str(src)]) + + sync_remap, sync_inputs, live_sync_session, strict_interleave = cb.prepare_multisource_sync( + config=config, + 
all_sources=all_sources, + sync_base_input_idx=len(prepared_inputs_ready) + len(all_sources), + work_dir=work_dir, + signals=signals, + allow_live=True, + plan=encode_plan, + ) + sync_cleanup_paths = cb.sync_cleanup_paths(sync_inputs) + cb.append_sync_inputs(final_cmd, sync_inputs) + if live_sync_session is not None: + for proc in live_sync_session.processes: + signals._register_proc(proc) + + next_input_index, chapter_input_index, tag_input_index = cb.prepare_container_metadata_inputs( + final_cmd, + config, + source_idx=source_idx, + next_input_index=len(prepared_inputs_ready) + len(all_sources) + len(sync_inputs), + plan=encode_plan, + chapter_materialize_dir=chapter_dir, + chapter_probe_source=config.source, + ) + final_cmd.extend(cb.ffmpeg_thread_args()) + resolved_subtitle_tracks = list(encode_plan.resolved_subtitle_tracks) + track_assembly = resolve_track_assembly( + config, + encode_plan, + source_idx=source_idx, + track_input_paths=build_track_input_paths( + leading_inputs=[spec.path for spec in prepared_inputs_ready], + all_sources=all_sources, + sync_inputs=sync_inputs, + ), + sync_remap=sync_remap, + include_video=False, + ) + + next_input_index, offset_remap = cb.append_offset_aux_inputs( + final_cmd, + cb.build_offset_specs( + config, + track_mappings=list(track_assembly.track_mappings), + offset_lookup=dict(encode_plan.offset_lookup), + ), + start_input_index=next_input_index, + ) + _ = next_input_index + + for out_idx, spec in enumerate(prepared_inputs_ready): + final_cmd.extend(["-map", str(spec.map_arg)]) + final_cmd.extend([f"-c:v:{out_idx}", "copy"]) + + cb.append_stream_maps_and_attachments( + final_cmd, + config, + source_idx=source_idx, + subtitle_copy_input_indices=list(source_idx.values()), + sync_remap=sync_remap, + offset_remap=offset_remap, + subtitle_tracks_override=resolved_subtitle_tracks, + force_copy_subtitles_wildcard=(config.copy_subtitles and not encode_plan.subtitles_resolved), + ) + if strict_interleave: + 
cb.append_strict_interleave_mux_flags(final_cmd) + + default_source_index = source_idx.get(config.source, len(prepared_inputs_ready)) + cb.append_container_metadata_args( + final_cmd, + config, + default_metadata_input_index=default_source_index, + default_chapter_input_index=default_source_index, + chapter_input_index=chapter_input_index, + tag_input_index=tag_input_index, + include_copy_video_stream_passthrough=False, + plan=encode_plan, + ) + final_cmd.append(str(config.output)) + output = _run(final_cmd, label="ffmpeg-multi-video") + signals.finished.emit(output) + except TaskCancelledError: + signals.cancelled.emit() + except Exception as exc: + signals.failed.emit(str(exc), exc) + finally: + if live_sync_session is not None: + for proc in live_sync_session.processes: + signals._unregister_proc(proc) + live_sync_session.close() + for path in sync_cleanup_paths: + try: + remove_path(path) + except OSError: + pass + if executor is not None: + executor.shutdown(wait=False) + + if prep_signals is not None: + _run_pipeline() + return signals + + assert executor is not None + executor.submit(_run_pipeline) + return signals diff --git a/core/workflows/encode/runtime/ram_buffer.py b/core/workflows/encode/runtime/ram_buffer.py new file mode 100644 index 0000000..06f9164 --- /dev/null +++ b/core/workflows/encode/runtime/ram_buffer.py @@ -0,0 +1,160 @@ +""" +core/workflows/encode/runtime/ram_buffer.py — Helpers RAM/SHM cross-platform. + +Fonctions publiques : + total_ram_bytes() — RAM physique totale (Linux/macOS/Windows) + available_ram_bytes() — RAM disponible + macos_available_ram() — détail macOS via vm_stat + ram_buffer_dir() — répertoire RAM-backed (/dev/shm) ou None + shm_path() — résout un chemin RAM ou disque selon seuils + +Conventions : + - Toutes les fonctions sont pures et thread-safe. + - Aucun état partagé : `shm_path` reçoit explicitement les paramètres de configuration. 
def total_ram_bytes() -> int:
    """Total physical RAM in bytes, or 0 when it cannot be determined.

    Linux parses ``/proc/meminfo``, macOS shells out to ``sysctl hw.memsize``,
    Windows queries ``GlobalMemoryStatusEx``.  Every OS-level failure is
    swallowed so this helper never raises.
    """
    try:
        if sys.platform == "linux":
            meminfo = Path("/proc/meminfo").read_text(encoding="ascii")
            found = re.search(r"MemTotal:\s+(\d+)\s+kB", meminfo)
            return int(found.group(1)) * 1024 if found else 0
        if sys.platform == "darwin":
            # Lazy project import keeps the module importable in isolation.
            from core.subprocess_utils import subprocess_text_kwargs

            probe = subprocess.run(
                ["sysctl", "-n", "hw.memsize"],
                capture_output=True,
                check=False,
                timeout=5,
                **subprocess_text_kwargs(),
            )
            raw = probe.stdout.strip()
            return int(raw) if probe.returncode == 0 and raw.isdigit() else 0
        if sys.platform == "win32":
            return _win_mem_status().ullTotalPhys
    except Exception:
        pass
    return 0


def available_ram_bytes() -> int:
    """Currently available RAM in bytes (Linux ``MemAvailable`` semantics).

    Returns 0 when the value cannot be determined; never raises.
    """
    try:
        if sys.platform == "linux":
            meminfo = Path("/proc/meminfo").read_text(encoding="ascii")
            found = re.search(r"MemAvailable:\s+(\d+)\s+kB", meminfo)
            return int(found.group(1)) * 1024 if found else 0
        if sys.platform == "darwin":
            return macos_available_ram()
        if sys.platform == "win32":
            return _win_mem_status().ullAvailPhys
    except Exception:
        pass
    return 0


def macos_available_ram() -> int:
    """Available RAM on macOS via ``vm_stat``.

    Counts free + inactive + speculative + purgeable pages multiplied by the
    reported page size (defaulting to 4096 when the header is absent).
    """
    # Lazy project import keeps the module importable in isolation.
    from core.subprocess_utils import subprocess_text_kwargs

    probe = subprocess.run(
        ["vm_stat"], capture_output=True, check=False, timeout=5, **subprocess_text_kwargs()
    )
    if probe.returncode != 0:
        return 0
    header = re.search(r"page size of (\d+) bytes", probe.stdout)
    page_size = int(header.group(1)) if header else 4096
    reclaimable = ("Pages free", "Pages inactive", "Pages speculative", "Pages purgeable")
    page_total = sum(
        int(hit.group(1))
        for hit in (
            re.search(rf"{re.escape(label)}:\s*(\d+)", probe.stdout) for label in reclaimable
        )
        if hit is not None
    )
    return page_total * page_size


def _win_mem_status():
    """Return a filled ``MEMORYSTATUSEX`` structure (Windows only)."""

    class _MemoryStatusEx(ctypes.Structure):
        # Field layout mirrors the Win32 MEMORYSTATUSEX structure exactly.
        _fields_ = [
            ("dwLength", ctypes.c_ulong),
            ("dwMemoryLoad", ctypes.c_ulong),
            ("ullTotalPhys", ctypes.c_ulonglong),
            ("ullAvailPhys", ctypes.c_ulonglong),
            ("ullTotalPageFile", ctypes.c_ulonglong),
            ("ullAvailPageFile", ctypes.c_ulonglong),
            ("ullTotalVirtual", ctypes.c_ulonglong),
            ("ullAvailVirtual", ctypes.c_ulonglong),
            ("ullAvailExtendedVirtual", ctypes.c_ulonglong),
        ]

    status = _MemoryStatusEx()
    status.dwLength = ctypes.sizeof(status)
    ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(status))  # type: ignore[attr-defined]
    return status


def ram_buffer_dir() -> Path | None:
    """RAM-backed scratch directory, or None when the platform has none.

    ``/dev/shm`` is probed on Linux (kernel tmpfs) and macOS when present and
    writable; Windows has no standard equivalent.
    """
    if sys.platform not in ("linux", "darwin"):
        return None
    candidate = Path("/dev/shm")
    if candidate.is_dir() and os.access(candidate, os.W_OK):
        return candidate
    return None


def shm_path(
    tmp: Path,
    name: str,
    file_size: int,
    *,
    enabled: bool,
    threshold_pct: int,
) -> Path:
    """Pick a RAM-backed path for ``name`` when safe, else fall back to ``tmp``.

    The RAM path is returned only when all of the following hold:
      1. ``enabled`` is True;
      2. a RAM-backed directory exists (see :func:`ram_buffer_dir`);
      3. after loading ``file_size`` bytes, at least ``threshold_pct`` percent
         of total RAM would remain available, i.e.
         ``available - file_size >= total * threshold_pct / 100``.
    """
    disk_fallback = tmp / name
    if not enabled:
        return disk_fallback
    ram_dir = ram_buffer_dir()
    if ram_dir is None:
        return disk_fallback
    total = total_ram_bytes()
    available = available_ram_bytes()
    if total <= 0 or available <= 0:
        # No reliable reading: be conservative and stay on disk.
        return disk_fallback
    keep_free = int(total * threshold_pct / 100)
    return ram_dir / name if available - file_size >= keep_free else disk_fallback


__all__ = [
    "total_ram_bytes",
    "available_ram_bytes",
    "macos_available_ram",
    "ram_buffer_dir",
    "shm_path",
]


# --- storage_guard helpers -------------------------------------------------


def format_bytes(value: int) -> str:
    """Human-readable binary size, e.g. ``1536 -> '1.5 KiB'`` (floors at 0 B)."""
    amount = float(max(0, value))
    for unit in ("B", "KiB", "MiB", "GiB"):
        if amount < 1024.0:
            return f"{amount:.1f} {unit}"
        amount /= 1024.0
    return f"{amount:.1f} TiB"
def format_bytes(value: int) -> str:
    """Human-readable binary size, e.g. ``1536 -> '1.5 KiB'`` (floors at 0 B)."""
    amount = float(max(0, value))
    for unit in ("B", "KiB", "MiB", "GiB"):
        if amount < 1024.0:
            return f"{amount:.1f} {unit}"
        amount /= 1024.0
    return f"{amount:.1f} TiB"


def estimate_duration_seconds(
    config: EncodeConfig,
    *,
    probe_duration_seconds: Callable[[Path], float | None],
) -> float:
    """Best-effort duration of the source in seconds.

    Prefers the duration declared in ``config``, then an ffprobe-style probe of
    the source file, and finally a fixed one-hour fallback so downstream size
    estimates always have something to work with.
    """
    declared = config.duration_s
    if declared is not None and declared > 0:
        return float(declared)
    probed = probe_duration_seconds(config.source)
    if probed is not None and probed > 0:
        return float(probed)
    return 3600.0


def estimate_inject_video_bytes(
    config: EncodeConfig,
    *,
    duration_s: float,
    source_size: int,
    size_to_bitrate_kbps: Callable[[EncodeConfig], int],
) -> int:
    """Estimated size in bytes of the encoded video stream for the inject path.

    SIZE mode derives a bitrate from the target size, BITRATE mode uses the
    configured bitrate, and quality-based modes fall back to a source-size
    heuristic (at least 75 % of the source).

    Raises:
        EncodeError: when ``config.video`` is missing.
    """
    video = config.video
    if video is None:
        raise EncodeError("Configuration vidéo absente pour l'estimation d'injection")
    if video.quality_mode == QualityMode.SIZE:
        kbps = size_to_bitrate_kbps(config)
    elif video.quality_mode == QualityMode.BITRATE:
        kbps = max(1, int(video.bitrate_kbps or 1))
    else:
        # CRF-style modes carry no target bitrate: use a source-size heuristic.
        return max(source_size, int(source_size * 3 / 4))
    return int((kbps * 1000 / 8) * duration_s)


def estimate_inject_storage_requirements(
    config: EncodeConfig,
    *,
    probe_duration_seconds: Callable[[Path], float | None],
    size_to_bitrate_kbps: Callable[[EncodeConfig], int],
) -> tuple[int, int]:
    """
    Retourne (work_required_bytes, output_required_bytes) pour le chemin injection.
    """
    source_size = max(0, config.source.stat().st_size if config.source.exists() else 0)
    duration_s = estimate_duration_seconds(
        config,
        probe_duration_seconds=probe_duration_seconds,
    )
    # Floor the video estimate at 128 MiB so tiny inputs still reserve headroom.
    video_bytes = max(
        128 * 1024 * 1024,
        estimate_inject_video_bytes(
            config,
            duration_s=duration_s,
            source_size=source_size,
            size_to_bitrate_kbps=size_to_bitrate_kbps,
        ),
    )

    video = config.video
    if video is None:
        raise EncodeError("Configuration vidéo absente pour l'estimation de stockage")

    # Sidecar budget: DV / HDR10+ metadata dumps and materialized chapters.
    sidecars = 32 * 1024 * 1024
    if video.copy_dv:
        sidecars += 64 * 1024 * 1024
    if video.copy_hdr10plus:
        sidecars += 64 * 1024 * 1024
    if config.chapter_overrides:
        sidecars += 16 * 1024 * 1024

    # Work dir holds the encoded stream plus its injected copy simultaneously.
    work_required = (2 * video_bytes) + sidecars

    encoded_audio_bytes = 0
    for audio in config.audio_tracks:
        if audio.codec == "copy":
            continue
        if audio.codec == "flac":
            # Lossless: assume at least ~1 Mb/s regardless of configured rate.
            kbps = max(1000, int(audio.bitrate_kbps or 0))
        else:
            kbps = normalize_audio_bitrate_kbps(
                audio.codec,
                audio.bitrate_kbps,
                audio.input_channels,
                None,
                audio.input_channel_layout,
            )
        encoded_audio_bytes += int((kbps * 1000 / 8) * duration_s)

    extra_attachments_bytes = sum(
        max(0, Path(p).stat().st_size)
        for p in config.extra_attachments
        if Path(p).exists()
    )
    output_required = max(
        source_size,
        video_bytes + encoded_audio_bytes + extra_attachments_bytes,
    ) + (64 * 1024 * 1024)
    return work_required, output_required


def ensure_inject_storage_available(
    config: EncodeConfig,
    *,
    estimate_requirements: Callable[[EncodeConfig], tuple[int, int]],
    log_info: Callable[[str], None],
    ram_buffer_enabled: bool,
    ram_buffer_dir: Callable[[], Path | None],
    disk_usage: Callable[[str | bytes | os.PathLike[str] | os.PathLike[bytes]], shutil._ntuple_diskusage] = shutil.disk_usage,
    stat: Callable[[str | bytes | os.PathLike[str] | os.PathLike[bytes]], os.stat_result] = os.stat,
    temp_dir: Callable[[], str] = tempfile.gettempdir,
    format_bytes_fn: Callable[[int], str] = format_bytes,
) -> None:
    """
    Vérifie l'espace libre avant le chemin d'injection DV/HDR10+.

    Raises:
        EncodeError: when either the work directory or the output directory
            lacks the estimated free space (combined when both live on the
            same filesystem).
    """
    needed_work, needed_output = estimate_requirements(config)

    tmp_root = config.work_dir or Path(temp_dir())
    out_root = config.output.parent
    for root in (tmp_root, out_root):
        root.mkdir(parents=True, exist_ok=True)

    free_work = disk_usage(tmp_root).free
    free_output = disk_usage(out_root).free

    log_info(
        "Estimation espace injection: "
        f"temp≈{format_bytes_fn(needed_work)} sur {tmp_root} ; "
        f"sortie≈{format_bytes_fn(needed_output)} sur {out_root}."
    )

    if ram_buffer_enabled and ram_buffer_dir() is not None:
        log_info("Buffer RAM actif: estimation disque conservative (fallback disque).")

    # Same filesystem ⇒ both budgets compete for one pool of free space.
    try:
        shared_volume = stat(tmp_root).st_dev == stat(out_root).st_dev
    except OSError:
        shared_volume = False

    if shared_volume:
        combined = needed_work + needed_output
        free = min(free_work, free_output)
        if free < combined:
            raise EncodeError(
                "Espace disque insuffisant pour l'injection DoVi/HDR10+ "
                f"(requis≈{format_bytes_fn(combined)}, libre≈{format_bytes_fn(free)} "
                f"sur {out_root})."
            )
        return

    if free_work < needed_work:
        raise EncodeError(
            "Espace disque insuffisant pour les temporaires d'injection "
            f"(requis≈{format_bytes_fn(needed_work)}, "
            f"libre≈{format_bytes_fn(free_work)} sur {tmp_root})."
        )
    if free_output < needed_output:
        raise EncodeError(
            "Espace disque insuffisant pour le fichier de sortie "
            f"(requis≈{format_bytes_fn(needed_output)}, "
            f"libre≈{format_bytes_fn(free_output)} sur {out_root})."
        )


# --- runtime_helpers: UI progress sentinel and prep-task data carriers -----

_UI_ENCODE_PROGRESS_PREFIX = "__MRE_PROGRESS__ "


def ui_encode_progress_message(*, label: str, event: str, line: str = "") -> str:
    """Serialize an FFmpeg progress event as a UI-parsable sentinel line."""
    body = json.dumps(
        {
            "kind": "encode_ffmpeg",
            "label": str(label),
            "event": str(event),
            "line": str(line),
        },
        ensure_ascii=False,
        separators=(",", ":"),
    )
    return _UI_ENCODE_PROGRESS_PREFIX + body


@dataclass(frozen=True)
class EncodeSyncTrack:
    # Track kind ("audio"/"subtitle"/...) used by the sync planner.
    track_type: str


@dataclass(frozen=True)
class EncodeSyncMappedTrack:
    # A sync track resolved to a concrete (input file, stream) pair.
    source_file_index: int
    stream_index: int
    track: EncodeSyncTrack


@dataclass(frozen=True)
class EncodeOffsetInputSpec:
    # Auxiliary ffmpeg input that materializes a per-track time offset.
    map_key: tuple[Path, int, str]
    input_path: Path | str
    input_stream_index: int
    offset_ms: int


@dataclass(frozen=True)
class VideoTrackPrepSpec:
    # One video track to prepare: position, settings, source and offset.
    order: int
    video: VideoEncodeSettings
    source: Path
    stream_index: int
    offset_ms: int


@dataclass(frozen=True)
class VideoTrackPrepTask:
    # Executable unit handed to the preparation orchestrator.
    order: int
    resource_key: str
    estimated_ram_bytes: int
    run: Callable[[], tuple[dict[str, object], list[Path]]]
@dataclass(frozen=True)
class VideoPreparationResourcePolicy:
    """Politique explicite d'allocation des ressources encodeur."""

    # VAAPI render node to key GPU exclusion on (None -> "auto").
    vaapi_device: str | None = None
    # FFmpeg threads per worker, used to scale the RAM estimate.
    ffmpeg_threads: int = 1

    def resource_key(self, video: VideoEncodeSettings) -> str:
        """Map a video codec to the hardware resource it serializes on.

        Tracks sharing a key are never prepared concurrently (one semaphore
        per key in the orchestrator); software encodes all share ``"cpu"``.
        """
        codec = str(video.codec or "").strip().lower()
        for codec_set, key in (
            (NVENC_VIDEO_CODECS, "gpu:nvenc"),
            (VAAPI_VIDEO_CODECS, f"gpu:vaapi:{self.vaapi_device or 'auto'}"),
            (QSV_VIDEO_CODECS, "gpu:qsv"),
            (AMF_VIDEO_CODECS, "gpu:amf"),
        ):
            if codec in codec_set:
                return key
        return "cpu"

    def estimated_ram_bytes(
        self,
        video: VideoEncodeSettings,
        *,
        source_size: int,
    ) -> int:
        """Heuristic peak-RAM estimate (bytes) for preparing one track.

        CPU encodes get a larger base and per-thread cost than GPU encodes;
        SIZE mode and DV/HDR10+ sidecar extraction add fixed surcharges; a
        source-size component is clamped to the [128 MiB, 1 GiB] range.
        """
        one_mib = 1 << 20
        threads = max(1, int(self.ffmpeg_threads or 1))

        if self.resource_key(video) == "cpu":
            base, per_thread = 768 * one_mib, 96 * one_mib
        else:
            base, per_thread = 384 * one_mib, 32 * one_mib

        if video.quality_mode == QualityMode.SIZE:
            base += 128 * one_mib
        if video.copy_dv or video.copy_hdr10plus:
            base += 256 * one_mib

        source_component = 0
        if source_size > 0:
            source_component = min(max(source_size // 32, 128 * one_mib), 1024 * one_mib)

        return base + source_component + (threads * per_thread)


class VideoTrackPreparationOrchestrator:
    """Orchestrateur de préparation des pistes vidéo avec contrôle de collisions.

    Guarantees:
      - at most ``max_parallel`` tasks run concurrently overall;
      - at most one task per ``resource_key`` at a time (one binary semaphore
        per encoder resource, so e.g. two NVENC encodes never overlap);
      - when configured, a RAM gate defers task start while the estimated
        memory pressure would push free RAM below ``min_available_ram_bytes``.

    ``cancel_cb`` must raise to abort; it is polled while parked on the RAM
    gate, before each task and as results are collected.
    """

    def __init__(
        self,
        *,
        max_parallel: int,
        cancel_cb: Callable[[], None],
        on_worker_failure: Callable[[], None] | None = None,
        min_available_ram_bytes: int = 0,
        available_ram_cb: Callable[[], int] | None = None,
        on_ram_wait: Callable[[int, int, int], None] | None = None,
        ram_wait_timeout_s: float = 0.25,
    ) -> None:
        self._max_parallel = max(1, int(max_parallel))
        self._cancel_cb = cancel_cb
        self._on_worker_failure = on_worker_failure
        self._min_available_ram_bytes = max(0, int(min_available_ram_bytes))
        self._available_ram_cb = available_ram_cb
        self._on_ram_wait = on_ram_wait
        # Clamp the polling interval so a misconfigured 0 cannot busy-spin.
        self._ram_wait_timeout_s = max(0.05, float(ram_wait_timeout_s))
        self._resource_semaphores: dict[str, threading.Semaphore] = {}
        self._resource_guard = threading.Lock()
        self._ram_guard = threading.Condition()
        self._reserved_ram_bytes = 0

    def _semaphore(self, resource_key: str) -> threading.Semaphore:
        """Return (lazily creating) the binary semaphore for ``resource_key``."""
        with self._resource_guard:
            semaphore = self._resource_semaphores.get(resource_key)
            if semaphore is None:
                semaphore = threading.Semaphore(1)
                self._resource_semaphores[resource_key] = semaphore
            return semaphore

    def _claim_ram_budget(self, task: VideoTrackPrepTask) -> int:
        """Block until the RAM gate admits ``task``; return the bytes reserved.

        Returns 0 when the gate is disabled (no probe callback, no floor, or
        no estimate for the task) or when the probe reports nothing usable —
        failing open rather than deadlocking.
        """
        if (
            self._available_ram_cb is None
            or self._min_available_ram_bytes <= 0
            or task.estimated_ram_bytes <= 0
        ):
            return 0

        notified = False
        with self._ram_guard:
            while True:
                self._cancel_cb()  # honor cancellation even while parked
                free_now = max(0, int(self._available_ram_cb() or 0))
                if free_now <= 0:
                    return 0

                needed = (
                    self._min_available_ram_bytes
                    + self._reserved_ram_bytes
                    + task.estimated_ram_bytes
                )
                if free_now >= needed:
                    self._reserved_ram_bytes += task.estimated_ram_bytes
                    return task.estimated_ram_bytes

                # Nothing reserved yet but some headroom exists: admit one
                # task with whatever margin remains so progress is guaranteed.
                if self._reserved_ram_bytes == 0 and free_now > self._min_available_ram_bytes:
                    partial = max(1, free_now - self._min_available_ram_bytes)
                    self._reserved_ram_bytes += partial
                    return partial

                if (not notified) and self._on_ram_wait is not None:
                    # Notify at most once per task to avoid log spam.
                    self._on_ram_wait(task.order, needed, free_now)
                    notified = True

                self._ram_guard.wait(timeout=self._ram_wait_timeout_s)

    def _release_ram_budget(self, reserved_bytes: int) -> None:
        """Return ``reserved_bytes`` to the pool and wake parked claimants."""
        if reserved_bytes <= 0:
            return
        with self._ram_guard:
            self._reserved_ram_bytes = max(0, self._reserved_ram_bytes - int(reserved_bytes))
            self._ram_guard.notify_all()

    def _run_task(
        self,
        task: VideoTrackPrepTask,
    ) -> tuple[int, dict[str, object], list[Path]]:
        """Run one task under its resource semaphore and RAM reservation."""
        self._cancel_cb()
        with self._semaphore(task.resource_key):
            reserved_bytes = self._claim_ram_budget(task)
            try:
                self._cancel_cb()
                prepared_input, cleanup = task.run()
                return task.order, prepared_input, cleanup
            finally:
                self._release_ram_budget(reserved_bytes)

    def execute(
        self,
        tasks: list[VideoTrackPrepTask],
    ) -> list[tuple[int, dict[str, object], list[Path]]]:
        """Run ``tasks`` and return ``(order, prepared, cleanup)`` tuples.

        Serial when ``max_parallel`` is 1 or a single task is given; otherwise
        fans out on a thread pool. Results arrive in completion order — the
        caller indexes by ``order``. The first worker exception is re-raised
        after ``on_worker_failure`` (if any) has been invoked.
        """
        if not tasks:
            return []

        if self._max_parallel <= 1 or len(tasks) == 1:
            return [self._run_task(task) for task in tasks]

        results: list[tuple[int, dict[str, object], list[Path]]] = []
        worker_count = min(self._max_parallel, len(tasks))
        with ThreadPoolExecutor(max_workers=worker_count) as executor:
            futures = [executor.submit(self._run_task, task) for task in tasks]
            for future in as_completed(futures):
                self._cancel_cb()
                try:
                    results.append(future.result())
                except Exception:
                    # Fix: cancel queued-but-not-started futures so the pool
                    # drains quickly on failure instead of uselessly preparing
                    # tracks whose results will be discarded once this raises.
                    # (Already-running workers are stopped via cancel_cb /
                    # on_worker_failure as before.)
                    for pending in futures:
                        pending.cancel()
                    if self._on_worker_failure is not None:
                        self._on_worker_failure()
                    raise
        return results
RemuxPostprocessService +from core.workflows.common.ffmpeg_runtime import ( + default_ffmpeg_thread_count as _default_ffmpeg_thread_count, + ffmpeg_progress_args as _common_ffmpeg_progress_args, + ffmpeg_thread_args as _common_ffmpeg_thread_args, + normalize_ffmpeg_thread_count as _normalize_ffmpeg_thread_count, + normalize_max_parallel_video_encodes as _normalize_max_parallel_video_encodes, +) +from core.workflows.common.metadata import ( + disposition_value as _common_disposition_value, + normalize_track_language as _common_normalize_track_language, + resolve_global_tags as _common_resolve_global_tags, +) +from core.workflows.common.timeline_sync import ( + append_strict_interleave_mux_flags as _common_append_strict_interleave_mux_flags, + append_sync_inputs as _common_append_sync_inputs, + sync_cleanup_paths as _common_sync_cleanup_paths, +) +from core.workflows.encode.catalog import ( + AMF_VIDEO_CODECS as _AMF_CODECS, + H264_VIDEO_CODECS, + NVENC_VIDEO_CODECS as _NVENC_CODECS, + QSV_VIDEO_CODECS as _QSV_CODECS, + VAAPI_VIDEO_CODECS as _VAAPI_CODECS, + is_h264_video_codec, +) +from core.workflows.encode.domain import ( + EncodeCodecDomainCallbacks as _EncodeCodecDomainCallbacks, + audio_codec_args as _audio_codec_args_domain, + build_encoder_vf as _build_encoder_vf_domain, + hardware_input_args as _hardware_input_args_domain, + hdr_meta_args as _hdr_meta_args_domain, + video_codec_args as _video_codec_args_domain, + video_codec_args_bitrate as _video_codec_args_bitrate_domain, +) from core.workflows.remux_timeline_sync import ( LiveSyncSession, FfmpegTimelineSync, TimelineSyncFallbackHelper, ) +from core.workflows.encode.runtime_helpers import ( + EncodeOffsetInputSpec as _EncodeOffsetInputSpec, + VideoPreparationResourcePolicy as _VideoPreparationResourcePolicy, + VideoTrackPreparationOrchestrator as _VideoTrackPreparationOrchestrator, + VideoTrackPrepSpec as _VideoTrackPrepSpec, + VideoTrackPrepTask as _VideoTrackPrepTask, + ui_encode_progress_message as 
_ui_encode_progress_message, +) +from core.workflows.encode.runtime import ram_buffer as _ram_buffer_module +from core.workflows.encode.runtime import ( + AttachmentPreparationService as _AttachmentPreparationService, + AttachmentPreparationServiceCallbacks as _AttachmentPreparationServiceCallbacks, + DirectOutputRunner as _DirectOutputRunner, + DirectOutputRunnerCallbacks as _DirectOutputRunnerCallbacks, + MetadataInjectRunner as _MetadataInjectRunner, + MetadataInjectRunnerCallbacks as _MetadataInjectRunnerCallbacks, + MultiVideoPipelineRunner as _MultiVideoPipelineRunner, + MultiVideoPipelineRunnerCallbacks as _MultiVideoPipelineRunnerCallbacks, + SignalBindingService as _SignalBindingService, + SignalBindingServiceCallbacks as _SignalBindingServiceCallbacks, + default_attachment_filename as _default_attachment_filename, + ensure_inject_storage_available as _ensure_inject_storage_available_runtime, + estimate_duration_seconds as _estimate_duration_seconds_runtime, + estimate_inject_storage_requirements as _estimate_inject_storage_requirements_runtime, + estimate_inject_video_bytes as _estimate_inject_video_bytes_runtime, + extract_attached_pic as _extract_attached_pic_runtime, + format_bytes as _format_bytes_runtime, + probe_attachment_stream as _probe_attachment_stream_runtime, + unique_attachment_path as _unique_attachment_path_runtime, +) +from core.workflows.encode.runtime.command_builders import ( + EncodeCommandBuilderCallbacks as _EncodeCommandBuilderCallbacks, + build_runtime_single_pass_with_sync as _build_runtime_single_pass_with_sync_runtime, + build_runtime_two_pass_with_sync as _build_runtime_two_pass_with_sync_runtime, + build_single_pass as _build_single_pass_runtime, + build_two_pass as _build_two_pass_runtime, +) +from core.workflows.encode.hw_devices import ( + select_linux_hwaccel_device, + select_windows_hwaccel_device, +) +from core.workflows.encode.planning.command_plan import ( + build_encode_command_selection as 
_build_encode_command_selection_plan, +) +from core.workflows.encode.planning.metadata_plan import ( + append_container_metadata_args as _append_container_metadata_args_plan, + materialize_container_metadata_inputs as _materialize_container_metadata_inputs_plan, + prepare_container_metadata_inputs as _prepare_container_metadata_inputs_plan, +) +from core.workflows.encode.planning.encode_plan import build_encode_plan as _build_encode_plan_data +from core.workflows.encode.planning.offsets import ( + build_offset_specs as _build_offset_specs_plan, + offset_seconds as _offset_seconds_plan, + track_offset_ms as _track_offset_ms_plan, + track_time_offset_lookup as _track_time_offset_lookup_plan, + video_map_arg as _video_map_arg_plan, +) +from core.workflows.encode.planning.preview import ( + format_preview_command as _format_preview_command_plan, + format_preview_commands as _format_preview_commands_plan, + format_preview_selection as _format_preview_selection_plan, +) +from core.workflows.encode.planning.track_assembly import ( + build_track_input_paths as _build_track_input_paths_plan, + resolve_track_assembly as _resolve_track_assembly_plan, +) +from core.workflows.encode.planning.sources import ( + resolve_source_layout as _resolve_source_layout, + source_input_index_map as _source_input_index_map_plan, +) +from core.workflows.encode.planning.subtitles import ( + probe_stream_indices as _probe_stream_indices_plan, + resolve_subtitle_tracks_for_encode as _resolve_subtitle_tracks_for_encode_plan, +) +from core.workflows.encode.planning.sync_plan import ( + build_sync_analysis_plan as _build_sync_analysis_plan, +) +from core.workflows.encode.planning.validation import ( + is_dir_writable as _is_dir_writable_plan, + validate_encode_config as _validate_encode_config_plan, +) from core.workflows.encode.models import ( EncodeConfig, EncodeError, QualityMode, - VideoEncodeSettings, AudioTrackSettings, TrackTimeOffset, + VideoEncodeSettings, normalize_audio_bitrate_kbps, ) - 
- -_MIME_BY_EXT: dict[str, str] = { - ".jpg": "image/jpeg", - ".jpeg": "image/jpeg", - ".png": "image/png", - ".gif": "image/gif", - ".bmp": "image/bmp", - ".webp": "image/webp", - ".tif": "image/tiff", - ".tiff": "image/tiff", -} - -_EXT_BY_MIME: dict[str, str] = { - "image/jpeg": ".jpg", - "image/png": ".png", - "image/gif": ".gif", - "image/bmp": ".bmp", - "image/webp": ".webp", - "image/tiff": ".tiff", -} - -_VAAPI_CODECS = {"hevc_vaapi", "h264_vaapi", "av1_vaapi"} +from core.workflows.encode.planning.plan_models import ( + EncodePlan as _EncodePlan, + MaterializedContainerMetadataPlan as _MaterializedContainerMetadataPlan, + ResolvedTrackAssembly as _ResolvedTrackAssembly, +) _FALLBACK_HEVC_FRAME_RATE = "24000/1001" -@dataclass(frozen=True) -class _EncodeSyncTrack: - track_type: str - - -@dataclass(frozen=True) -class _EncodeSyncMappedTrack: - source_file_index: int - stream_index: int - track: _EncodeSyncTrack - - -@dataclass(frozen=True) -class _EncodeOffsetInputSpec: - map_key: tuple[Path, int, str] - input_path: Path | str - input_stream_index: int - offset_ms: int - - -def _default_ffmpeg_thread_count() -> int: - """Default FFmpeg thread count: logical CPU count × 0.75, rounded up.""" - cpu_count = os.cpu_count() or 1 - return max(1, (cpu_count * 3 + 3) // 4) - - -def _normalize_ffmpeg_thread_count(value: int | None) -> int: - """Return a safe FFmpeg thread count, preserving 0 as ffmpeg auto mode.""" - if value is None or value < 0: - return _default_ffmpeg_thread_count() - return value - - -def _mime_for(path: Path) -> str: - return _MIME_BY_EXT.get(path.suffix.lower(), "application/octet-stream") - - class EncodeWorkflow(QObject): """ Construit et exécute un encodage ffmpeg. 
@@ -120,6 +183,26 @@ class EncodeWorkflow(QObject): Signaux : log_message(level, message) + + API étendue (préfixe `_`, testable mais non publique) : + - Hooks d'orchestration : `_run_with_preparation`, `_run_with_metadata_inject`, + `_run_multi_video_pipeline`, `_run_two_pass`, `_check_cancelled`. + - Builders FFmpeg internes : `_build_video_only_cmd`, + `_build_video_only_cmd_for_track`, `_build_video_only_two_pass`, + `_build_video_only_two_pass_for_track`, + `_build_multi_video_track_encode_commands`, + `_build_runtime_single_pass_with_sync`, + `_build_runtime_two_pass_with_sync`, `_build_track_meta_args`. + - Sondes / décisions : `_prepare_multisource_sync`, + `_detect_source_dynamic_hdr_presence`, `_bind_nfo_write`. + - Attributs init exposés pour assertion de configuration : `_runner`, + `_ffmpeg_threads`, `_max_parallel_video_encodes`, `_ram_buffer_threshold_pct`, + `_generate_nfo`, `_postprocess_service`, `_bins`, `_mediainfo_bin`, + `_muxing_post_action`. + + Toutes les autres méthodes `_xxx` sont des wrappers triviaux vers + `core/workflows/common/`, `domain/`, `planning/` ou `runtime/` ; leur + suppression est planifiée au Step 9 du PLAN_REFONTE. """ log_message = Signal(str, str) @@ -133,6 +216,7 @@ def __init__( ram_buffer_enabled: bool = True, ram_buffer_threshold_pct: int = 15, ffmpeg_threads: int | None = None, + max_parallel_video_encodes: int | None = 1, parent: QObject | None = None, *, writing_application: str = "", @@ -151,20 +235,17 @@ def __init__( # Clé : (abs_path, mtime_ns, size). Invalide automatiquement si le # fichier a été modifié. 
self._ffprobe_payload_cache: dict[tuple[str, int, int], dict[str, object] | None] = {} + self._ffprobe_frame_hdr_cache: dict[tuple[str, int, int], tuple[bool, bool] | None] = {} self._mediainfo_hdr_cache: dict[tuple[str, int, int], tuple[bool, bool] | None] = {} self._generate_nfo = generate_nfo self._runner = ToolRunner(max_workers=1, parent=self) self._ram_buffer_enabled = ram_buffer_enabled self._ram_buffer_threshold_pct = max(0, min(ram_buffer_threshold_pct, 90)) self._ffmpeg_threads = _normalize_ffmpeg_thread_count(ffmpeg_threads) + self._max_parallel_video_encodes = _normalize_max_parallel_video_encodes(max_parallel_video_encodes) self._writing_application = writing_application.strip() - self._postproc_helper = RemuxWorkflow( - ffmpeg_bin=ffmpeg_bin, + self._postprocess_service = RemuxPostprocessService( ffprobe_bin=self._ffprobe_bin_from_ffmpeg(ffmpeg_bin), - parent=self, - writing_application=writing_application, - generate_nfo=False, # NFO géré directement par EncodeWorkflow via _bind_nfo_write - mediainfo_bin=mediainfo_bin, ) from core.workflows.matroska_header_editor import MatroskaMuxingAppPostAction from core.workflows.matroska_language_editor import MatroskaLanguagePostAction @@ -175,30 +256,65 @@ def __init__( self._language_post_action = MatroskaLanguagePostAction( log_cb=self.log_message.emit, ) + self._signal_binding_service = _SignalBindingService( + _SignalBindingServiceCallbacks( + muxing_bind_on_success=lambda signals, output: self._muxing_post_action.bind_on_success(signals, output), + language_bind_on_success=lambda signals, output: self._language_post_action.bind_on_success(signals, output), + write_nfo=lambda output: write_mediainfo_nfo( + output, + log_cb=self.log_message.emit, + mediainfo_bin=self._bins.get("mediainfo") or "mediainfo", + ), + remove_path=remove_path, + ) + ) def set_ffmpeg(self, ffmpeg_bin: str) -> None: """Met à jour le binaire ffmpeg utilisé pour l'encodage (ex: ffmpeg système pour HW).""" self._ffmpeg = ffmpeg_bin - 
self._postproc_helper.set_ffmpeg_bin(ffmpeg_bin) - self._postproc_helper.set_ffprobe_bin(self._ffprobe_bin_from_ffmpeg(ffmpeg_bin)) + self._postprocess_service.set_ffprobe_bin(self._ffprobe_bin_from_ffmpeg(ffmpeg_bin)) def set_writing_application(self, writing_application: str) -> None: """Met à jour la valeur du tag Multiplexing Application.""" self._writing_application = writing_application.strip() - self._postproc_helper.set_writing_application(self._writing_application) def set_ffmpeg_threads(self, ffmpeg_threads: int | None) -> None: """Met à jour le nombre de threads passé à FFmpeg via `-threads`.""" self._ffmpeg_threads = _normalize_ffmpeg_thread_count(ffmpeg_threads) + def set_max_parallel_video_encodes(self, max_parallel_video_encodes: int | None) -> None: + """Met à jour le niveau max de parallélisme pour la préparation multi-pistes vidéo.""" + self._max_parallel_video_encodes = _normalize_max_parallel_video_encodes(max_parallel_video_encodes) + def set_mediainfo_bin(self, mediainfo_bin: str) -> None: self._bins["mediainfo"] = mediainfo_bin def set_generate_nfo(self, generate_nfo: bool) -> None: self._generate_nfo = generate_nfo - def _ffmpeg_thread_args(self) -> list[str]: - return ["-threads", str(self._ffmpeg_threads)] + def _ffmpeg_thread_args(self, thread_count: int | None = None) -> list[str]: + effective = self._ffmpeg_threads if thread_count is None else thread_count + return _common_ffmpeg_thread_args(effective) + + def _parallel_video_worker_thread_count( + self, + *, + resource_keys: list[str], + max_parallel: int, + ) -> int | None: + worker_count = min( + max(1, len(set(resource_keys))), + max(1, int(max_parallel)), + ) + if worker_count <= 1: + return None + + base_threads = ( + self._ffmpeg_threads + if self._ffmpeg_threads > 0 + else _default_ffmpeg_thread_count() + ) + return max(1, base_threads // worker_count) @staticmethod def _ffmpeg_progress_args() -> list[str]: @@ -209,7 +325,7 @@ def _ffmpeg_progress_args() -> list[str]: codec 
utilisé. `-progress pipe:1` garantit une sortie structurée (`out_time=...`) que l'UI peut parser de façon fiable. """ - return ["-progress", "pipe:1", "-nostats"] + return _common_ffmpeg_progress_args() @staticmethod def _ffprobe_bin_from_ffmpeg(ffmpeg_bin: str) -> str: @@ -219,17 +335,26 @@ def _ffprobe_bin_from_ffmpeg(ffmpeg_bin: str) -> str: return str(ffmpeg_path.with_name("ffprobe" + ffmpeg_path.suffix)) return "ffprobe" + @classmethod + def _primary_video_settings(cls, config: EncodeConfig) -> VideoEncodeSettings: + videos = cls._video_tracks(config) + if videos: + return videos[0] + return VideoEncodeSettings() + @staticmethod def _is_video_passthrough(config: EncodeConfig) -> bool: - return config.video.codec == "copy" + return EncodeWorkflow._primary_video_settings(config).codec == "copy" @classmethod def _uses_two_pass(cls, config: EncodeConfig) -> bool: - return not cls._is_video_passthrough(config) and config.video.quality_mode == QualityMode.SIZE + video = cls._primary_video_settings(config) + return video.codec != "copy" and video.quality_mode == QualityMode.SIZE @staticmethod def _wants_dynamic_hdr_copy(config: EncodeConfig) -> bool: - return config.copy_dv or config.copy_hdr10plus + video = EncodeWorkflow._primary_video_settings(config) + return bool(video.copy_dv or video.copy_hdr10plus) @classmethod def _needs_metadata_inject(cls, config: EncodeConfig) -> bool: @@ -237,11 +362,13 @@ def _needs_metadata_inject(cls, config: EncodeConfig) -> bool: @staticmethod def _video_source_path(config: EncodeConfig) -> Path: - return Path(config.video.source_path or config.source) + video = EncodeWorkflow._primary_video_settings(config) + return Path(video.source_path or config.source) @staticmethod def _video_stream_index(config: EncodeConfig) -> int: - return int(getattr(config.video, "stream_index", 0) or 0) + video = EncodeWorkflow._primary_video_settings(config) + return int(getattr(video, "stream_index", 0) or 0) @classmethod def _video_map_key(cls, 
config: EncodeConfig) -> tuple[Path, int, str]: @@ -272,6 +399,7 @@ def _detect_source_dynamic_hdr_presence(self, source: Path) -> tuple[bool, bool] payload = self._ffprobe_streams_payload(source) has_dv = False has_hdr10plus = False + frame_flags: tuple[bool, bool] | None = None if payload is not None: for stream in self._ffprobe_stream_dicts(payload): if stream.get("codec_type") != "video": @@ -293,11 +421,21 @@ def _detect_source_dynamic_hdr_presence(self, source: Path) -> tuple[bool, bool] break mediainfo_flags = self._mediainfo_hdr_flags(source) - if mediainfo_flags is None: - return (has_dv, has_hdr10plus) if payload is not None else None - - mi_dv, mi_hdr10plus = mediainfo_flags - return has_dv or mi_dv, has_hdr10plus or mi_hdr10plus + if mediainfo_flags is not None: + mi_dv, mi_hdr10plus = mediainfo_flags + has_dv = has_dv or mi_dv + has_hdr10plus = has_hdr10plus or mi_hdr10plus + + if not has_dv or not has_hdr10plus: + frame_flags = self._ffprobe_frame_dynamic_hdr_flags(source) + if frame_flags is not None: + frame_dv, frame_hdr10plus = frame_flags + has_dv = has_dv or frame_dv + has_hdr10plus = has_hdr10plus or frame_hdr10plus + + if payload is None and mediainfo_flags is None and frame_flags is None: + return None + return has_dv, has_hdr10plus def _subtitle_codec_of(self, source: Path, stream_index: int) -> str: """Retourne le codec (ffprobe ``codec_name``) du stream ``stream_index``. 
@@ -309,8 +447,12 @@ def _subtitle_codec_of(self, source: Path, stream_index: int) -> str: return "" for stream in self._ffprobe_stream_dicts(payload): raw_idx = stream.get("index", -1) + if isinstance(raw_idx, bool): + continue + if not isinstance(raw_idx, (int, float, str, bytes, bytearray)): + continue try: - idx_val = int(raw_idx) # type: ignore[arg-type] + idx_val = int(raw_idx) except (TypeError, ValueError): continue if idx_val == int(stream_index): @@ -398,6 +540,80 @@ def _ffprobe_stream_dicts(payload: dict[str, object]) -> list[dict[str, object]] out.append(cast(dict[str, object], item)) return out + def _ffprobe_frame_dynamic_hdr_flags( + self, + source: Path, + *, + max_frames: int = 240, + ) -> tuple[bool, bool] | None: + cache_key = self._source_cache_key(source) + if cache_key is not None and cache_key in self._ffprobe_frame_hdr_cache: + return self._ffprobe_frame_hdr_cache[cache_key] + + ffprobe_bin = self._ffprobe_bin_from_ffmpeg(self._ffmpeg) + cmd = [ + ffprobe_bin, + "-v", "quiet", + "-print_format", "json", + "-select_streams", "v:0", + "-read_intervals", f"%+#{max(1, int(max_frames))}", + "-show_frames", + "-show_entries", "frame_side_data=side_data_type", + str(source), + ] + try: + result = subprocess.run( + cmd, + capture_output=True, + check=False, + timeout=30, + **subprocess_text_kwargs(), + ) + except FileNotFoundError: + flags: tuple[bool, bool] | None = None + else: + if result.returncode != 0: + flags = None + else: + try: + payload = json.loads(result.stdout or "{}") + except json.JSONDecodeError: + flags = None + else: + frames_obj = payload.get("frames") + has_dv = False + has_hdr10plus = False + if isinstance(frames_obj, list): + for frame in frames_obj: + if not isinstance(frame, dict): + continue + side_data_obj = frame.get("side_data_list") + if not isinstance(side_data_obj, list): + continue + for side_data in side_data_obj: + if not isinstance(side_data, dict): + continue + side_type = str(side_data.get("side_data_type", "") 
or "") + side_type_lower = side_type.lower() + if ("dolby vision" in side_type_lower) or (side_type == "DOVI configuration record"): + has_dv = True + if ( + "hdr dynamic metadata smpte2094-40" in side_type_lower + or "hdr10+" in side_type_lower + or "smpte st 2094" in side_type_lower + or "smpte2094" in side_type_lower + ): + has_hdr10plus = True + if has_dv and has_hdr10plus: + break + if has_dv and has_hdr10plus: + break + flags = (has_dv, has_hdr10plus) + + if cache_key is not None: + self._ffprobe_frame_hdr_cache[cache_key] = flags + return flags + def _mediainfo_hdr_flags(self, source: Path) -> tuple[bool, bool] | None: cache_key = self._source_cache_key(source) if cache_key is not None and cache_key in self._mediainfo_hdr_cache: @@ -422,9 +638,14 @@ def _mediainfo_hdr_flags(self, source: Path) -> tuple[bool, bool] | None: except FileNotFoundError: result: tuple[bool, bool] | None = None else: + hdr_text = f"{hdr_format.stdout or ''}\n{hdr_compat.stdout or ''}".lower() result = ( - "dolby vision" in (hdr_format.stdout or "").lower(), - "hdr10+" in (hdr_compat.stdout or "").lower(), + "dolby vision" in hdr_text, + ( + "hdr10+" in hdr_text + or "smpte st 2094" in hdr_text + or "smpte2094" in hdr_text + ), ) if cache_key is not None: @@ -450,25 +671,39 @@ def _normalize_dynamic_hdr_config(self, config: EncodeConfig) -> EncodeConfig: ) return config + video = self._primary_video_settings(config) has_dv, has_hdr10plus = detected - copy_dv = config.copy_dv and has_dv - copy_hdr10plus = config.copy_hdr10plus and has_hdr10plus + copy_dv = video.copy_dv and has_dv + copy_hdr10plus = video.copy_hdr10plus and has_hdr10plus - if config.copy_dv and not copy_dv: + if video.copy_dv and not copy_dv: self.log_message.emit( "WARN", "Copy DoVi demandé mais aucune donnée DoVi détectée — option ignorée.", ) - if config.copy_hdr10plus and not copy_hdr10plus: + if video.copy_hdr10plus and not copy_hdr10plus: self.log_message.emit( "WARN", "Copy HDR10+ demandé mais aucune donnée 
HDR10+ détectée — option ignorée.", ) + normalized_video = replace( + video, + copy_dv=copy_dv, + copy_hdr10plus=copy_hdr10plus, + ) + normalized_tracks = list(config.video_tracks) + if normalized_tracks: + normalized_tracks = [normalized_video, *normalized_tracks[1:]] + else: + normalized_tracks = [normalized_video] normalized = replace( config, + video=normalized_video, + video_tracks=normalized_tracks, copy_dv=copy_dv, copy_hdr10plus=copy_hdr10plus, + dovi_profile=normalized_video.dovi_profile, ) if not self._wants_dynamic_hdr_copy(normalized) and self._is_video_passthrough(config): @@ -478,6 +713,47 @@ def _normalize_dynamic_hdr_config(self, config: EncodeConfig) -> EncodeConfig: ) return normalized + def _normalize_dynamic_hdr_multi(self, config: EncodeConfig) -> EncodeConfig: + videos: list[VideoEncodeSettings] = [] + for index, video in enumerate(self._video_tracks(config), start=1): + if not (video.copy_dv or video.copy_hdr10plus): + videos.append(video) + continue + detected = self._detect_source_dynamic_hdr_presence( + self._video_source_from_settings(config, video) + ) + if detected is None: + self.log_message.emit( + "WARN", + f"Détection DoVi/HDR10+ impossible pour la piste vidéo #{index} — demande conservée.", + ) + videos.append(video) + continue + has_dv, has_hdr10plus = detected + copy_dv = video.copy_dv and has_dv + copy_hdr10plus = video.copy_hdr10plus and has_hdr10plus + if video.copy_dv and not copy_dv: + self.log_message.emit( + "WARN", + f"Copy DoVi demandé mais aucune donnée DoVi détectée pour la piste vidéo #{index} — option ignorée.", + ) + if video.copy_hdr10plus and not copy_hdr10plus: + self.log_message.emit( + "WARN", + f"Copy HDR10+ demandé mais aucune donnée HDR10+ détectée pour la piste vidéo #{index} — option ignorée.", + ) + videos.append(replace(video, copy_dv=copy_dv, copy_hdr10plus=copy_hdr10plus)) + + primary = videos[0] + return replace( + config, + video=primary, + video_tracks=videos, + copy_dv=primary.copy_dv, + 
copy_hdr10plus=primary.copy_hdr10plus, + dovi_profile=primary.dovi_profile, + ) + # ------------------------------------------------------------------ # Construction de la commande # ------------------------------------------------------------------ @@ -486,13 +762,32 @@ def build_command(self, config: EncodeConfig) -> list[str] | list[list[str]]: """ Retourne une commande (list[str]) ou deux commandes pour la double passe (list[list[str]]). """ - return self._build_direct_output_commands(config) + selection = _build_encode_command_selection_plan( + config, + plan=self._build_encode_plan(config), + is_multi_video=self._is_multi_video, + uses_two_pass=self._uses_two_pass, + build_multi_video_preview=self._build_multi_video_command_preview, + build_two_pass=self._build_two_pass, + build_single_pass=self._build_single_pass, + ) + if len(selection.commands) <= 1: + return list(selection.preview_command) + return [list(cmd) for cmd in selection.commands] def build_command_single(self, config: EncodeConfig) -> list[str]: """Toujours une seule commande — pour l'aperçu UI.""" - if self._uses_two_pass(config): - return self._build_two_pass(config)[1] - return self._build_single_pass(config) + return list( + _build_encode_command_selection_plan( + config, + plan=self._build_encode_plan(config), + is_multi_video=self._is_multi_video, + uses_two_pass=self._uses_two_pass, + build_multi_video_preview=self._build_multi_video_command_preview, + build_two_pass=self._build_two_pass, + build_single_pass=self._build_single_pass, + ).preview_command + ) def _build_direct_output_commands( self, @@ -500,108 +795,102 @@ def _build_direct_output_commands( *, chapter_materialize_dir: Path | None = None, ) -> list[str] | list[list[str]]: - if self._uses_two_pass(config): - return self._build_two_pass( - config, - chapter_materialize_dir=chapter_materialize_dir, - ) - return self._build_single_pass( + selection = _build_encode_command_selection_plan( config, + 
plan=self._build_encode_plan(config), + is_multi_video=self._is_multi_video, + uses_two_pass=self._uses_two_pass, + build_multi_video_preview=self._build_multi_video_command_preview, + build_two_pass=self._build_two_pass, + build_single_pass=self._build_single_pass, chapter_materialize_dir=chapter_materialize_dir, ) + if len(selection.commands) <= 1: + return list(selection.preview_command) + return [list(cmd) for cmd in selection.commands] + + def _build_encode_plan(self, config: EncodeConfig) -> _EncodePlan: + return _build_encode_plan_data( + config, + resolve_subtitle_tracks=self._resolved_subtitle_tracks_for_encode, + resolve_global_tags=self._resolve_global_tags, + video_tracks=self._video_tracks, + video_source_from_settings=self._video_source_from_settings, + video_source_path=self._video_source_path, + video_stream_index=self._video_stream_index, + video_map_key=self._video_map_key, + ) + + def _build_multi_video_command_preview( + self, + config: EncodeConfig, + *, + plan: _EncodePlan | None = None, + ) -> list[list[str]]: + commands: list[list[str]] = [] + plan = plan or self._build_encode_plan(config) + resource_keys = [ + self._video_encode_resource_key(video) + for video in self._video_tracks(config) + if video.codec != "copy" + ] + thread_count = self._parallel_video_worker_thread_count( + resource_keys=resource_keys, + max_parallel=self._max_parallel_video_encodes, + ) + for idx, video in enumerate(self._video_tracks(config), start=1): + source = self._video_source_from_settings(config, video) + if video.codec == "copy": + continue + if video.quality_mode == QualityMode.SIZE: + commands.extend( + self._build_multi_video_track_encode_commands( + config, + video, + source, + Path(f""), + thread_count=thread_count, + for_preview=True, + ) + ) + else: + commands.append( + self._build_multi_video_track_encode_commands( + config, + video, + source, + Path(f""), + thread_count=thread_count, + for_preview=True, + )[-1] + ) + 
commands.append(self._build_multi_video_final_mux_command(config, [], plan=plan)) + return commands @staticmethod def _collect_all_sources(config: EncodeConfig) -> list[Path]: """Retourne les sources uniques (source principale puis extras).""" - all_sources: list[Path] = [config.source] - video_source = EncodeWorkflow._video_source_path(config) - if video_source not in all_sources: - all_sources.append(video_source) - for a in config.audio_tracks: - sp = a.source_path or config.source - if sp not in all_sources: - all_sources.append(sp) - for src_path, _idx in config.subtitle_tracks: - if src_path not in all_sources: - all_sources.append(src_path) - for src_path, _idx in config.attachment_streams: - if src_path not in all_sources: - all_sources.append(src_path) - return all_sources + return list(_resolve_source_layout(config).sources) @staticmethod - def _source_input_index_map(sources: list[Path], *, start_index: int = 0) -> dict[Path, int]: - """Construit un mapping source -> index d'input ffmpeg.""" - return {src: start_index + i for i, src in enumerate(sources)} + def _video_tracks(config: EncodeConfig) -> list[VideoEncodeSettings]: + if config.video_tracks: + return list(config.video_tracks) + if config.video is not None: + return [config.video] + return [] - @staticmethod - def _offset_seconds(offset_ms: int) -> str: - return f"{abs(int(offset_ms)) / 1000.0:.3f}" + @classmethod + def _is_multi_video(cls, config: EncodeConfig) -> bool: + return len(cls._video_tracks(config)) > 1 @staticmethod - def _track_time_offset_lookup(config: EncodeConfig) -> dict[tuple[str, Path, int], int]: - lookup: dict[tuple[str, Path, int], int] = {} - for raw in config.track_time_offsets: - if not isinstance(raw, TrackTimeOffset): - continue - track_type = str(raw.track_type or "").strip().lower() - if track_type not in {"video", "audio", "subtitle"}: - continue - lookup[(track_type, Path(raw.source_path), int(raw.stream_index))] = int(raw.offset_ms) - return lookup + def 
_video_source_from_settings(config: EncodeConfig, video: VideoEncodeSettings) -> Path: + return Path(video.source_path or config.source) @staticmethod - def _track_offset_ms( - lookup: dict[tuple[str, Path, int], int], - *, - track_type: str, - source_path: Path, - stream_index: int, - ) -> int: - key = (str(track_type).strip().lower(), Path(source_path), int(stream_index)) - if key in lookup: - return int(lookup[key]) - if key[0] == "video": - matches = [ - int(v) - for (tt, sp, _), v in lookup.items() - if tt == "video" and sp == key[1] - ] - if len(matches) == 1: - return matches[0] - return 0 - - def _build_offset_specs( - self, - config: EncodeConfig, - *, - track_mappings: list[tuple[tuple[Path, int, str], Path | str, int]], - offset_lookup: dict[tuple[str, Path, int], int] | None = None, - ) -> list[_EncodeOffsetInputSpec]: - lookup = offset_lookup if offset_lookup is not None else self._track_time_offset_lookup(config) - specs: list[_EncodeOffsetInputSpec] = [] - for map_key, input_path, input_stream_index in track_mappings: - src_path, src_stream_idx, track_type = map_key - offset_ms = self._track_offset_ms( - lookup, - track_type=track_type, - source_path=src_path, - stream_index=src_stream_idx, - ) - if offset_ms == 0: - continue - if track_type == "video" and offset_ms < 0: - raise EncodeError( - "Décalage vidéo négatif interdit : " - f"source={src_path}, stream={src_stream_idx}, offset={offset_ms} ms" - ) - specs.append(_EncodeOffsetInputSpec( - map_key=(Path(src_path), int(src_stream_idx), str(track_type)), - input_path=input_path, - input_stream_index=int(input_stream_index), - offset_ms=int(offset_ms), - )) - return specs + def _video_stream_from_settings(video: VideoEncodeSettings) -> int: + return int(getattr(video, "stream_index", 0) or 0) def _append_offset_aux_inputs( self, @@ -624,9 +913,9 @@ def _append_offset_aux_inputs( input_idx = input_by_key.get(input_key) if input_idx is None: if int(spec.offset_ms) > 0: - cmd.extend(["-itsoffset", 
self._offset_seconds(spec.offset_ms), "-i", str(spec.input_path)]) + cmd.extend(["-itsoffset", _offset_seconds_plan(spec.offset_ms), "-i", str(spec.input_path)]) else: - cmd.extend(["-ss", self._offset_seconds(spec.offset_ms), "-i", str(spec.input_path)]) + cmd.extend(["-ss", _offset_seconds_plan(spec.offset_ms), "-i", str(spec.input_path)]) input_idx = next_input_index input_by_key[input_key] = input_idx next_input_index += 1 @@ -635,37 +924,13 @@ def _append_offset_aux_inputs( return next_input_index, remap - @staticmethod - def _video_map_arg( - default_map: tuple[int, int], - *, - offset_remap: dict[tuple[Path, int, str], tuple[int, int]], - map_key: tuple[Path, int, str], - ) -> str: - remapped = offset_remap.get(map_key) - if remapped is None: - stream_index = int(default_map[1]) - if stream_index == 0: - return f"{int(default_map[0])}:v:0" - return f"{int(default_map[0])}:{stream_index}" - return f"{int(remapped[0])}:{int(remapped[1])}" - def _probe_stream_indices(self, source: Path, codec_type: str) -> list[int] | None: - payload = self._ffprobe_streams_payload(source) - if payload is None: - return None - indices: list[int] = [] - for stream in self._ffprobe_stream_dicts(payload): - if stream.get("codec_type") != codec_type: - continue - idx_raw = stream.get("index") - if not isinstance(idx_raw, (int, str)): - continue - try: - indices.append(int(idx_raw)) - except (TypeError, ValueError): - continue - return sorted(set(indices)) + return _probe_stream_indices_plan( + source, + codec_type, + ffprobe_streams_payload=self._ffprobe_streams_payload, + ffprobe_stream_dicts=self._ffprobe_stream_dicts, + ) def _resolved_subtitle_tracks_for_encode( self, @@ -680,178 +945,12 @@ def _resolved_subtitle_tracks_for_encode( - copy_subtitles=True: tentative de résolution via ffprobe sur chaque source. Si une source n'est pas sondable, on renvoie complete=False. 
""" - if config.subtitle_tracks: - deduped: list[tuple[Path, int]] = [] - seen: set[tuple[Path, int]] = set() - for src_path, stream_idx in config.subtitle_tracks: - key = (src_path, int(stream_idx)) - if key in seen: - continue - seen.add(key) - deduped.append(key) - return deduped, True - - if not config.copy_subtitles: - return [], True - - resolved: list[tuple[Path, int]] = [] - seen: set[tuple[Path, int]] = set() - for src_path in all_sources: - subtitle_indices = self._probe_stream_indices(src_path, "subtitle") - if subtitle_indices is None: - return [], False - for stream_idx in subtitle_indices: - key = (src_path, int(stream_idx)) - if key in seen: - continue - seen.add(key) - resolved.append(key) - return resolved, True - - def _build_probe_remux_config( - self, - config: EncodeConfig, - all_sources: list[Path], - source_idx_local: dict[Path, int], - resolved_subtitle_tracks: list[tuple[Path, int]], - ) -> RemuxConfig: - """ - Construit une config Remux minimale pour réutiliser la logique de détection - de risque multi-source (strict interleave + pré-scan sous-titres). - """ - tracks_by_source: dict[int, list[TrackEntry]] = {i: [] for i in range(len(all_sources))} - - def _push_track(src_idx: int, stream_idx: int, track_type: str) -> None: - bucket = tracks_by_source.setdefault(src_idx, []) - if any(t.mkv_tid == stream_idx and t.track_type == track_type for t in bucket): - return - bucket.append(TrackEntry( - mkv_tid=stream_idx, - track_type=track_type, - codec="COPY", - display_info="", - language="", - title="", - )) - - # Vidéo de référence : input source principal. 
- _push_track(0, 0, "video") - for a in config.audio_tracks: - src = a.source_path or config.source - src_idx = source_idx_local.get(src, 0) - _push_track(src_idx, int(a.stream_index), "audio") - for src, stream_idx in resolved_subtitle_tracks: - src_idx = source_idx_local.get(src) - if src_idx is None: - continue - _push_track(src_idx, int(stream_idx), "subtitle") - - sources: list[SourceInput] = [] - track_order: list[tuple[int, int] | tuple[int, int, str]] = [] - for i, src in enumerate(all_sources): - source_tracks = tracks_by_source.get(i, []) - sources.append(SourceInput(path=src, file_index=i, tracks=source_tracks)) - for t in source_tracks: - track_order.append((i, int(t.mkv_tid))) - - return RemuxConfig( - sources=sources, - output=config.output, - track_order=track_order, - keep_chapters=config.keep_chapters, - ) - - @staticmethod - def _build_sync_mapped_tracks( - config: EncodeConfig, - source_idx_local: dict[Path, int], - resolved_subtitle_tracks: list[tuple[Path, int]], - ) -> list[_EncodeSyncMappedTrack]: - mapped: list[_EncodeSyncMappedTrack] = [ - _EncodeSyncMappedTrack( - source_file_index=0, - stream_index=0, - track=_EncodeSyncTrack("video"), - ) - ] - for a in config.audio_tracks: - src = a.source_path or config.source - src_idx = source_idx_local.get(src, 0) - mapped.append(_EncodeSyncMappedTrack( - source_file_index=src_idx, - stream_index=int(a.stream_index), - track=_EncodeSyncTrack("audio"), - )) - for src, stream_idx in resolved_subtitle_tracks: - src_idx = source_idx_local.get(src) - if src_idx is None: - continue - mapped.append(_EncodeSyncMappedTrack( - source_file_index=src_idx, - stream_index=int(stream_idx), - track=_EncodeSyncTrack("subtitle"), - )) - return mapped - - @staticmethod - def _needs_strict_interleave_for_encode( - mapped_tracks: list[_EncodeSyncMappedTrack], - ) -> bool: - """ - Heuristique rapide (sans pré-scan ffprobe) pour éviter de bloquer l'UI - au lancement: - - multi-source effectif, - - présence de sous-titres 
en sortie, - - au moins un audio provenant d'une autre source que la vidéo primaire. - """ - used_sources = {mt.source_file_index for mt in mapped_tracks} - if len(used_sources) < 2: - return False - - has_subtitle_output = any(mt.track.track_type == "subtitle" for mt in mapped_tracks) - if not has_subtitle_output: - return False - - primary_video = next((mt for mt in mapped_tracks if mt.track.track_type == "video"), None) - if primary_video is None: - return False - - return any( - mt.track.track_type == "audio" and mt.source_file_index != primary_video.source_file_index - for mt in mapped_tracks + resolved = _resolve_subtitle_tracks_for_encode_plan( + config, + all_sources, + probe_indices=self._probe_stream_indices, ) - - def _requires_file_sync_fallback_for_offsets( - self, - config: EncodeConfig, - mapped_tracks: list[_EncodeSyncMappedTrack], - source_by_index: dict[int, Path], - *, - offset_lookup: dict[tuple[str, Path, int], int] | None = None, - ) -> bool: - """Forcer le fallback fichier si offsets étrangers incompatibles avec le mode live.""" - primary_video = next((mt for mt in mapped_tracks if mt.track.track_type == "video"), None) - if primary_video is None: - return False - - lookup = offset_lookup if offset_lookup is not None else self._track_time_offset_lookup(config) - for mt in mapped_tracks: - if mt.track.track_type not in {"audio", "subtitle"}: - continue - if mt.source_file_index == primary_video.source_file_index: - continue - src_path = source_by_index.get(mt.source_file_index) - if src_path is None: - continue - offset_ms = self._track_offset_ms( - lookup, - track_type=mt.track.track_type, - source_path=src_path, - stream_index=mt.stream_index, - ) - if offset_ms != 0: - return True - return False + return list(resolved.tracks), resolved.complete def _prepare_multisource_sync( self, @@ -862,11 +961,13 @@ def _prepare_multisource_sync( work_dir: Path, signals: TaskSignals | None = None, allow_live: bool = True, + plan: _EncodePlan | None = 
None, ) -> tuple[dict[tuple[Path, int, str], tuple[int, int]], list[Path | str], LiveSyncSession | None, bool]: """ Prépare la normalisation timeline pour les flux multi-source dans le workflow encode. """ + encode_plan = plan source_idx_local = {p: i for i, p in enumerate(all_sources)} if len(source_idx_local) < 2: return {}, [], None, False @@ -874,61 +975,41 @@ def _prepare_multisource_sync( "INFO", "Analyse sync timeline multi-source : pré-scan/remap en cours…", ) - - resolved_subtitle_tracks, subtitles_resolved = self._resolved_subtitle_tracks_for_encode( - config, - all_sources, - ) - mapped_tracks = self._build_sync_mapped_tracks( - config, - source_idx_local, - resolved_subtitle_tracks, - ) - path_by_source_idx = {idx: path for path, idx in source_idx_local.items()} - offset_lookup = self._track_time_offset_lookup(config) - offset_requires_sync = self._requires_file_sync_fallback_for_offsets( - config, - mapped_tracks, - path_by_source_idx, - offset_lookup=offset_lookup, - ) + if encode_plan is None: + encode_plan = self._build_encode_plan(config) + sync_analysis = encode_plan.sync_analysis + if not sync_analysis.enabled: + return {}, [], None, False strict_interleave = False - if offset_requires_sync: + if sync_analysis.offset_requires_file_fallback: strict_interleave = True self.log_message.emit( "INFO", "Décalage sur piste étrangère détecté : sync timeline activé.", ) - elif subtitles_resolved: + elif sync_analysis.needs_subtitle_prescan and sync_analysis.probe_remux_config is not None: self.log_message.emit( "INFO", "Pré-scan ffprobe des sous-titres (décision interleave strict)…", ) - strict_interleave = self._postproc_helper._decide_strict_interleave_with_prescan( - self._build_probe_remux_config( - config, - all_sources, - source_idx_local, - resolved_subtitle_tracks, - ) + strict_interleave = self._postprocess_service.decide_strict_interleave_with_prescan( + sync_analysis.probe_remux_config, + log_cb=self.log_message.emit, + ) + elif 
sync_analysis.strict_interleave_without_prescan: + strict_interleave = True + self.log_message.emit( + "WARNING", + "Pré-scan sous-titres indisponible en mode copy_subtitles ; " + "activation sync timeline par sécurité.", ) - elif config.copy_subtitles: - # copy_subtitles sans résolution explicite des streams: on reste - # conservateur dès qu'il y a audio étranger multi-source. - strict_interleave = self._needs_strict_interleave_for_encode(mapped_tracks) - if strict_interleave: - self.log_message.emit( - "WARNING", - "Pré-scan sous-titres indisponible en mode copy_subtitles ; " - "activation sync timeline par sécurité.", - ) if not strict_interleave: return {}, [], None, False allow_live_sync = bool(allow_live) - if allow_live_sync and offset_requires_sync: + if allow_live_sync and not sync_analysis.allow_live_sync: allow_live_sync = False self.log_message.emit( "INFO", @@ -953,7 +1034,7 @@ def _prepare_multisource_sync( ram_dir=ram_dir, log_cb=lambda msg: self.log_message.emit("INFO", msg), ).prepare( - mapped_tracks=mapped_tracks, + mapped_tracks=list(sync_analysis.mapped_tracks), sources=sync_sources, base_input_idx=sync_base_input_idx, allow_live=allow_live_sync, @@ -976,21 +1057,11 @@ def _prepare_multisource_sync( @staticmethod def _append_strict_interleave_mux_flags(cmd: list[str]) -> None: - cmd.extend(["-max_interleave_delta", "0"]) - cmd.extend(["-max_muxing_queue_size", "9999"]) + _common_append_strict_interleave_mux_flags(cmd) @staticmethod def _append_sync_inputs(cmd: list[str], sync_inputs: list[Path | str]) -> None: - """ - Ajoute les entrées synchronisées (FIFO/fichiers temporaires) en forçant - le demuxer Matroska pour éviter les blocages de probing sur flux live. 
- """ - for sync_input in sync_inputs: - cmd.extend(["-f", "matroska", "-i", str(sync_input)]) - - @staticmethod - def _sync_cleanup_paths(sync_inputs: list[Path | str]) -> list[Path]: - return [p for p in sync_inputs if isinstance(p, Path)] + _common_append_sync_inputs(cmd, sync_inputs) def _append_stream_maps_and_attachments( self, @@ -1014,14 +1085,15 @@ def _append_stream_maps_and_attachments( if remapped is None: remapped = sync_remap.get((src_path, int(a.stream_index), "audio")) if remapped is not None: - inp, stream_idx = remapped + mapped_audio_inp_idx, stream_idx = remapped else: - inp = source_idx.get(src_path) - if inp is None: - inp = source_idx.get(config.source, 0) + source_audio_inp_idx = source_idx.get(Path(src_path)) + if source_audio_inp_idx is None: + source_audio_inp_idx = source_idx.get(config.source) + mapped_audio_inp_idx = source_audio_inp_idx if source_audio_inp_idx is not None else 0 stream_idx = int(a.stream_index) - cmd.extend(["-map", f"{inp}:{stream_idx}"]) - cmd.extend(self._audio_codec_args(i, a)) + cmd.extend(["-map", f"{mapped_audio_inp_idx}:{stream_idx}"]) + cmd.extend(_audio_codec_args_domain(i, a)) subtitle_tracks = ( subtitle_tracks_override @@ -1035,13 +1107,13 @@ def _append_stream_maps_and_attachments( if remapped is None: remapped = sync_remap.get((src_path, int(stream_idx), "subtitle")) if remapped is not None: - inp, mapped_stream_idx = remapped - cmd.extend(["-map", f"{inp}:{mapped_stream_idx}"]) + mapped_subtitle_inp_idx, mapped_stream_idx = remapped + cmd.extend(["-map", f"{mapped_subtitle_inp_idx}:{mapped_stream_idx}"]) continue - inp = source_idx.get(src_path) - if inp is None: + source_subtitle_inp_idx = source_idx.get(Path(src_path)) + if source_subtitle_inp_idx is None: continue - cmd.extend(["-map", f"{inp}:{stream_idx}"]) + cmd.extend(["-map", f"{source_subtitle_inp_idx}:{stream_idx}"]) # Routage par piste : copy quand MKV l'accepte, sinon conversion srt. 
cmd.extend(self._subtitle_codec_args([ (t[0], int(t[1])) for t in subtitle_tracks @@ -1058,10 +1130,10 @@ def _append_stream_maps_and_attachments( mapped_attachment_meta: list[tuple[int, dict[str, object]]] = [] if config.attachment_streams: for src_path, stream_idx in config.attachment_streams: - inp = source_idx.get(src_path) - if inp is None: + attachment_inp_idx = source_idx.get(Path(src_path)) + if attachment_inp_idx is None: continue - cmd.extend(["-map", f"{inp}:{stream_idx}"]) + cmd.extend(["-map", f"{attachment_inp_idx}:{stream_idx}"]) mapped_attachment_meta.append( (stream_idx, self._describe_attachment_stream(src_path, stream_idx)) ) @@ -1074,7 +1146,7 @@ def _append_stream_maps_and_attachments( ]) cmd.extend([ f"-metadata:s:t:{out_idx}", - f"filename={self._attachment_filename(meta, stream_idx)}", + f"filename={_default_attachment_filename(meta, stream_idx)}", ]) existing_att = len(mapped_attachment_meta) @@ -1082,7 +1154,7 @@ def _append_stream_maps_and_attachments( att_idx = existing_att + i att_name = "cover" if att_path.stem.lower() == "cover" else att_path.name cmd.extend(["-attach", str(att_path)]) - cmd.extend([f"-metadata:s:t:{att_idx}", f"mimetype={_mime_for(att_path)}"]) + cmd.extend([f"-metadata:s:t:{att_idx}", f"mimetype={mime_for_path(att_path)}"]) cmd.extend([f"-metadata:s:t:{att_idx}", f"filename={att_name}"]) def _prepare_container_metadata_inputs( @@ -1092,39 +1164,56 @@ def _prepare_container_metadata_inputs( *, source_idx: dict[Path, int], next_input_index: int, + plan: _EncodePlan | None = None, chapter_materialize_dir: Path | None = None, chapter_probe_source: Path | None = None, ) -> tuple[int, int | None, int | None]: """Ajoute les inputs nécessaires aux metadata (chapitres/tags) et retourne leurs index.""" - chapter_input_index: int | None = None - if config.chapter_overrides: - if chapter_materialize_dir is not None: - duration_s = self._postproc_helper._probe_duration_seconds( - chapter_probe_source or config.source - ) - 
chapter_file = self._postproc_helper._write_ffmetadata_chapters( - entries=config.chapter_overrides, - out_dir=chapter_materialize_dir, - duration_s=duration_s, - ) - chapter_ref = str(chapter_file) - else: - chapter_ref = "" - chapter_input_index = next_input_index - next_input_index += 1 - cmd.extend(["-i", chapter_ref]) - - tag_input_index: int | None = None - if config.tag_overrides is None and config.tag_sources: - tag_source = Path(config.tag_sources[-1]) - mapped_idx = source_idx.get(tag_source) - if mapped_idx is not None: - tag_input_index = mapped_idx - else: - tag_input_index = next_input_index - next_input_index += 1 - cmd.extend(["-i", str(tag_source)]) - return next_input_index, chapter_input_index, tag_input_index + planned = _prepare_container_metadata_inputs_plan( + cmd, + config, + source_idx=source_idx, + next_input_index=next_input_index, + container_metadata_plan=(plan.container_metadata if plan is not None else None), + chapter_materialize_dir=chapter_materialize_dir, + chapter_probe_source=chapter_probe_source, + probe_duration_seconds=self._postprocess_service.probe_duration_seconds, + write_ffmetadata_chapters=lambda entries, out_dir, duration_s: self._postprocess_service.write_ffmetadata_chapters( + entries=entries, + out_dir=out_dir, + duration_s=duration_s, + ), + ) + return ( + planned.next_input_index, + planned.chapter_input_index, + planned.tag_input_index, + ) + + def _materialize_container_metadata_inputs( + self, + config: EncodeConfig, + *, + source_idx: dict[Path, int], + next_input_index: int, + plan: _EncodePlan | None = None, + chapter_materialize_dir: Path | None = None, + chapter_probe_source: Path | None = None, + ) -> _MaterializedContainerMetadataPlan: + return _materialize_container_metadata_inputs_plan( + config, + source_idx=source_idx, + next_input_index=next_input_index, + container_metadata_plan=(plan.container_metadata if plan is not None else None), + chapter_materialize_dir=chapter_materialize_dir, + 
chapter_probe_source=chapter_probe_source, + probe_duration_seconds=self._postprocess_service.probe_duration_seconds, + write_ffmetadata_chapters=lambda entries, out_dir, duration_s: self._postprocess_service.write_ffmetadata_chapters( + entries=entries, + out_dir=out_dir, + duration_s=duration_s, + ), + ) def _append_container_metadata_args( self, @@ -1136,306 +1225,162 @@ def _append_container_metadata_args( chapter_input_index: int | None, tag_input_index: int | None, include_copy_video_stream_passthrough: bool = False, + plan: _EncodePlan | None = None, ) -> None: """Ajoute les options metadata/chapitres/tags/track-meta en une passe.""" - chapter_map = self._container_chapter_map_value( - config, - default_chapter_input_index=default_chapter_input_index, - chapter_input_index=chapter_input_index, - ) - metadata_map = self._container_metadata_map_value( + _append_container_metadata_args_plan( + cmd, config, default_metadata_input_index=default_metadata_input_index, + default_chapter_input_index=default_chapter_input_index, chapter_input_index=chapter_input_index, tag_input_index=tag_input_index, include_copy_video_stream_passthrough=include_copy_video_stream_passthrough, - chapter_map=chapter_map, + is_video_passthrough=self._is_video_passthrough, + resolve_global_tags=self._resolve_global_tags, + build_track_meta_args=self._build_track_meta_args, + container_metadata_plan=(plan.container_metadata if plan is not None else None), ) - if metadata_map is not None: - cmd.extend(["-map_metadata", metadata_map]) - if include_copy_video_stream_passthrough and self._is_video_passthrough(config): - cmd.extend([ - "-map_metadata:s:v:0", - f"{default_metadata_input_index}:s:v:0", - ]) - - cmd.extend(["-map_chapters", chapter_map]) - global_tags = self._resolve_global_tags(config) - title_value = global_tags.pop("title", None) - if title_value is not None: - cmd.extend(["-metadata", f"title={title_value}"]) - - # Suppression des balises ENCODER et CREATION_TIME transportées 
depuis la source. - # On garde ces resets avant les autres tags pour qu'un éventuel override utilisateur - # puisse les redéfinir ensuite. - cmd.extend(["-metadata", "encoder=", "-metadata", "creation_time="]) - - for key, value in global_tags.items(): - cmd.extend(["-metadata", f"{key}={value}"]) - cmd.extend(self._build_track_meta_args(config)) - - def _container_metadata_map_value( + def _resolve_track_assembly_and_offset_remap( self, - config: EncodeConfig, *, - default_metadata_input_index: int, - chapter_input_index: int | None, - tag_input_index: int | None, - include_copy_video_stream_passthrough: bool, - chapter_map: str | None = None, - ) -> str | None: - if config.tag_overrides is not None: - if chapter_input_index is not None: - return str(chapter_input_index) - if chapter_map is not None and chapter_map not in ("-1", ""): - return chapter_map - return "-1" - if tag_input_index is not None: - return str(tag_input_index) - if include_copy_video_stream_passthrough and self._is_video_passthrough(config): - return str(default_metadata_input_index) - if config.chapter_overrides is not None or bool(config.track_meta_edits): - return str(default_metadata_input_index) - return None - - @staticmethod - def _container_chapter_map_value( + cmd: list[str], config: EncodeConfig, - *, - default_chapter_input_index: int, - chapter_input_index: int | None, - ) -> str: - if config.chapter_overrides is not None: - if config.chapter_overrides and chapter_input_index is not None: - return str(chapter_input_index) - return "-1" - return str(default_chapter_input_index) if config.keep_chapters else "-1" - - def _build_single_pass( - self, - config: EncodeConfig, - *, - chapter_materialize_dir: Path | None = None, - ) -> list[str]: - all_sources = self._collect_all_sources(config) - source_idx = self._source_input_index_map(all_sources) - offset_lookup = self._track_time_offset_lookup(config) - - cmd: list[str] = [self._ffmpeg, "-hide_banner", "-y"] - 
cmd.extend(self._ffmpeg_progress_args()) - cmd.extend(self._hardware_input_args(config.video)) - for src in all_sources: - cmd.extend(["-i", str(src)]) - next_input_index, chapter_input_index, tag_input_index = self._prepare_container_metadata_inputs( - cmd, + plan: _EncodePlan, + source_idx: dict[Path, int], + track_input_paths: tuple[Path | str, ...], + start_input_index: int, + sync_remap: dict[tuple[Path, int, str], tuple[int, int]] | None = None, + video_default_map: tuple[int, int] | None = None, + video_fallback_input: Path | str | None = None, + ) -> tuple[_ResolvedTrackAssembly, dict[tuple[Path, int, str], tuple[int, int]]]: + track_assembly = _resolve_track_assembly_plan( config, + plan, source_idx=source_idx, - next_input_index=len(all_sources), - chapter_materialize_dir=chapter_materialize_dir, - chapter_probe_source=config.source, - ) - - vf = self._build_encoder_vf(config.video) - if vf: - cmd.extend(["-vf", vf]) - - cmd.extend(self._ffmpeg_thread_args()) - - resolved_subtitle_tracks, subtitles_resolved = self._resolved_subtitle_tracks_for_encode( - config, - all_sources, + track_input_paths=track_input_paths, + sync_remap=sync_remap, + video_default_map=video_default_map, + video_fallback_input=video_fallback_input, ) - - video_source = self._video_source_path(config) - video_stream = self._video_stream_index(config) - video_input_idx = source_idx.get(video_source, source_idx.get(config.source, 0)) - video_default_map = (video_input_idx, video_stream) - track_mappings: list[tuple[tuple[Path, int, str], Path | str, int]] = [ - self._video_track_mapping(config, all_sources[video_input_idx]) - ] - - for a in config.audio_tracks: - src_path = Path(a.source_path or config.source) - inp = source_idx.get(src_path) - if inp is None: - inp = source_idx.get(config.source, 0) - track_mappings.append(((src_path, int(a.stream_index), "audio"), all_sources[inp], int(a.stream_index))) - - for sub_path, stream_idx in resolved_subtitle_tracks: - src_path = 
Path(sub_path) - inp = source_idx.get(src_path) - if inp is None: - continue - track_mappings.append(((src_path, int(stream_idx), "subtitle"), all_sources[inp], int(stream_idx))) - - next_input_index, offset_remap = self._append_offset_aux_inputs( + _next_input_index, offset_remap = self._append_offset_aux_inputs( cmd, - self._build_offset_specs( + _build_offset_specs_plan( config, - track_mappings=track_mappings, - offset_lookup=offset_lookup, + track_mappings=list(track_assembly.track_mappings), + offset_lookup=dict(plan.offset_lookup), ), - start_input_index=next_input_index, + start_input_index=start_input_index, ) - _ = next_input_index + _ = _next_input_index + return track_assembly, offset_remap - video_map_key = self._video_map_key(config) - cmd.extend(["-map", self._video_map_arg( - video_default_map, + def _append_primary_video_map_and_codec( + self, + cmd: list[str], + *, + plan: _EncodePlan, + video_map: tuple[int, int], + offset_remap: dict[tuple[Path, int, str], tuple[int, int]], + video: VideoEncodeSettings, + bitrate_kbps: int | None = None, + include_hdr_meta: bool = True, + ) -> None: + cmd.extend(["-map", _video_map_arg_plan( + video_map, offset_remap=offset_remap, - map_key=video_map_key, + map_key=plan.video_key, )]) - cmd.extend(self._video_codec_args(config.video, config.video.bitrate_kbps)) - - if config.video.inject_hdr_meta and not config.video.tonemap_to_sdr: - cmd.extend(self._hdr_meta_args(config.video)) + self._append_video_codec_and_hdr_args( + cmd, + video, + bitrate_kbps=bitrate_kbps, + include_hdr_meta=include_hdr_meta, + ) + def _append_common_streams_and_metadata( + self, + cmd: list[str], + *, + config: EncodeConfig, + source_idx: dict[Path, int], + all_sources_count: int, + plan: _EncodePlan, + metadata_inputs: _MaterializedContainerMetadataPlan, + offset_remap: dict[tuple[Path, int, str], tuple[int, int]], + sync_remap: dict[tuple[Path, int, str], tuple[int, int]] | None = None, + strict_interleave: bool = False, + ) -> None: 
self._append_stream_maps_and_attachments( cmd, config, source_idx=source_idx, - subtitle_copy_input_indices=list(range(len(all_sources))), + subtitle_copy_input_indices=list(range(all_sources_count)), + sync_remap=sync_remap, offset_remap=offset_remap, - subtitle_tracks_override=resolved_subtitle_tracks, - force_copy_subtitles_wildcard=(config.copy_subtitles and not subtitles_resolved), + subtitle_tracks_override=list(plan.resolved_subtitle_tracks), + force_copy_subtitles_wildcard=(config.copy_subtitles and not plan.subtitles_resolved), ) - + if strict_interleave: + self._append_strict_interleave_mux_flags(cmd) self._append_container_metadata_args( cmd, config, default_metadata_input_index=0, default_chapter_input_index=0, - chapter_input_index=chapter_input_index, - tag_input_index=tag_input_index, + chapter_input_index=metadata_inputs.chapter_input_index, + tag_input_index=metadata_inputs.tag_input_index, include_copy_video_stream_passthrough=True, + plan=plan, ) - cmd.append(str(config.output)) - return cmd - def _build_two_pass( + def _encode_command_builder_callbacks(self) -> _EncodeCommandBuilderCallbacks: + return _EncodeCommandBuilderCallbacks( + ffmpeg_bin=self._ffmpeg, + ffmpeg_progress_args=self._ffmpeg_progress_args, + ffmpeg_thread_args=self._ffmpeg_thread_args, + primary_video_settings=self._primary_video_settings, + build_encode_plan=self._build_encode_plan, + size_to_bitrate_kbps=self._size_to_bitrate_kbps, + codec_domain_callbacks=self._codec_domain_callbacks, + materialize_container_metadata_inputs=self._materialize_container_metadata_inputs, + resolve_track_assembly_and_offset_remap=self._resolve_track_assembly_and_offset_remap, + append_primary_video_map_and_codec=self._append_primary_video_map_and_codec, + append_common_streams_and_metadata=self._append_common_streams_and_metadata, + prepare_multisource_sync=self._prepare_multisource_sync, + append_sync_inputs=self._append_sync_inputs, + append_offset_aux_inputs=self._append_offset_aux_inputs, 
+ video_track_mapping=self._video_track_mapping, + ) + + def _build_single_pass( self, config: EncodeConfig, *, chapter_materialize_dir: Path | None = None, - ) -> list[list[str]]: - bitrate = self._size_to_bitrate_kbps(config) - vf = self._build_encoder_vf(config.video) - - all_sources = self._collect_all_sources(config) - source_idx = self._source_input_index_map(all_sources) - offset_lookup = self._track_time_offset_lookup(config) - - def _base() -> list[str]: - c = [self._ffmpeg, "-hide_banner", "-y"] - c.extend(self._ffmpeg_progress_args()) - c.extend(self._hardware_input_args(config.video)) - for src in all_sources: - c.extend(["-i", str(src)]) - if vf: - c.extend(["-vf", vf]) - c.extend(self._ffmpeg_thread_args()) - return c - - video_source = self._video_source_path(config) - video_stream = self._video_stream_index(config) - video_input_idx = source_idx.get(video_source, source_idx.get(config.source, 0)) - video_default_map = (video_input_idx, video_stream) - - pass1 = _base() - _next1, pass1_offset_remap = self._append_offset_aux_inputs( - pass1, - self._build_offset_specs( - config, - track_mappings=[self._video_track_mapping(config, all_sources[video_input_idx])], - offset_lookup=offset_lookup, - ), - start_input_index=len(all_sources), - ) - _ = _next1 - pass1_video_map_key = self._video_map_key(config) - pass1.extend(["-map", self._video_map_arg( - video_default_map, - offset_remap=pass1_offset_remap, - map_key=pass1_video_map_key, - )]) - pass1.extend(self._video_codec_args_bitrate(config.video, bitrate)) - pass1.extend(["-pass", "1", "-an", "-f", "null", os.devnull]) - - pass2 = _base() - next_input_index, chapter_input_index, tag_input_index = self._prepare_container_metadata_inputs( - pass2, + plan: _EncodePlan | None = None, + ) -> list[str]: + return _build_single_pass_runtime( + self._encode_command_builder_callbacks(), config, - source_idx=source_idx, - next_input_index=len(all_sources), chapter_materialize_dir=chapter_materialize_dir, - 
chapter_probe_source=config.source, - ) - - resolved_subtitle_tracks, subtitles_resolved = self._resolved_subtitle_tracks_for_encode( - config, - all_sources, + plan=plan, ) - track_mappings: list[tuple[tuple[Path, int, str], Path | str, int]] = [ - self._video_track_mapping(config, all_sources[video_input_idx]) - ] - for a in config.audio_tracks: - src_path = Path(a.source_path or config.source) - inp = source_idx.get(src_path) - if inp is None: - inp = source_idx.get(config.source, 0) - track_mappings.append(((src_path, int(a.stream_index), "audio"), all_sources[inp], int(a.stream_index))) - for sub_path, stream_idx in resolved_subtitle_tracks: - src_path = Path(sub_path) - inp = source_idx.get(src_path) - if inp is None: - continue - track_mappings.append(((src_path, int(stream_idx), "subtitle"), all_sources[inp], int(stream_idx))) - next_input_index, pass2_offset_remap = self._append_offset_aux_inputs( - pass2, - self._build_offset_specs( - config, - track_mappings=track_mappings, - offset_lookup=offset_lookup, - ), - start_input_index=next_input_index, - ) - _ = next_input_index - - pass2_video_map_key = self._video_map_key(config) - pass2.extend(["-map", self._video_map_arg( - video_default_map, - offset_remap=pass2_offset_remap, - map_key=pass2_video_map_key, - )]) - pass2.extend(self._video_codec_args_bitrate(config.video, bitrate)) - pass2.extend(["-pass", "2"]) - - if config.video.inject_hdr_meta and not config.video.tonemap_to_sdr: - pass2.extend(self._hdr_meta_args(config.video)) - self._append_stream_maps_and_attachments( - pass2, - config, - source_idx=source_idx, - subtitle_copy_input_indices=list(range(len(all_sources))), - offset_remap=pass2_offset_remap, - subtitle_tracks_override=resolved_subtitle_tracks, - force_copy_subtitles_wildcard=(config.copy_subtitles and not subtitles_resolved), - ) - - self._append_container_metadata_args( - pass2, + def _build_two_pass( + self, + config: EncodeConfig, + *, + chapter_materialize_dir: Path | None = None, 
+ plan: _EncodePlan | None = None, + ) -> list[list[str]]: + return _build_two_pass_runtime( + self._encode_command_builder_callbacks(), config, - default_metadata_input_index=0, - default_chapter_input_index=0, - chapter_input_index=chapter_input_index, - tag_input_index=tag_input_index, - include_copy_video_stream_passthrough=True, + chapter_materialize_dir=chapter_materialize_dir, + plan=plan, ) - pass2.append(str(config.output)) - - return [pass1, pass2] def _build_runtime_single_pass_with_sync( self, @@ -1443,141 +1388,15 @@ def _build_runtime_single_pass_with_sync( *, chapter_materialize_dir: Path | None = None, signals: TaskSignals | None = None, + plan: _EncodePlan | None = None, ) -> tuple[list[str], LiveSyncSession | None, list[Path]]: - all_sources = self._collect_all_sources(config) - source_idx = self._source_input_index_map(all_sources) - work_dir = config.work_dir or config.source.parent - offset_lookup = self._track_time_offset_lookup(config) - - sync_remap, sync_inputs, live_session, strict_interleave = self._prepare_multisource_sync( - config=config, - all_sources=all_sources, - sync_base_input_idx=len(all_sources), - work_dir=work_dir, - signals=signals, - allow_live=True, - ) - - cmd: list[str] = [self._ffmpeg, "-hide_banner", "-y"] - cmd.extend(self._ffmpeg_progress_args()) - cmd.extend(self._hardware_input_args(config.video)) - for src in all_sources: - cmd.extend(["-i", str(src)]) - self._append_sync_inputs(cmd, sync_inputs) - - next_input_index, chapter_input_index, tag_input_index = self._prepare_container_metadata_inputs( - cmd, + return _build_runtime_single_pass_with_sync_runtime( + self._encode_command_builder_callbacks(), config, - source_idx=source_idx, - next_input_index=len(all_sources) + len(sync_inputs), chapter_materialize_dir=chapter_materialize_dir, - chapter_probe_source=config.source, - ) - - vf = self._build_encoder_vf(config.video) - if vf: - cmd.extend(["-vf", vf]) - - cmd.extend(self._ffmpeg_thread_args()) - - 
resolved_subtitle_tracks, subtitles_resolved = self._resolved_subtitle_tracks_for_encode( - config, - all_sources, - ) - - track_input_paths: list[Path | str] = [*all_sources, *sync_inputs] - - def _input_path(idx: int, fallback: Path | str) -> Path | str: - if 0 <= idx < len(track_input_paths): - return track_input_paths[idx] - return fallback - - video_key = self._video_map_key(config) - video_source = self._video_source_path(config) - video_stream = self._video_stream_index(config) - video_default_map = sync_remap.get( - video_key, - (source_idx.get(video_source, source_idx.get(config.source, 0)), video_stream), - ) - track_mappings: list[tuple[tuple[Path, int, str], Path | str, int]] = [ - ( - video_key, - _input_path(int(video_default_map[0]), video_source), - int(video_default_map[1]), - ) - ] - - for a in config.audio_tracks: - src_path = Path(a.source_path or config.source) - remapped = sync_remap.get((src_path, int(a.stream_index), "audio")) - if remapped is not None: - inp, stream_idx = remapped - else: - inp = source_idx.get(src_path) - if inp is None: - inp = source_idx.get(config.source, 0) - stream_idx = int(a.stream_index) - track_mappings.append(((src_path, int(a.stream_index), "audio"), _input_path(int(inp), src_path), int(stream_idx))) - - for src_path_raw, stream_idx_raw in resolved_subtitle_tracks: - src_path = Path(src_path_raw) - remapped = sync_remap.get((src_path, int(stream_idx_raw), "subtitle")) - if remapped is not None: - inp, stream_idx = remapped - else: - inp = source_idx.get(src_path) - if inp is None: - continue - stream_idx = int(stream_idx_raw) - track_mappings.append(((src_path, int(stream_idx_raw), "subtitle"), _input_path(int(inp), src_path), int(stream_idx))) - - next_input_index, offset_remap = self._append_offset_aux_inputs( - cmd, - self._build_offset_specs( - config, - track_mappings=track_mappings, - offset_lookup=offset_lookup, - ), - start_input_index=next_input_index, - ) - _ = next_input_index - - video_map_key = 
video_key - cmd.extend(["-map", self._video_map_arg( - (int(video_default_map[0]), int(video_default_map[1])), - offset_remap=offset_remap, - map_key=video_map_key, - )]) - cmd.extend(self._video_codec_args(config.video, config.video.bitrate_kbps)) - - if config.video.inject_hdr_meta and not config.video.tonemap_to_sdr: - cmd.extend(self._hdr_meta_args(config.video)) - - self._append_stream_maps_and_attachments( - cmd, - config, - source_idx=source_idx, - subtitle_copy_input_indices=list(range(len(all_sources))), - sync_remap=sync_remap, - offset_remap=offset_remap, - subtitle_tracks_override=resolved_subtitle_tracks, - force_copy_subtitles_wildcard=(config.copy_subtitles and not subtitles_resolved), - ) - - if strict_interleave: - self._append_strict_interleave_mux_flags(cmd) - - self._append_container_metadata_args( - cmd, - config, - default_metadata_input_index=0, - default_chapter_input_index=0, - chapter_input_index=chapter_input_index, - tag_input_index=tag_input_index, - include_copy_video_stream_passthrough=True, + signals=signals, + plan=plan, ) - cmd.append(str(config.output)) - return cmd, live_session, self._sync_cleanup_paths(sync_inputs) def _build_runtime_two_pass_with_sync( self, @@ -1585,548 +1404,293 @@ def _build_runtime_two_pass_with_sync( *, chapter_materialize_dir: Path | None = None, signals: TaskSignals | None = None, + plan: _EncodePlan | None = None, ) -> tuple[list[list[str]], LiveSyncSession | None, list[Path]]: - bitrate = self._size_to_bitrate_kbps(config) - vf = self._build_encoder_vf(config.video) - - all_sources = self._collect_all_sources(config) - source_idx = self._source_input_index_map(all_sources) - work_dir = config.work_dir or config.source.parent - offset_lookup = self._track_time_offset_lookup(config) - - sync_remap, sync_inputs, live_session, strict_interleave = self._prepare_multisource_sync( - config=config, - all_sources=all_sources, - sync_base_input_idx=len(all_sources), - work_dir=work_dir, - signals=signals, - 
allow_live=False, - ) - - def _base(include_sync_inputs: bool) -> list[str]: - c = [self._ffmpeg, "-hide_banner", "-y"] - c.extend(self._ffmpeg_progress_args()) - c.extend(self._hardware_input_args(config.video)) - for src in all_sources: - c.extend(["-i", str(src)]) - if include_sync_inputs: - self._append_sync_inputs(c, sync_inputs) - if vf: - c.extend(["-vf", vf]) - c.extend(self._ffmpeg_thread_args()) - return c - - video_key = self._video_map_key(config) - video_source = self._video_source_path(config) - video_stream = self._video_stream_index(config) - video_default_map = ( - source_idx.get(video_source, source_idx.get(config.source, 0)), - video_stream, - ) - - pass1 = _base(False) - _next1, pass1_offset_remap = self._append_offset_aux_inputs( - pass1, - self._build_offset_specs( - config, - track_mappings=[self._video_track_mapping(config, video_source)], - offset_lookup=offset_lookup, - ), - start_input_index=len(all_sources), - ) - _ = _next1 - pass1_video_map_key = video_key - pass1.extend(["-map", self._video_map_arg( - video_default_map, - offset_remap=pass1_offset_remap, - map_key=pass1_video_map_key, - )]) - pass1.extend(self._video_codec_args_bitrate(config.video, bitrate)) - pass1.extend(["-pass", "1", "-an", "-f", "null", os.devnull]) - - pass2 = _base(True) - next_input_index, chapter_input_index, tag_input_index = self._prepare_container_metadata_inputs( - pass2, + return _build_runtime_two_pass_with_sync_runtime( + self._encode_command_builder_callbacks(), config, - source_idx=source_idx, - next_input_index=len(all_sources) + len(sync_inputs), chapter_materialize_dir=chapter_materialize_dir, - chapter_probe_source=config.source, - ) - - resolved_subtitle_tracks, subtitles_resolved = self._resolved_subtitle_tracks_for_encode( - config, - all_sources, - ) - - track_input_paths: list[Path | str] = [*all_sources, *sync_inputs] - - def _input_path(idx: int, fallback: Path | str) -> Path | str: - if 0 <= idx < len(track_input_paths): - return 
track_input_paths[idx] - return fallback - - video_base_map = sync_remap.get(video_key, video_default_map) - track_mappings: list[tuple[tuple[Path, int, str], Path | str, int]] = [ - ( - video_key, - _input_path(int(video_base_map[0]), video_source), - int(video_base_map[1]), - ) - ] - - for a in config.audio_tracks: - src_path = Path(a.source_path or config.source) - remapped = sync_remap.get((src_path, int(a.stream_index), "audio")) - if remapped is not None: - inp, stream_idx = remapped - else: - inp = source_idx.get(src_path) - if inp is None: - inp = source_idx.get(config.source, 0) - stream_idx = int(a.stream_index) - track_mappings.append(((src_path, int(a.stream_index), "audio"), _input_path(int(inp), src_path), int(stream_idx))) - - for src_path_raw, stream_idx_raw in resolved_subtitle_tracks: - src_path = Path(src_path_raw) - remapped = sync_remap.get((src_path, int(stream_idx_raw), "subtitle")) - if remapped is not None: - inp, stream_idx = remapped - else: - inp = source_idx.get(src_path) - if inp is None: - continue - stream_idx = int(stream_idx_raw) - track_mappings.append(((src_path, int(stream_idx_raw), "subtitle"), _input_path(int(inp), src_path), int(stream_idx))) - - next_input_index, pass2_offset_remap = self._append_offset_aux_inputs( - pass2, - self._build_offset_specs( - config, - track_mappings=track_mappings, - offset_lookup=offset_lookup, - ), - start_input_index=next_input_index, - ) - _ = next_input_index - - pass2_video_map_key = video_key - pass2.extend(["-map", self._video_map_arg( - (int(video_base_map[0]), int(video_base_map[1])), - offset_remap=pass2_offset_remap, - map_key=pass2_video_map_key, - )]) - pass2.extend(self._video_codec_args_bitrate(config.video, bitrate)) - pass2.extend(["-pass", "2"]) - - if config.video.inject_hdr_meta and not config.video.tonemap_to_sdr: - pass2.extend(self._hdr_meta_args(config.video)) - self._append_stream_maps_and_attachments( - pass2, - config, - source_idx=source_idx, - 
subtitle_copy_input_indices=list(range(len(all_sources))), - sync_remap=sync_remap, - offset_remap=pass2_offset_remap, - subtitle_tracks_override=resolved_subtitle_tracks, - force_copy_subtitles_wildcard=(config.copy_subtitles and not subtitles_resolved), - ) - if strict_interleave: - self._append_strict_interleave_mux_flags(pass2) - self._append_container_metadata_args( - pass2, - config, - default_metadata_input_index=0, - default_chapter_input_index=0, - chapter_input_index=chapter_input_index, - tag_input_index=tag_input_index, - include_copy_video_stream_passthrough=True, + signals=signals, + plan=plan, ) - pass2.append(str(config.output)) - return [pass1, pass2], live_session, self._sync_cleanup_paths(sync_inputs) # ------------------------------------------------------------------ # Arguments par codec # ------------------------------------------------------------------ - def _video_codec_args(self, v: VideoEncodeSettings, bitrate_kbps: int) -> list[str]: - if v.quality_mode == QualityMode.CRF: - return self._video_codec_args_crf(v) - return self._video_codec_args_bitrate(v, bitrate_kbps) - - def _video_codec_args_crf(self, v: VideoEncodeSettings) -> list[str]: - match v.codec: - case "copy": - return ["-c:v", "copy"] - case "libx265": - args = ["-c:v", "libx265", "-crf", str(v.crf), "-preset", v.preset] - x265 = self._x265_params(v) - if x265: - args.extend(["-x265-params", x265]) - return args - case "libx264": - args = ["-c:v", "libx264", "-crf", str(v.crf), "-preset", v.preset] - return args - case "libsvtav1": - args = ["-c:v", "libsvtav1", "-crf", str(v.crf), "-preset", v.preset] - if v.extra_params: - args.extend(["-svtav1-params", v.extra_params]) - return args - case "hevc_nvenc": - return ["-c:v", "hevc_nvenc", "-rc:v", "vbr", "-cq:v", str(v.crf), "-preset:v", v.preset] - case "hevc_amf": - args = ["-c:v", "hevc_amf", "-rc", "cqp", "-qp_p", str(v.crf), "-qp_i", str(v.crf)] - if v.preset: - args.extend(["-quality", v.preset]) - return args - case 
"hevc_qsv": - args = ["-c:v", "hevc_qsv", "-global_quality", str(v.crf), - "-look_ahead", "1", "-async_depth", "4"] - if v.preset: - args.extend(["-preset", v.preset]) - return args - case "hevc_vaapi": - return ["-c:v", "hevc_vaapi", "-rc_mode", "CQP", "-qp", str(v.crf), - "-compression_level", (v.preset or "4"), - "-async_depth", "4"] - case "h264_nvenc": - return ["-c:v", "h264_nvenc", "-rc:v", "vbr", "-cq:v", str(v.crf), "-preset:v", v.preset] - case "h264_amf": - args = ["-c:v", "h264_amf", "-rc", "cqp", "-qp_p", str(v.crf), "-qp_i", str(v.crf)] - if v.preset: - args.extend(["-quality", v.preset]) - return args - case "h264_qsv": - args = ["-c:v", "h264_qsv", "-global_quality", str(v.crf), "-async_depth", "4"] - if v.preset: - args.extend(["-preset", v.preset]) - return args - case "h264_vaapi": - return ["-c:v", "h264_vaapi", "-rc_mode", "CQP", "-qp", str(v.crf), - "-compression_level", (v.preset or "4"), - "-async_depth", "4"] - case "av1_nvenc": - return ["-c:v", "av1_nvenc", "-rc:v", "vbr", "-cq:v", str(v.crf), "-preset:v", v.preset] - case "av1_amf": - args = ["-c:v", "av1_amf", "-rc", "cqp", "-qp_p", str(v.crf), "-qp_i", str(v.crf)] - if v.preset: - args.extend(["-quality", v.preset]) - return args - case "av1_qsv": - args = ["-c:v", "av1_qsv", "-global_quality", str(v.crf), "-async_depth", "4"] - if v.preset: - args.extend(["-preset", v.preset]) - return args - case "av1_vaapi": - return ["-c:v", "av1_vaapi", "-rc_mode", "CQP", "-qp", str(v.crf), - "-compression_level", (v.preset or "4"), - "-async_depth", "4"] - case _: - return ["-c:v", v.codec, "-crf", str(v.crf)] - - def _video_codec_args_bitrate(self, v: VideoEncodeSettings, bitrate_kbps: int) -> list[str]: - match v.codec: - case "copy": - return ["-c:v", "copy"] - case "libx265": - args = ["-c:v", "libx265", "-b:v", f"{bitrate_kbps}k", "-preset", v.preset] - x265 = self._x265_params(v) - if x265: - args.extend(["-x265-params", x265]) - return args - case "libx264": - return ["-c:v", "libx264", 
"-b:v", f"{bitrate_kbps}k", "-preset", v.preset] - case "libsvtav1": - return ["-c:v", "libsvtav1", "-b:v", f"{bitrate_kbps}k", "-preset", v.preset] - case "hevc_nvenc": - return ["-c:v", "hevc_nvenc", "-b:v", f"{bitrate_kbps}k", "-preset:v", v.preset] - case "hevc_amf": - args = ["-c:v", "hevc_amf", "-b:v", f"{bitrate_kbps}k"] - if v.preset: - args.extend(["-quality", v.preset]) - return args - case "hevc_qsv": - args = ["-c:v", "hevc_qsv", "-b:v", f"{bitrate_kbps}k", "-async_depth", "4"] - if v.preset: - args.extend(["-preset", v.preset]) - return args - case "hevc_vaapi": - return ["-c:v", "hevc_vaapi", "-rc_mode", "VBR", "-b:v", f"{bitrate_kbps}k", - "-compression_level", (v.preset or "4"), - "-async_depth", "4"] - case "h264_nvenc": - return ["-c:v", "h264_nvenc", "-b:v", f"{bitrate_kbps}k", "-preset:v", v.preset] - case "h264_amf": - args = ["-c:v", "h264_amf", "-b:v", f"{bitrate_kbps}k"] - if v.preset: - args.extend(["-quality", v.preset]) - return args - case "h264_qsv": - args = ["-c:v", "h264_qsv", "-b:v", f"{bitrate_kbps}k", "-async_depth", "4"] - if v.preset: - args.extend(["-preset", v.preset]) - return args - case "h264_vaapi": - return ["-c:v", "h264_vaapi", "-rc_mode", "VBR", "-b:v", f"{bitrate_kbps}k", - "-compression_level", (v.preset or "4"), - "-async_depth", "4"] - case "av1_nvenc": - return ["-c:v", "av1_nvenc", "-b:v", f"{bitrate_kbps}k", "-preset:v", v.preset] - case "av1_amf": - args = ["-c:v", "av1_amf", "-b:v", f"{bitrate_kbps}k"] - if v.preset: - args.extend(["-quality", v.preset]) - return args - case "av1_qsv": - args = ["-c:v", "av1_qsv", "-b:v", f"{bitrate_kbps}k", "-async_depth", "4"] - if v.preset: - args.extend(["-preset", v.preset]) - return args - case "av1_vaapi": - return ["-c:v", "av1_vaapi", "-rc_mode", "VBR", "-b:v", f"{bitrate_kbps}k", - "-compression_level", (v.preset or "4"), - "-async_depth", "4"] - case _: - return ["-c:v", v.codec, "-b:v", f"{bitrate_kbps}k"] - - def _build_vf(self, v: VideoEncodeSettings) -> str: - 
"""Filtre vidéo pour le tone mapping HDR→SDR (BT.2020 PQ → BT.709).""" - if not v.tonemap_to_sdr: - return "" - algo = v.tonemap_algorithm or "hable" - return ( - "zscale=transfer=linear:npl=100," - "format=gbrpf32le," - "zscale=primaries=bt709," - f"tonemap=tonemap={algo}:desat=0," - "zscale=transfer=bt709:matrix=bt709:range=tv," - "format=yuv420p" + def _codec_domain_callbacks(self) -> _EncodeCodecDomainCallbacks: + return _EncodeCodecDomainCallbacks( + platform=sys.platform, + vaapi_device=self._vaapi_device(), + qsv_device=self._qsv_device(), + amf_device=self._amf_device(), + nvenc_device=self._nvenc_device(), ) - def _build_encoder_vf(self, v: VideoEncodeSettings) -> str: - """ - Retourne la chaîne de filtres finale adaptée au codec de sortie. - - Encodeurs VAAPI : - - Sans tone-mapping : décodage déjà fait en VAAPI (frames sur GPU), - pas besoin de `hwupload` — l'encodeur `*_vaapi` consomme la - surface directement. - - Avec tone-mapping : zscale/tonemap tournent en CPU et produisent - des frames nv12 CPU, il faut alors remonter sur GPU via - `format=nv12,hwupload`. - """ - vf = self._build_vf(v) - if v.codec not in _VAAPI_CODECS: - return vf - if v.tonemap_to_sdr: - vaapi_upload = "format=nv12,hwupload" - return f"{vf},{vaapi_upload}" if vf else vaapi_upload - return vf - - def _hardware_input_args(self, v: VideoEncodeSettings) -> list[str]: - """Flags ffmpeg requis avant les entrées pour certains encodeurs matériels. - - - `-vaapi_device` : option globale pour les encodeurs `_vaapi`. - - `-hwaccel` : option d'entrée appliquée au prochain `-i` uniquement - (la source vidéo principale est toujours le premier input). Permet - de décoder en GPU et d'envoyer les frames directement à l'encodeur - sans passer par le CPU. - - Tone-mapping HDR→SDR : utilise le pipeline zscale CPU, donc on - désactive l'option `hwaccel_output_format` (le décodage hardware - reste possible mais ffmpeg fait un download automatique). 
- """ - args: list[str] = [] - tonemap = bool(v.tonemap_to_sdr) - - if v.codec in _VAAPI_CODECS: - vaapi_device = self._vaapi_device() - if vaapi_device: - args.extend(["-vaapi_device", vaapi_device]) - if not tonemap: - args.extend([ - "-hwaccel", "vaapi", - "-hwaccel_output_format", "vaapi", - ]) - return args - - if tonemap: - return args - - if v.codec in {"hevc_nvenc", "h264_nvenc", "av1_nvenc"}: - args.extend([ - "-hwaccel", "cuda", - "-hwaccel_output_format", "cuda", - ]) - elif v.codec in {"hevc_qsv", "h264_qsv", "av1_qsv"}: - args.extend([ - "-hwaccel", "qsv", - "-hwaccel_output_format", "qsv", - ]) - elif v.codec in {"hevc_amf", "h264_amf", "av1_amf"}: - if sys.platform == "win32": - args.extend([ - "-hwaccel", "d3d11va", - "-hwaccel_output_format", "d3d11", - ]) + @staticmethod + def _is_h264_codec(codec: str) -> bool: + return is_h264_video_codec(codec) - return args + def _nvenc_device_args(self) -> list[str]: + if sys.platform != "win32": + return [] + nvenc_device = self._nvenc_device() + if not nvenc_device: + return [] + return ["-gpu", nvenc_device] @staticmethod def _vaapi_device() -> str | None: - """Retourne le premier render node VAAPI disponible, ou None.""" - for i in range(8): - node = Path(f"/dev/dri/renderD{128 + i}") - if node.exists(): - return str(node) - return None + """Retourne le render node Linux ciblé pour VAAPI, ou None.""" + return select_linux_hwaccel_device("hevc_vaapi") - def _x265_params(self, v: VideoEncodeSettings) -> str: - """ - Construit la valeur de -x265-params en fusionnant extra_params et les - métadonnées HDR10 statiques (master-display, max-cll) si inject_hdr_meta est actif. + def _qsv_device(self) -> str | None: + """Retourne le device ciblé pour QSV selon l'OS, ou None.""" + if sys.platform == "win32": + return select_windows_hwaccel_device("hevc_qsv", ffmpeg_bin=self._ffmpeg) + return select_linux_hwaccel_device("hevc_qsv") - Retourne une chaîne vide si aucun paramètre n'est à passer. 
- """ - parts: list[str] = [] - if v.extra_params: - parts.append(v.extra_params.strip(":")) - if v.inject_hdr_meta and not v.tonemap_to_sdr: - if v.master_display: - parts.append(f"master-display={v.master_display}") - if v.max_cll: - parts.append(f"max-cll={v.max_cll}") - return ":".join(p for p in parts if p) - - def _hdr_meta_args(self, v: VideoEncodeSettings) -> list[str]: - """ - Flags de couleur container-level + métadonnées SEI selon le codec. + def _amf_device(self) -> str | None: + if sys.platform != "win32": + return None + return select_windows_hwaccel_device("hevc_amf", ffmpeg_bin=self._ffmpeg) - Couleur (valides pour tout codec HEVC/AV1 re-encodé) : - -color_primaries bt2020 -color_trc smpte2084 -colorspace bt2020nc + def _nvenc_device(self) -> str | None: + if sys.platform != "win32": + return None + return select_windows_hwaccel_device("hevc_nvenc", ffmpeg_bin=self._ffmpeg) - master_display / max_cll par codec : - libx265 → injectés via -x265-params (dans _video_codec_args_crf/bitrate) - hevc_nvenc → options privées du codec (-master_display / -max_cll) - hevc_amf, hevc_qsv, libsvtav1 → pas de mécanisme standardisé → ignorés - copy, h264_*, libx264 → couleur non applicable / pas de HDR10 → rien - """ - if v.codec in ("copy", "libx264", "h264_nvenc", "h264_amf", "h264_qsv"): - return [] - args = ["-color_primaries", "bt2020", "-color_trc", "smpte2084", "-colorspace", "bt2020nc"] - if v.codec == "hevc_nvenc": - if v.master_display: - args.extend(["-master_display", v.master_display]) - if v.max_cll: - args.extend(["-max_cll", v.max_cll]) - # libx265 : master_display/max_cll déjà fusionnés dans -x265-params - # hevc_amf, hevc_qsv, libsvtav1 : couleur only, SEI non géré - return args + def _video_resource_policy(self) -> _VideoPreparationResourcePolicy: + effective_threads = self._ffmpeg_threads if self._ffmpeg_threads > 0 else _default_ffmpeg_thread_count() + return _VideoPreparationResourcePolicy( + vaapi_device=self._vaapi_device(), + 
ffmpeg_threads=effective_threads, + ) - def _audio_codec_args(self, out_idx: int, a: AudioTrackSettings) -> list[str]: - args: list[str] = [] - needs_downmix_51 = self._needs_ac3_51_downmix(a) - bitrate_kbps = normalize_audio_bitrate_kbps( - a.codec, - a.bitrate_kbps, - a.input_channels, - None, - a.input_channel_layout, - ) - match a.codec: - case "copy": - args.extend([f"-c:a:{out_idx}", "copy"]) - # truehd_core est un bitstream filter de passthrough. - # Sur une piste réencodée, ffmpeg l'applique à la sortie encodée - # (ex. eac3) et échoue car le codec n'est plus TrueHD. - if a.extract_truehd_core: - args.extend([f"-bsf:a:{out_idx}", "truehd_core"]) - case "aac": - args.extend([f"-c:a:{out_idx}", "aac", f"-b:a:{out_idx}", f"{bitrate_kbps}k"]) - case "ac3": - args.extend([f"-c:a:{out_idx}", "ac3", f"-b:a:{out_idx}", f"{bitrate_kbps}k"]) - case "eac3": - args.extend([f"-c:a:{out_idx}", "eac3", f"-b:a:{out_idx}", f"{bitrate_kbps}k"]) - case "flac": - args.extend([f"-c:a:{out_idx}", "flac"]) - case _: - args.extend([f"-c:a:{out_idx}", a.codec]) - if needs_downmix_51: - args.extend([f"-ac:a:{out_idx}", "6", f"-channel_layout:a:{out_idx}", "5.1"]) - return args + def _video_encode_resource_key(self, video: VideoEncodeSettings) -> str: + return self._video_resource_policy().resource_key(video) - @staticmethod - def _needs_ac3_51_downmix(a: AudioTrackSettings) -> bool: - """ - Retourne True si la piste source doit être ramenée en 5.1 pour AC-3/E-AC-3. 
+ def _parallel_video_min_available_ram_bytes(self) -> int: + total_ram = EncodeWorkflow._total_ram_bytes() + if total_ram <= 0 or self._ram_buffer_threshold_pct <= 0: + return 0 + return int(total_ram * self._ram_buffer_threshold_pct / 100) - Règle métier : - - si codec cible ∈ {ac3, eac3} - - et source 7.1 (8 canaux ou layout contenant "7.1") - """ - if a.codec not in {"ac3", "eac3"}: - return False - if (a.input_channels or 0) >= 8: - return True - layout = (a.input_channel_layout or "").lower() - return "7.1" in layout + def _video_prep_estimated_ram_bytes(self, spec: _VideoTrackPrepSpec) -> int: + source_size = 0 + try: + if spec.source.exists(): + source_size = max(0, spec.source.stat().st_size) + except OSError: + source_size = 0 + return self._video_resource_policy().estimated_ram_bytes( + spec.video, + source_size=source_size, + ) # ------------------------------------------------------------------ # Commandes spécialisées pour _run_with_metadata_inject # ------------------------------------------------------------------ - def _build_video_only_cmd(self, config: EncodeConfig, output_hevc: Path) -> list[str]: - """ - ffmpeg : vidéo seule, sortie HEVC brut (-f hevc, sans container). - Pas d'audio ni de subs. Utilisé pour encoder directement vers un - flux HEVC injectable, sans passer par un MKV intermédiaire. 
- """ + @staticmethod + def _offset_input_args(offset_ms: int) -> list[str]: + if offset_ms == 0: + return [] + if offset_ms > 0: + return ["-itsoffset", f"{offset_ms / 1000.0:.3f}"] + return ["-ss", f"{abs(offset_ms) / 1000.0:.3f}"] + + def _size_to_bitrate_kbps_for_video(self, config: EncodeConfig, video: VideoEncodeSettings) -> int: + duration = config.duration_s or 3600.0 + total_bits = video.target_size_mb * 8 * 1024 * 1024 + video_bits = max(total_bits, int(duration * 500_000)) + return max(500, int(video_bits / duration / 1000)) + + @staticmethod + def _two_pass_log_prefix(work_dir: Path, token: str) -> Path: + safe_token = re.sub(r"[^A-Za-z0-9_.-]+", "_", str(token)).strip("._") + if not safe_token: + safe_token = "video" + return work_dir / f"ffmpeg2pass-{safe_token}" + + def _build_video_track_base_cmd( + self, + *, + video: VideoEncodeSettings, + source: Path, + stream_index: int, + offset_ms: int = 0, + thread_count: int | None = None, + ) -> list[str]: cmd = [self._ffmpeg, "-hide_banner", "-y"] cmd.extend(self._ffmpeg_progress_args()) - cmd.extend(self._hardware_input_args(config.video)) - cmd.extend(["-i", str(self._video_source_path(config))]) - vf = self._build_encoder_vf(config.video) + cmd.extend(self._offset_input_args(offset_ms)) + cmd.extend(_hardware_input_args_domain(video, callbacks=self._codec_domain_callbacks())) + cmd.extend(["-i", str(source)]) + vf = _build_encoder_vf_domain(video, callbacks=self._codec_domain_callbacks()) if vf: cmd.extend(["-vf", vf]) - cmd.extend(self._ffmpeg_thread_args()) - cmd.extend(["-map", f"0:{self._video_stream_index(config)}"]) - cmd.extend(self._video_codec_args(config.video, config.video.bitrate_kbps)) - if config.video.inject_hdr_meta and not config.video.tonemap_to_sdr: - cmd.extend(self._hdr_meta_args(config.video)) + cmd.extend(self._ffmpeg_thread_args(thread_count)) + cmd.extend(["-map", f"0:{int(stream_index)}"]) + return cmd + + def _append_video_codec_and_hdr_args( + self, + cmd: list[str], + 
video: VideoEncodeSettings, + *, + bitrate_kbps: int | None = None, + include_hdr_meta: bool = True, + ) -> None: + callbacks = self._codec_domain_callbacks() + if bitrate_kbps is None: + cmd.extend(_video_codec_args_domain(video, video.bitrate_kbps, callbacks=callbacks)) + else: + cmd.extend(_video_codec_args_bitrate_domain(video, bitrate_kbps, callbacks=callbacks)) + if include_hdr_meta and video.inject_hdr_meta and not video.tonemap_to_sdr: + cmd.extend(_hdr_meta_args_domain(video)) + + def _build_multi_video_track_encode_commands( + self, + config: EncodeConfig, + video: VideoEncodeSettings, + source: Path, + output_path: Path, + *, + offset_ms: int = 0, + passlog_prefix: Path | None = None, + thread_count: int | None = None, + for_preview: bool = False, + ) -> list[list[str]]: + _ = for_preview + stream_index = self._video_stream_from_settings(video) + + if video.quality_mode == QualityMode.SIZE: + bitrate = self._size_to_bitrate_kbps_for_video(config, video) + pass1 = self._build_video_track_base_cmd( + video=video, + source=source, + stream_index=stream_index, + offset_ms=offset_ms, + thread_count=thread_count, + ) + self._append_video_codec_and_hdr_args(pass1, video, bitrate_kbps=bitrate, include_hdr_meta=False) + if passlog_prefix is not None: + pass1.extend(["-passlogfile", str(passlog_prefix)]) + pass1.extend(["-pass", "1", "-an", "-sn", "-dn", "-f", "null", os.devnull]) + + pass2 = self._build_video_track_base_cmd( + video=video, + source=source, + stream_index=stream_index, + offset_ms=offset_ms, + thread_count=thread_count, + ) + self._append_video_codec_and_hdr_args(pass2, video, bitrate_kbps=bitrate) + if passlog_prefix is not None: + pass2.extend(["-passlogfile", str(passlog_prefix)]) + pass2.extend(["-pass", "2"]) + pass2.extend(["-an", "-sn", "-dn", str(output_path)]) + return [pass1, pass2] + + cmd = self._build_video_track_base_cmd( + video=video, + source=source, + stream_index=stream_index, + offset_ms=offset_ms, + thread_count=thread_count, 
+ ) + self._append_video_codec_and_hdr_args(cmd, video) + cmd.extend(["-an", "-sn", "-dn", str(output_path)]) + return [cmd] + + def _build_video_only_cmd(self, config: EncodeConfig, output_hevc: Path) -> list[str]: + """Construit la commande ffmpeg vidéo-seule vers un flux HEVC brut.""" + video = self._primary_video_settings(config) + return self._build_video_only_cmd_for_track( + config, + video, + self._video_source_path(config), + output_hevc, + ) + + def _build_video_only_cmd_for_track( + self, + config: EncodeConfig, + video: VideoEncodeSettings, + source: Path, + output_hevc: Path, + *, + offset_ms: int = 0, + thread_count: int | None = None, + ) -> list[str]: + cmd = self._build_video_track_base_cmd( + video=video, + source=source, + stream_index=self._video_stream_from_settings(video), + offset_ms=offset_ms, + thread_count=thread_count, + ) + self._append_video_codec_and_hdr_args(cmd, video) cmd.extend(["-an", "-f", "hevc", str(output_hevc)]) return cmd def _build_video_only_two_pass( self, config: EncodeConfig, output_hevc: Path ) -> list[list[str]]: - """ - Deux passes ffmpeg : vidéo seule, sortie HEVC brut. - Utilisé en mode SIZE pour l'étape vidéo de _run_with_metadata_inject. 
- """ - bitrate = self._size_to_bitrate_kbps(config) - vf = self._build_encoder_vf(config.video) - - def _base() -> list[str]: - c = [self._ffmpeg, "-hide_banner", "-y"] - c.extend(self._ffmpeg_progress_args()) - c.extend(self._hardware_input_args(config.video)) - c.extend(["-i", str(self._video_source_path(config))]) - if vf: - c.extend(["-vf", vf]) - c.extend(self._ffmpeg_thread_args()) - c.extend(["-map", f"0:{self._video_stream_index(config)}"]) - c.extend(self._video_codec_args_bitrate(config.video, bitrate)) - return c - - pass1 = _base() + ["-pass", "1", "-an", "-f", "null", os.devnull] - pass2 = _base() + ["-pass", "2"] - if config.video.inject_hdr_meta and not config.video.tonemap_to_sdr: - pass2.extend(self._hdr_meta_args(config.video)) + """Construit les 2 passes ffmpeg vidéo-seule vers un flux HEVC brut.""" + video = self._primary_video_settings(config) + return self._build_video_only_two_pass_for_track( + config, + video, + self._video_source_path(config), + output_hevc, + bitrate_kbps=self._size_to_bitrate_kbps(config), + ) + + def _build_video_only_two_pass_for_track( + self, + config: EncodeConfig, + video: VideoEncodeSettings, + source: Path, + output_hevc: Path, + *, + offset_ms: int = 0, + passlog_prefix: Path | None = None, + thread_count: int | None = None, + bitrate_kbps: int | None = None, + ) -> list[list[str]]: + bitrate = bitrate_kbps if bitrate_kbps is not None else self._size_to_bitrate_kbps_for_video(config, video) + stream_index = self._video_stream_from_settings(video) + pass1 = self._build_video_track_base_cmd( + video=video, + source=source, + stream_index=stream_index, + offset_ms=offset_ms, + thread_count=thread_count, + ) + self._append_video_codec_and_hdr_args(pass1, video, bitrate_kbps=bitrate, include_hdr_meta=False) + if passlog_prefix is not None: + pass1.extend(["-passlogfile", str(passlog_prefix)]) + pass1.extend(["-pass", "1", "-an", "-f", "null", os.devnull]) + pass2 = self._build_video_track_base_cmd( + video=video, + 
source=source, + stream_index=stream_index, + offset_ms=offset_ms, + thread_count=thread_count, + ) + self._append_video_codec_and_hdr_args(pass2, video, bitrate_kbps=bitrate) + if passlog_prefix is not None: + pass2.extend(["-passlogfile", str(passlog_prefix)]) + pass2.extend(["-pass", "2"]) pass2.extend(["-an", "-f", "hevc", str(output_hevc)]) return [pass1, pass2] def _size_to_bitrate_kbps(self, config: EncodeConfig) -> int: + video = self._primary_video_settings(config) duration = config.duration_s or 3600.0 - total_bits = config.video.target_size_mb * 8 * 1024 * 1024 + total_bits = video.target_size_mb * 8 * 1024 * 1024 audio_bps = sum( normalize_audio_bitrate_kbps( a.codec, @@ -2145,217 +1709,58 @@ def _size_to_bitrate_kbps(self, config: EncodeConfig) -> int: # Helpers RAM / buffer — cross-platform (Linux · macOS · Windows) # ------------------------------------------------------------------ - @staticmethod - def _total_ram_bytes() -> int: - """ - Retourne la RAM physique totale en octets. - Linux : /proc/meminfo · macOS : sysctl hw.memsize · Windows : ctypes GlobalMemoryStatusEx. - Retourne 0 si la valeur ne peut pas être lue. - """ - try: - if sys.platform == "linux": - text = Path("/proc/meminfo").read_text(encoding="ascii") - m = re.search(r"MemTotal:\s+(\d+)\s+kB", text) - return int(m.group(1)) * 1024 if m else 0 - if sys.platform == "darwin": - r = subprocess.run( - ["sysctl", "-n", "hw.memsize"], - capture_output=True, check=False, timeout=5, **subprocess_text_kwargs(), - ) - v = r.stdout.strip() - return int(v) if r.returncode == 0 and v.isdigit() else 0 - if sys.platform == "win32": - return EncodeWorkflow._win_mem_status().ullTotalPhys - except Exception: - pass - return 0 - - @staticmethod - def _available_ram_bytes() -> int: - """ - Retourne la RAM disponible en octets (MemAvailable sur Linux, équivalent sur macOS/Windows). - Retourne 0 si non déterminable. 
- """ - try: - if sys.platform == "linux": - text = Path("/proc/meminfo").read_text(encoding="ascii") - m = re.search(r"MemAvailable:\s+(\d+)\s+kB", text) - return int(m.group(1)) * 1024 if m else 0 - if sys.platform == "darwin": - return EncodeWorkflow._macos_available_ram() - if sys.platform == "win32": - return EncodeWorkflow._win_mem_status().ullAvailPhys - except Exception: - pass - return 0 - - @staticmethod - def _macos_available_ram() -> int: - """RAM disponible sur macOS via vm_stat (free + inactive + speculative + purgeable).""" - r = subprocess.run( - ["vm_stat"], capture_output=True, check=False, timeout=5, **subprocess_text_kwargs() - ) - if r.returncode != 0: - return 0 - page_m = re.search(r"page size of (\d+) bytes", r.stdout) - page = int(page_m.group(1)) if page_m else 4096 - pages = 0 - for field in ("Pages free", "Pages inactive", "Pages speculative", "Pages purgeable"): - m = re.search(rf"{re.escape(field)}:\s*(\d+)", r.stdout) - if m: - pages += int(m.group(1)) - return pages * page - - @staticmethod - def _win_mem_status(): - """Retourne une structure MEMORYSTATUSEX remplie (Windows uniquement).""" - class _MEMSTATEX(ctypes.Structure): - _fields_ = [ - ("dwLength", ctypes.c_ulong), - ("dwMemoryLoad", ctypes.c_ulong), - ("ullTotalPhys", ctypes.c_ulonglong), - ("ullAvailPhys", ctypes.c_ulonglong), - ("ullTotalPageFile", ctypes.c_ulonglong), - ("ullAvailPageFile", ctypes.c_ulonglong), - ("ullTotalVirtual", ctypes.c_ulonglong), - ("ullAvailVirtual", ctypes.c_ulonglong), - ("ullAvailExtendedVirtual", ctypes.c_ulonglong), - ] - stat = _MEMSTATEX() - stat.dwLength = ctypes.sizeof(stat) - ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat)) # type: ignore[attr-defined] - return stat - - @staticmethod - def _ram_buffer_dir() -> Path | None: - """ - Retourne le répertoire RAM-backed disponible sur cette plateforme, ou None. 
- - · Linux : /dev/shm (tmpfs kernel, taille = RAM physique) - · macOS : /dev/shm (POSIX shm namespace, writable sur macOS ≥ 10.15) - · Windows: aucun équivalent standard → None (buffer sur disque uniquement) - """ - if sys.platform in ("linux", "darwin"): - shm = Path("/dev/shm") - if shm.is_dir() and os.access(shm, os.W_OK): - return shm - return None + _total_ram_bytes = staticmethod(_ram_buffer_module.total_ram_bytes) + _available_ram_bytes = staticmethod(_ram_buffer_module.available_ram_bytes) + _macos_available_ram = staticmethod(_ram_buffer_module.macos_available_ram) + _ram_buffer_dir = staticmethod(_ram_buffer_module.ram_buffer_dir) def _shm_path(self, tmp: Path, name: str, file_size: int) -> Path: - """ - Retourne un chemin dans le répertoire RAM si les conditions sont réunies, - sinon un chemin dans tmp (disque). - - Conditions (toutes requises) : - 1. ram_buffer_enabled = True (configuration) - 2. Un répertoire RAM existe sur cette plateforme (_ram_buffer_dir()) - 3. RAM disponible après chargement ≥ threshold_pct % de la RAM totale - formule : available_before - file_size ≥ total_ram × threshold_pct / 100 - - La décision est réévaluée à chaque appel (RAM dynamique). 
- """ - if not self._ram_buffer_enabled: - return tmp / name - ram_dir = EncodeWorkflow._ram_buffer_dir() - if ram_dir is None: - return tmp / name - total = EncodeWorkflow._total_ram_bytes() - available = EncodeWorkflow._available_ram_bytes() - if total <= 0 or available <= 0: - return tmp / name - min_free_after = int(total * self._ram_buffer_threshold_pct / 100) - if available - file_size >= min_free_after: - return ram_dir / name - return tmp / name + """Wrapper de `runtime.ram_buffer.shm_path` lié à la config de l'instance.""" + return _ram_buffer_module.shm_path( + tmp, + name, + file_size, + enabled=self._ram_buffer_enabled, + threshold_pct=self._ram_buffer_threshold_pct, + ) # ------------------------------------------------------------------ # Aperçu lisible # ------------------------------------------------------------------ def preview_command(self, config: EncodeConfig) -> str: - cmd = self.build_command_single(config) - if not cmd: - return "" - prefix = ( - "# Mode taille cible : passe 1 omise de cet aperçu\n" - if self._uses_two_pass(config) - else "" - ) - lines = [cmd[0]] - i = 1 - while i < len(cmd): - p = cmd[i] - if p.startswith("-") and i + 1 < len(cmd) and not cmd[i + 1].startswith("-"): - lines.append(f" {p} {cmd[i + 1]}") - i += 2 - else: - lines.append(f" {p}") - i += 1 - return prefix + " \\\n".join(lines) + return _format_preview_selection_plan( + _build_encode_command_selection_plan( + config, + plan=self._build_encode_plan(config), + is_multi_video=self._is_multi_video, + uses_two_pass=self._uses_two_pass, + build_multi_video_preview=self._build_multi_video_command_preview, + build_two_pass=self._build_two_pass, + build_single_pass=self._build_single_pass, + ) + ) # ------------------------------------------------------------------ # Validation # ------------------------------------------------------------------ def validate(self, config: EncodeConfig) -> list[str]: - errors: list[str] = [] - if not config.source.is_file(): - 
errors.append(f"Fichier source introuvable : {config.source}") - output_dir = config.output.parent - if not output_dir.exists(): - errors.append(f"Dossier de sortie inexistant : {output_dir}") - elif not self._is_dir_writable(output_dir): - errors.append( - "Dossier de sortie non inscriptible : " - f"{output_dir} (vérifiez les protections Windows sur les dossiers Bibliothèques)." - ) - if config.source == config.output: - errors.append("Le fichier de sortie doit être différent du fichier source.") - if self._uses_two_pass(config) and not (config.duration_s or 0) > 0: - errors.append("Durée du fichier source inconnue — mode taille cible impossible.") - if config.video.inject_hdr_meta and not config.video.tonemap_to_sdr: - if config.video.master_display and not re.match( - r"^G\(\d+,\d+\)B\(\d+,\d+\)R\(\d+,\d+\)WP\(\d+,\d+\)L\(\d+,\d+\)$", - config.video.master_display.strip(), - ): - errors.append( - "Format master_display invalide. " - "Attendu : G(x,y)B(x,y)R(x,y)WP(x,y)L(max,min)" - ) - if config.video.max_cll and not re.match(r"^\d+,\d+$", config.video.max_cll.strip()): - errors.append("Format MaxCLL invalide. Attendu : MaxCLL,MaxFALL ex. 1000,400") - - for raw in config.track_time_offsets: - if not isinstance(raw, TrackTimeOffset): - continue - track_type = str(raw.track_type or "").strip().lower() - if track_type == "video" and int(raw.offset_ms) < 0: - errors.append( - "Décalage vidéo négatif interdit : " - f"source={Path(raw.source_path)}, stream={int(raw.stream_index)}, " - f"offset={int(raw.offset_ms)} ms" - ) - return errors - - @staticmethod - def _is_dir_writable(path: Path) -> bool: - """ - Vérifie qu'un fichier temporaire peut être créé dans ``path``. - - Sous Windows, certains dossiers protégés (Documents/Vidéos, etc.) peuvent - exister mais refuser la création de nouveaux fichiers. 
- """ - try: - with tempfile.NamedTemporaryFile( - mode="wb", - dir=path, - prefix="mrecode_write_probe_", - delete=True, - ): - pass - return True - except OSError: - return False + plan = _build_encode_plan_data( + config, + resolve_subtitle_tracks=lambda _config, _all_sources: ([], False), + resolve_global_tags=self._resolve_global_tags, + video_tracks=self._video_tracks, + video_source_from_settings=self._video_source_from_settings, + video_source_path=self._video_source_path, + video_stream_index=self._video_stream_index, + video_map_key=self._video_map_key, + ) + return _validate_encode_config_plan( + config, + planned_video_tracks=plan.video_tracks, + dir_writable=_is_dir_writable_plan, + ) # ------------------------------------------------------------------ # Exécution @@ -2513,7 +1918,9 @@ def _run_with_preparation( self._log_step(3, "Normalisation des options HDR dynamiques") self._check_cancelled(prep_signals) - if not self._is_video_passthrough(prepared_config): + if self._is_multi_video(prepared_config): + prepared_config = self._normalize_dynamic_hdr_multi(prepared_config) + elif not self._is_video_passthrough(prepared_config): prepared_config = self._normalize_dynamic_hdr_config(prepared_config) elif self._wants_dynamic_hdr_copy(prepared_config): # Codec COPY : les NAL units DoVi/HDR10+ sont déjà dans le bitstream source. @@ -2524,6 +1931,33 @@ def _run_with_preparation( "métadonnées préservées par passthrough ffmpeg.", ) + self._check_cancelled(prep_signals) + if self._is_multi_video(prepared_config): + self._log_step(4, "Routage du workflow (pipeline multi-pistes vidéo)") + plan = self._build_encode_plan(prepared_config) + if prep_signals is not None: + # En mode validate=False, le pipeline multi-pistes s'exécute + # inline et peut émettre finished/failed avant le retour. + # Les hooks doivent donc être branchés avant l'exécution. 
+ self._bind_output_hooks( + prep_signals, + output=prepared_config.output, + cleanup_paths=cleanup_paths, + ) + signals = self._run_multi_video_pipeline( + prepared_config, + cleanup_paths, + prep_signals=prep_signals, + plan=plan, + ) + if prep_signals is None or signals is not prep_signals: + self._bind_output_hooks( + signals, + output=prepared_config.output, + cleanup_paths=cleanup_paths, + ) + return signals + self._check_cancelled(prep_signals) needs_inject = self._needs_metadata_inject(prepared_config) self._log_step( @@ -2540,12 +1974,38 @@ def _run_with_preparation( self._ensure_inject_storage_available(prepared_config) self._check_cancelled(prep_signals) + if prep_signals is not None: + # Chemin validate=False: certains sous-workflows peuvent exécuter + # inline et émettre des signaux terminaux avant retour. + self._bind_output_hooks( + prep_signals, + output=prepared_config.output, + cleanup_paths=cleanup_paths, + ) + + plan = self._build_encode_plan(prepared_config) signals = ( - self._run_with_metadata_inject(prepared_config) + self._run_with_metadata_inject( + prepared_config, + prep_signals=prep_signals, + plan=plan, + ) if needs_inject - else self._run_direct_output(prepared_config, cleanup_paths, prep_signals=prep_signals) + else self._run_direct_output( + prepared_config, + cleanup_paths, + prep_signals=prep_signals, + plan=plan, + ) ) - self._bind_temp_cleanup(signals, cleanup_paths) + if prep_signals is None or signals is not prep_signals: + self._bind_output_hooks( + signals, + output=prepared_config.output, + cleanup_paths=cleanup_paths, + include_segment_patch=False, + include_nfo=False, + ) return signals def _run_direct_output( @@ -2554,268 +2014,187 @@ def _run_direct_output( cleanup_paths: list[Path], *, prep_signals: TaskSignals | None = None, + plan: _EncodePlan | None = None, ) -> TaskSignals: - self._check_cancelled(prep_signals) - self._log_step(5, "Construction de la commande ffmpeg (sortie directe)") cwd = config.work_dir or 
config.source.parent - - # Multi-source: le pré-scan ffprobe peut être coûteux. - # On déporte build+exécution dans un worker dédié pour éviter de bloquer l'UI. - if len(self._collect_all_sources(config)) > 1: - signals = self._run_direct_output_multisource_async( - config=config, - cleanup_paths=cleanup_paths, - cwd=cwd, - prep_signals=prep_signals, - ) - self._bind_matroska_segment_muxing_patch(signals, config.output) - self._bind_nfo_write(signals, config.output) - return signals - - chapter_dir: Path | None = None - if config.chapter_overrides: - chapter_dir = Path( - tempfile.mkdtemp( - prefix="enc_chapters_", - dir=str(config.work_dir) if config.work_dir else None, - ) + plan = plan or self._build_encode_plan(config) + signals = _DirectOutputRunner( + _DirectOutputRunnerCallbacks( + check_cancelled=self._check_cancelled, + log_step=self._log_step, + log_info=lambda message: self.log_message.emit("INFO", message), + uses_two_pass=self._uses_two_pass, + build_encode_plan=self._build_encode_plan, + build_runtime_two_pass_with_sync=self._build_runtime_two_pass_with_sync, + build_runtime_single_pass_with_sync=self._build_runtime_single_pass_with_sync, + run_two_pass=self._run_two_pass, + run_cmd=lambda cmd, cwd, label, progress_cb, signals: self._runner._run_cmd( + cmd, + signals=signals, + cwd=cwd, + label=label, + progress_cb=progress_cb, + ), + run_tool=lambda cmd, cwd, label: self._runner.run(cmd, cwd=cwd, label=label), + bind_live_sync_cleanup=self._bind_live_sync_cleanup, + cleanup_two_pass_logs=self._cleanup_two_pass_logs, ) - cleanup_paths.append(chapter_dir) - - self._check_cancelled(prep_signals) - if self._uses_two_pass(config): - self._log_step(6, "Préparation sync/remap + commandes ffmpeg (2 passes)") - cmds: list[list[str]] - live_sync_session: LiveSyncSession | None = None - sync_cleanup_paths: list[Path] = [] - try: - cmds, live_sync_session, sync_cleanup_paths = self._build_runtime_two_pass_with_sync( - config, - 
chapter_materialize_dir=chapter_dir, - signals=prep_signals, - ) - cleanup_paths.extend(sync_cleanup_paths) - self._check_cancelled(prep_signals) - self._log_step(7, "Exécution ffmpeg en 2 passes (sortie directe)") - signals = self._run_two_pass(cmds, cwd=cwd, signals=prep_signals) - except Exception: - if live_sync_session is not None: - live_sync_session.close() - raise - self._bind_live_sync_cleanup(signals, live_sync_session) + ).run( + config=config, + cleanup_paths=cleanup_paths, + cwd=cwd, + prep_signals=prep_signals, + plan=plan, + ) + if prep_signals is None: self._bind_matroska_segment_muxing_patch(signals, config.output) self._bind_nfo_write(signals, config.output) - return signals - - self._log_step(6, "Préparation sync/remap + commande ffmpeg (single pass)") - cmd: list[str] - live_sync_session = None - sync_cleanup_paths = [] - try: - cmd, live_sync_session, sync_cleanup_paths = self._build_runtime_single_pass_with_sync( - config, - chapter_materialize_dir=chapter_dir, - signals=prep_signals, - ) - cleanup_paths.extend(sync_cleanup_paths) - self._check_cancelled(prep_signals) - self._log_step(7, "Exécution ffmpeg en single pass (sortie directe)") - if prep_signals is not None: - output = self._runner._run_cmd( - cmd, - cwd=cwd, - label="ffmpeg", - progress_cb=lambda line: prep_signals.progress.emit(line), - signals=prep_signals, - ) - prep_signals.finished.emit(output) - signals = prep_signals - else: - signals = self._runner.run(cmd, cwd=cwd, label="ffmpeg") - except Exception: - if live_sync_session is not None: - live_sync_session.close() - raise - self._bind_live_sync_cleanup(signals, live_sync_session) - self._bind_matroska_segment_muxing_patch(signals, config.output) - self._bind_nfo_write(signals, config.output) return signals def _run_direct_output_multisource_async( self, *, config: EncodeConfig, - cleanup_paths: list[Path], - cwd: Path, - prep_signals: TaskSignals | None = None, - ) -> TaskSignals: - signals = prep_signals or TaskSignals() - 
executor = ThreadPoolExecutor(max_workers=1) - - def _task() -> None: - chapter_dir: Path | None = None - live_sync_session: LiveSyncSession | None = None - sync_cleanup_paths: list[Path] = [] - is_two_pass = self._uses_two_pass(config) - - try: - self._check_cancelled(signals) - if config.chapter_overrides: - chapter_dir = Path( - tempfile.mkdtemp( - prefix="enc_chapters_", - dir=str(config.work_dir) if config.work_dir else None, - ) - ) - cleanup_paths.append(chapter_dir) - - if is_two_pass: - self._log_step(6, "Préparation sync/remap + commandes ffmpeg (2 passes)") - cmds, live_sync_session, sync_cleanup_paths = self._build_runtime_two_pass_with_sync( - config, - chapter_materialize_dir=chapter_dir, - signals=signals, - ) - cleanup_paths.extend(sync_cleanup_paths) - if live_sync_session is not None: - for proc in live_sync_session.processes: - signals._register_proc(proc) - self._check_cancelled(signals) - self._log_step(7, "Exécution ffmpeg en 2 passes (sortie directe)") - - self.log_message.emit("INFO", "Passe 1/2 (analyse)…") - self._runner._run_cmd( - cmds[0], - cwd=cwd, - label="ffmpeg-pass1", - progress_cb=lambda line: signals.progress.emit(line), - signals=signals, - ) - self._check_cancelled(signals) - self.log_message.emit("INFO", "Passe 2/2 (encodage)…") - output = self._runner._run_cmd( - cmds[1], - cwd=cwd, - label="ffmpeg-pass2", - progress_cb=lambda line: signals.progress.emit(line), - signals=signals, - ) - signals.finished.emit(output) - else: - self._log_step(6, "Préparation sync/remap + commande ffmpeg (single pass)") - cmd, live_sync_session, sync_cleanup_paths = self._build_runtime_single_pass_with_sync( - config, - chapter_materialize_dir=chapter_dir, - signals=signals, - ) - cleanup_paths.extend(sync_cleanup_paths) - if live_sync_session is not None: - for proc in live_sync_session.processes: - signals._register_proc(proc) - self._check_cancelled(signals) - self._log_step(7, "Exécution ffmpeg en single pass (sortie directe)") - output = 
self._runner._run_cmd( - cmd, - cwd=cwd, - label="ffmpeg", - progress_cb=lambda line: signals.progress.emit(line), - signals=signals, - ) - signals.finished.emit(output) - except TaskCancelledError: - signals.cancelled.emit() - except Exception as exc: - signals.failed.emit(str(exc), exc) - finally: - if live_sync_session is not None: - for proc in live_sync_session.processes: - signals._unregister_proc(proc) - live_sync_session.close() - if is_two_pass: - self._cleanup_two_pass_logs(cwd) - executor.shutdown(wait=False) + cleanup_paths: list[Path], + cwd: Path, + prep_signals: TaskSignals | None = None, + plan: _EncodePlan | None = None, + ) -> TaskSignals: + return _DirectOutputRunner( + _DirectOutputRunnerCallbacks( + check_cancelled=self._check_cancelled, + log_step=self._log_step, + log_info=lambda message: self.log_message.emit("INFO", message), + uses_two_pass=self._uses_two_pass, + build_encode_plan=self._build_encode_plan, + build_runtime_two_pass_with_sync=self._build_runtime_two_pass_with_sync, + build_runtime_single_pass_with_sync=self._build_runtime_single_pass_with_sync, + run_two_pass=self._run_two_pass, + run_cmd=lambda cmd, cwd, label, progress_cb, signals: self._runner._run_cmd( + cmd, + signals=signals, + cwd=cwd, + label=label, + progress_cb=progress_cb, + ), + run_tool=lambda cmd, cwd, label: self._runner.run(cmd, cwd=cwd, label=label), + bind_live_sync_cleanup=self._bind_live_sync_cleanup, + cleanup_two_pass_logs=self._cleanup_two_pass_logs, + ) + ).run_multisource_async( + config=config, + cleanup_paths=cleanup_paths, + cwd=cwd, + prep_signals=prep_signals, + plan=plan, + ) - executor.submit(_task) - return signals + def _run_multi_video_pipeline( + self, + config: EncodeConfig, + cleanup_paths: list[Path], + *, + prep_signals: TaskSignals | None = None, + plan: _EncodePlan | None = None, + ) -> TaskSignals: + return _MultiVideoPipelineRunner( + _MultiVideoPipelineRunnerCallbacks( + ffmpeg_bin=self._ffmpeg, + bins=dict(self._bins), + 
max_parallel_video_encodes=_normalize_max_parallel_video_encodes(self._max_parallel_video_encodes), + check_cancelled=self._check_cancelled, + build_encode_plan=self._build_encode_plan, + video_tracks=self._video_tracks, + video_source_from_settings=self._video_source_from_settings, + video_stream_from_settings=self._video_stream_from_settings, + track_offset_ms=lambda lookup, **kwargs: _track_offset_ms_plan(lookup, **kwargs), + offset_input_args=self._offset_input_args, + parallel_video_worker_thread_count=self._parallel_video_worker_thread_count, + video_encode_resource_key=self._video_encode_resource_key, + parallel_video_min_available_ram_bytes=self._parallel_video_min_available_ram_bytes, + video_prep_estimated_ram_bytes=self._video_prep_estimated_ram_bytes, + format_bytes=_format_bytes_runtime, + available_ram_bytes=EncodeWorkflow._available_ram_bytes, + source_input_index_map=lambda sources, start_index: _source_input_index_map_plan( + sources, + start_index=start_index, + ), + prepare_multisource_sync=self._prepare_multisource_sync, + sync_cleanup_paths=_common_sync_cleanup_paths, + append_sync_inputs=self._append_sync_inputs, + prepare_container_metadata_inputs=self._prepare_container_metadata_inputs, + ffmpeg_thread_args=self._ffmpeg_thread_args, + append_offset_aux_inputs=self._append_offset_aux_inputs, + build_offset_specs=lambda config, **kwargs: _build_offset_specs_plan(config, **kwargs), + append_stream_maps_and_attachments=self._append_stream_maps_and_attachments, + append_strict_interleave_mux_flags=self._append_strict_interleave_mux_flags, + append_container_metadata_args=self._append_container_metadata_args, + ffmpeg_progress_args=self._ffmpeg_progress_args, + run_cmd=lambda cmd, cwd, label, progress_cb, signals: self._runner._run_cmd( + cmd, + cwd=cwd, + label=label, + progress_cb=progress_cb, + signals=signals, + ), + log_step=self._log_step, + log_info=lambda message: self.log_message.emit("INFO", message), + 
ui_encode_progress_message=_ui_encode_progress_message, + build_video_only_two_pass_for_track=self._build_video_only_two_pass_for_track, + cleanup_two_pass_logs_for_prefix=self._cleanup_two_pass_logs_for_prefix, + build_video_only_cmd_for_track=self._build_video_only_cmd_for_track, + wrap_injected_hevc_for_reconstruction=self._wrap_injected_hevc_for_reconstruction, + build_multi_video_track_encode_commands=self._build_multi_video_track_encode_commands, + two_pass_log_prefix=self._two_pass_log_prefix, + ) + ).run( + config, + cleanup_paths, + prep_signals=prep_signals, + plan=plan, + ) def _bind_live_sync_cleanup( self, signals: TaskSignals, session: LiveSyncSession | None, ) -> None: - if session is None: - return - - done = {"closed": False} - for proc in session.processes: - signals._register_proc(proc) - - def _cleanup(*_args) -> None: - if done["closed"]: - return - done["closed"] = True - for proc in session.processes: - signals._unregister_proc(proc) - session.close() - - signals.finished.connect(_cleanup) - signals.failed.connect(_cleanup) - signals.cancelled.connect(_cleanup) + self._signal_binding_service.bind_live_sync_cleanup(signals, session) def _bind_temp_cleanup(self, signals: TaskSignals, cleanup_paths: list[Path]) -> None: """Supprime les fichiers/dossiers temporaires quand le workflow se termine.""" - if not cleanup_paths: - return - - done = {"cleaned": False} - - def _cleanup(*_args) -> None: - if done["cleaned"]: - return - done["cleaned"] = True - for path in cleanup_paths: - try: - remove_path(path) - except OSError: - pass - - signals.finished.connect(_cleanup) - signals.failed.connect(_cleanup) - signals.cancelled.connect(_cleanup) + self._signal_binding_service.bind_temp_cleanup(signals, cleanup_paths) def _bind_matroska_segment_muxing_patch(self, signals: TaskSignals, output: Path) -> None: - self._muxing_post_action.bind_on_success(signals, output) - self._language_post_action.bind_on_success(signals, output) + 
self._signal_binding_service.bind_matroska_segment_muxing_patch(signals, output) def _bind_nfo_write(self, signals: TaskSignals, output: Path) -> None: if not self._generate_nfo: return - mediainfo_bin = self._bins.get("mediainfo") or "mediainfo" - log_cb = self.log_message.emit - - def _write(*_args) -> None: - write_mediainfo_nfo(output, log_cb=log_cb, mediainfo_bin=mediainfo_bin) - - signals.finished.connect(_write) + self._signal_binding_service.bind_nfo_write(signals, output) - @staticmethod - def _format_bytes(value: int) -> str: - size = float(max(0, value)) - units = ("B", "KiB", "MiB", "GiB", "TiB") - idx = 0 - while size >= 1024.0 and idx < len(units) - 1: - size /= 1024.0 - idx += 1 - return f"{size:.1f} {units[idx]}" + def _bind_output_hooks( + self, + signals: TaskSignals, + *, + output: Path, + cleanup_paths: list[Path] | None = None, + include_temp_cleanup: bool = True, + include_segment_patch: bool = True, + include_nfo: bool = True, + ) -> None: + if include_temp_cleanup and cleanup_paths is not None: + self._bind_temp_cleanup(signals, cleanup_paths) + if include_segment_patch: + self._bind_matroska_segment_muxing_patch(signals, output) + if include_nfo and self._generate_nfo: + self._bind_nfo_write(signals, output) def _estimate_duration_seconds(self, config: EncodeConfig) -> float: - duration_s = config.duration_s - if duration_s is not None and duration_s > 0: - return float(duration_s) - probed = self._postproc_helper._probe_duration_seconds(config.source) - if probed is not None and probed > 0: - return float(probed) - return 3600.0 + return _estimate_duration_seconds_runtime( + config, + probe_duration_seconds=self._postprocess_service.probe_duration_seconds, + ) def _estimate_inject_video_bytes( self, @@ -2824,133 +2203,33 @@ def _estimate_inject_video_bytes( duration_s: float, source_size: int, ) -> int: - if config.video.quality_mode == QualityMode.SIZE: - video_kbps = self._size_to_bitrate_kbps(config) - return int((video_kbps * 1000 / 8) 
* duration_s) - if config.video.quality_mode == QualityMode.BITRATE: - video_kbps = max(1, int(config.video.bitrate_kbps or 1)) - return int((video_kbps * 1000 / 8) * duration_s) - # CRF : pas de débit cible explicite ; base conservative sur la taille source. - return max(source_size, int(source_size * 3 / 4)) - - def _estimate_inject_storage_requirements(self, config: EncodeConfig) -> tuple[int, int]: - """ - Retourne (work_required_bytes, output_required_bytes) pour le chemin injection. - - work_required_bytes : pic d'espace requis pour les temporaires du workflow - d'injection (HEVC + sidecars) sur le FS de travail. - output_required_bytes : espace requis pour produire le MKV final sur le FS - de sortie. - """ - source_size = max(0, config.source.stat().st_size if config.source.exists() else 0) - duration_s = self._estimate_duration_seconds(config) - video_bytes = max( - 128 * 1024 * 1024, - self._estimate_inject_video_bytes( - config, - duration_s=duration_s, - source_size=source_size, - ), + return _estimate_inject_video_bytes_runtime( + config, + duration_s=duration_s, + source_size=source_size, + size_to_bitrate_kbps=self._size_to_bitrate_kbps, ) - sidecars = 32 * 1024 * 1024 - if config.copy_dv: - sidecars += 64 * 1024 * 1024 - if config.copy_hdr10plus: - sidecars += 64 * 1024 * 1024 - if config.chapter_overrides: - sidecars += 16 * 1024 * 1024 - - work_required = (2 * video_bytes) + sidecars - - encoded_audio_bytes = 0 - for audio in config.audio_tracks: - if audio.codec == "copy": - continue - if audio.codec == "flac": - kbps = max(1000, int(audio.bitrate_kbps or 0)) - else: - kbps = normalize_audio_bitrate_kbps( - audio.codec, - audio.bitrate_kbps, - audio.input_channels, - None, - audio.input_channel_layout, - ) - encoded_audio_bytes += int((kbps * 1000 / 8) * duration_s) - - extra_attachments_bytes = sum( - max(0, Path(p).stat().st_size) - for p in config.extra_attachments - if Path(p).exists() + def _estimate_inject_storage_requirements(self, config: 
EncodeConfig) -> tuple[int, int]: + return _estimate_inject_storage_requirements_runtime( + config, + probe_duration_seconds=self._postprocess_service.probe_duration_seconds, + size_to_bitrate_kbps=self._size_to_bitrate_kbps, ) - output_required = max( - source_size, - video_bytes + encoded_audio_bytes + extra_attachments_bytes, - ) + (64 * 1024 * 1024) - return work_required, output_required def _ensure_inject_storage_available(self, config: EncodeConfig) -> None: - """ - Vérifie l'espace libre avant le chemin d'injection DV/HDR10+. - - On fait une estimation conservative : - - temporaires d'injection (HEVC + sidecars) sur le volume de travail, - - fichier final sur le volume de sortie. - """ - work_required, output_required = self._estimate_inject_storage_requirements(config) - - work_root = config.work_dir or Path(tempfile.gettempdir()) - work_root.mkdir(parents=True, exist_ok=True) - output_root = config.output.parent - output_root.mkdir(parents=True, exist_ok=True) - - work_free = shutil.disk_usage(work_root).free - output_free = shutil.disk_usage(output_root).free - - self.log_message.emit( - "INFO", - "Estimation espace injection: " - f"temp≈{self._format_bytes(work_required)} sur {work_root} ; " - f"sortie≈{self._format_bytes(output_required)} sur {output_root}.", + _ensure_inject_storage_available_runtime( + config, + estimate_requirements=self._estimate_inject_storage_requirements, + log_info=lambda message: self.log_message.emit("INFO", message), + ram_buffer_enabled=self._ram_buffer_enabled, + ram_buffer_dir=EncodeWorkflow._ram_buffer_dir, + disk_usage=shutil.disk_usage, + stat=os.stat, + temp_dir=tempfile.gettempdir, + format_bytes_fn=_format_bytes_runtime, ) - if self._ram_buffer_enabled and EncodeWorkflow._ram_buffer_dir() is not None: - self.log_message.emit( - "INFO", - "Buffer RAM actif: estimation disque conservative (fallback disque).", - ) - - same_fs = False - try: - same_fs = os.stat(work_root).st_dev == os.stat(output_root).st_dev - except 
OSError: - same_fs = False - - if same_fs: - required = work_required + output_required - free = min(work_free, output_free) - if free < required: - raise EncodeError( - "Espace disque insuffisant pour l'injection DoVi/HDR10+ " - f"(requis≈{self._format_bytes(required)}, libre≈{self._format_bytes(free)} " - f"sur {output_root})." - ) - return - - if work_free < work_required: - raise EncodeError( - "Espace disque insuffisant pour les temporaires d'injection " - f"(requis≈{self._format_bytes(work_required)}, " - f"libre≈{self._format_bytes(work_free)} sur {work_root})." - ) - if output_free < output_required: - raise EncodeError( - "Espace disque insuffisant pour le fichier de sortie " - f"(requis≈{self._format_bytes(output_required)}, " - f"libre≈{self._format_bytes(output_free)} sur {output_root})." - ) - def _prepare_attachment_config( self, config: EncodeConfig, @@ -2958,131 +2237,33 @@ def _prepare_attachment_config( work_dir: Path, signals: TaskSignals | None = None, ) -> tuple[EncodeConfig, Path | None]: - """ - Convertit les ``attached_pic`` sélectionnés en fichiers joints temporaires. - - FFmpeg expose fréquemment un ``cover.jpg`` MKV comme stream vidéo MJPEG - avec ``disposition.attached_pic=1``. Si on le remappe via ``-map``, - la sortie perd ce flag et l'image devient une vraie piste vidéo. - Pour conserver le comportement "attachment", on extrait d'abord l'image - vers un fichier temporaire puis on la ré-attache avec ``-attach``. 
- """ - if not config.attachment_streams: - return config, None - - tmp_dir = Path(tempfile.mkdtemp(prefix="enc_attachments_", dir=str(work_dir))) - direct_streams: list = [] - extracted_files: list[Path] = [] - created_any = False - - try: - for selection in config.attachment_streams: - self._check_cancelled(signals) - src_path, stream_idx = selection[:2] - meta = self._describe_attachment_stream(src_path, stream_idx) - self._check_cancelled(signals) - if not meta["is_attached_pic"]: - direct_streams.append(selection) - continue - - created_any = True - filename = self._attachment_filename(meta, stream_idx) - dest = self._unique_attachment_path(tmp_dir, filename) - self._extract_attached_pic(src_path, stream_idx, dest, signals=signals) - self._check_cancelled(signals) - extracted_files.append(dest) - - if not created_any: - shutil.rmtree(tmp_dir, ignore_errors=True) - return config, None - - prepared = replace( - config, - attachment_streams=direct_streams, - extra_attachments=[*extracted_files, *config.extra_attachments], + return _AttachmentPreparationService( + _AttachmentPreparationServiceCallbacks( + check_cancelled=self._check_cancelled, + describe_attachment_stream=self._describe_attachment_stream, + attachment_filename=_default_attachment_filename, + unique_attachment_path=_unique_attachment_path_runtime, + extract_attached_pic=lambda source, stream_idx, dest, task_signals: self._extract_attached_pic( + source, + stream_idx, + dest, + signals=task_signals, + ), ) - return prepared, tmp_dir - except Exception: - shutil.rmtree(tmp_dir, ignore_errors=True) - raise + ).prepare( + config, + work_dir=work_dir, + signals=signals, + ) def _describe_attachment_stream(self, source: Path, stream_idx: int) -> dict[str, object]: - """Retourne les métadonnées ffprobe minimales d'un stream potentiellement attachment.""" - ffprobe_bin = self._ffprobe_bin_from_ffmpeg(self._ffmpeg) - - cmd = [ - ffprobe_bin, - "-v", "quiet", - "-print_format", "json", - "-show_streams", - 
str(source), - ] - try: - result = subprocess.run( - cmd, - capture_output=True, - check=False, - timeout=30, - **subprocess_text_kwargs(), - ) - except FileNotFoundError: - return { - "is_attached_pic": False, - "filename": f"attachment_{stream_idx}.bin", - "mimetype": "application/octet-stream", - } - - if result.returncode != 0: - return { - "is_attached_pic": False, - "filename": f"attachment_{stream_idx}.bin", - "mimetype": "application/octet-stream", - } - - try: - payload = json.loads(result.stdout or "{}") - except json.JSONDecodeError: - payload = {} - - for stream in payload.get("streams", []): - if int(stream.get("index", -1)) != int(stream_idx): - continue - tags = stream.get("tags", {}) or {} - disposition = stream.get("disposition", {}) or {} - return { - "is_attached_pic": bool(disposition.get("attached_pic", 0)), - "filename": tags.get("filename") or f"attachment_{stream_idx}.bin", - "mimetype": tags.get("mimetype") or "application/octet-stream", - } - - return { - "is_attached_pic": False, - "filename": f"attachment_{stream_idx}.bin", - "mimetype": "application/octet-stream", - } - - def _attachment_filename(self, meta: dict[str, object], stream_idx: int) -> str: - """Construit un nom de fichier exploitable pour un attachment extrait.""" - raw_name = str(meta.get("filename") or "").strip() - name = Path(raw_name).name if raw_name else f"attachment_{stream_idx}" - if Path(name).suffix: - return name - mime = str(meta.get("mimetype") or "").lower() - suffix = _EXT_BY_MIME.get(mime, ".bin") - return f"{name}{suffix}" - - def _unique_attachment_path(self, tmp_dir: Path, filename: str) -> Path: - """Retourne un chemin unique dans ``tmp_dir`` pour éviter les collisions de nom.""" - candidate = tmp_dir / filename - if not candidate.exists(): - return candidate - stem = candidate.stem - suffix = candidate.suffix - for idx in range(1, 1000): - alt = tmp_dir / f"{stem}_{idx}{suffix}" - if not alt.exists(): - return alt - return tmp_dir / 
f"{stem}_{os.getpid()}{suffix}" + return _probe_attachment_stream_runtime( + source, + stream_idx, + ffprobe_bin=self._ffprobe_bin_from_ffmpeg(self._ffmpeg), + subprocess_run=subprocess.run, + text_kwargs_factory=subprocess_text_kwargs, + ) def _extract_attached_pic( self, @@ -3092,39 +2273,21 @@ def _extract_attached_pic( *, signals: TaskSignals | None = None, ) -> None: - """Extrait un ``attached_pic`` vers un vrai fichier image.""" - self._check_cancelled(signals) - cmd = [ - self._ffmpeg, - "-hide_banner", "-y", - "-i", str(source), - "-map", f"0:{stream_idx}", - *self._ffmpeg_thread_args(), - "-c", "copy", - "-frames:v", "1", - str(dest), - ] - self.log_message.emit("INFO", "$ " + " ".join(cmd)) - try: - self._runner._run_cmd( + _extract_attached_pic_runtime( + source, + stream_idx, + dest, + ffmpeg_bin=self._ffmpeg, + ffmpeg_thread_args=self._ffmpeg_thread_args, + check_cancelled=self._check_cancelled, + log_info=lambda message: self.log_message.emit("INFO", message), + run_cmd=lambda cmd, label, task_signals: self._runner._run_cmd( cmd, - label="extract-attached-pic", - signals=signals, - ) - except TaskCancelledError: - dest.unlink(missing_ok=True) - raise - except Exception as exc: - dest.unlink(missing_ok=True) - stderr = str(exc).strip() - raise EncodeError( - f"Extraction attachment échouée pour le stream {stream_idx} de {source.name}: {stderr}" - ) from exc - self._check_cancelled(signals) - if not dest.exists(): - raise EncodeError( - f"Extraction attachment échouée pour le stream {stream_idx} de {source.name}: fichier absent" - ) + label=label, + signals=task_signals, + ), + signals=signals, + ) def _run_two_pass( self, @@ -3174,6 +2337,17 @@ def _cleanup_two_pass_logs(cwd: Path | None) -> None: except OSError: pass + @staticmethod + def _cleanup_two_pass_logs_for_prefix(prefix: Path) -> None: + base_dir = prefix.parent + if not base_dir.exists(): + return + for path in base_dir.glob(f"{prefix.name}-*.log*"): + try: + path.unlink(missing_ok=True) + 
except OSError: + pass + @staticmethod def _normalize_frame_rate_expr(value: object) -> str | None: raw = str(value or "").strip() @@ -3229,27 +2403,99 @@ def _wrap_injected_hevc_for_reconstruction( ] def _resolve_global_tags(self, config: EncodeConfig) -> dict[str, str]: - tags: dict[str, str] = {} - - if config.tag_overrides is not None: - for key, value in config.tag_overrides.items(): - key_s = str(key).strip() - value_s = str(value).strip() - if not key_s or not value_s: - continue - tags[key_s] = value_s - + tags = _common_resolve_global_tags(config.tag_overrides, "") tags["title"] = config.file_title return tags + def _build_multi_video_final_mux_command( + self, + config: EncodeConfig, + prepared_video_inputs: list[dict[str, object]], + *, + chapter_materialize_dir: Path | None = None, + plan: _EncodePlan | None = None, + ) -> list[str]: + video_inputs = prepared_video_inputs or [ + { + "input_args": [], + "path": ( + self._video_source_from_settings(config, video) + if video.codec == "copy" + else Path(f"") + ), + "map_arg": ( + f"{idx - 1}:{self._video_stream_from_settings(video)}" + if video.codec == "copy" + else f"{idx - 1}:v:0" + ), + } + for idx, video in enumerate(self._video_tracks(config), start=1) + ] + + cmd: list[str] = [self._ffmpeg, "-hide_banner", "-y"] + cmd.extend(self._ffmpeg_progress_args()) + for spec in video_inputs: + raw_input_args = spec.get("input_args", ()) + if isinstance(raw_input_args, (list, tuple)): + input_args = [str(arg) for arg in raw_input_args] + else: + input_args = [] + cmd.extend([*input_args, "-i", str(spec["path"])]) + + plan = plan or self._build_encode_plan(config) + all_sources = list(plan.all_sources) + source_idx = _source_input_index_map_plan(all_sources, start_index=len(video_inputs)) + for src in all_sources: + cmd.extend(["-i", str(src)]) + + next_input_index, chapter_input_index, tag_input_index = self._prepare_container_metadata_inputs( + cmd, + config, + source_idx=source_idx, + 
next_input_index=len(video_inputs) + len(all_sources), + plan=plan, + chapter_materialize_dir=chapter_materialize_dir, + chapter_probe_source=config.source, + ) + _ = next_input_index + cmd.extend(self._ffmpeg_thread_args()) + + for out_idx, spec in enumerate(video_inputs): + cmd.extend(["-map", str(spec["map_arg"])]) + cmd.extend([f"-c:v:{out_idx}", "copy"]) + + resolved_subtitle_tracks = list(plan.resolved_subtitle_tracks) + self._append_stream_maps_and_attachments( + cmd, + config, + source_idx=source_idx, + subtitle_copy_input_indices=list(source_idx.values()), + subtitle_tracks_override=resolved_subtitle_tracks, + force_copy_subtitles_wildcard=(config.copy_subtitles and not plan.subtitles_resolved), + ) + + default_source_index = source_idx.get(config.source, len(video_inputs)) + self._append_container_metadata_args( + cmd, + config, + default_metadata_input_index=default_source_index, + default_chapter_input_index=default_source_index, + chapter_input_index=chapter_input_index, + tag_input_index=tag_input_index, + include_copy_video_stream_passthrough=False, + plan=plan, + ) + cmd.append(str(config.output)) + return cmd + @staticmethod - def _track_spec_for_track_order(track_order: int, audio_count: int) -> tuple[str, int] | None: + def _track_spec_for_track_order(track_order: int, video_count: int, audio_count: int) -> tuple[str, int] | None: if track_order <= 0: return None - if track_order == 1: - return ("v", 0) + if track_order <= max(1, video_count): + return ("v", track_order - 1) - first_audio = 2 + first_audio = max(1, video_count) + 1 last_audio = first_audio + max(0, audio_count) - 1 if first_audio <= track_order <= last_audio: return ("a", track_order - first_audio) @@ -3261,58 +2507,28 @@ def _track_spec_for_track_order(track_order: int, audio_count: int) -> tuple[str @staticmethod def _normalized_track_language_value(language: str, title: str | None = None) -> str | None: - raw = (language or "").strip() - if not raw: - return None - canonical = 
LangTags.normalize(raw) or raw - regional = LangTags.regionalize_track_language(canonical, title) or canonical - if regional.lower() == "und": - return "und" - if LangTags.is_valid(regional): - return regional - if LangTags.is_valid(canonical): - return canonical - return "und" + return _common_normalize_track_language(language, title) @staticmethod def _disposition_value_from_edit(edit) -> str | None: - values = ( - edit.flag_default, - edit.flag_forced, - edit.flag_hearing_impaired, - edit.flag_visual_impaired, - edit.flag_original, - edit.flag_commentary, - ) - if all(v is None for v in values): - return None - # Évite les états partiels : la disposition ffmpeg remplace l'ensemble. - if any(v is None for v in values): - return None - - flags: list[str] = [] - if edit.flag_default: - flags.append("default") - if edit.flag_forced: - flags.append("forced") - if edit.flag_hearing_impaired: - flags.append("hearing_impaired") - if edit.flag_visual_impaired: - flags.append("visual_impaired") - if edit.flag_original: - flags.append("original") - if edit.flag_commentary: - flags.append("comment") - return "+".join(flags) if flags else "0" + return _common_disposition_value( + flag_default=edit.flag_default, + flag_forced=edit.flag_forced, + flag_hearing_impaired=edit.flag_hearing_impaired, + flag_visual_impaired=edit.flag_visual_impaired, + flag_original=edit.flag_original, + flag_commentary=edit.flag_commentary, + ) def _build_track_meta_args(self, config: EncodeConfig) -> list[str]: args: list[str] = [] if not config.track_meta_edits: return args + video_count = max(1, len(self._video_tracks(config))) audio_count = len(config.audio_tracks) for edit in config.track_meta_edits: - spec = self._track_spec_for_track_order(int(edit.track_order), audio_count) + spec = self._track_spec_for_track_order(int(edit.track_order), video_count, audio_count) if spec is None: self.log_message.emit("WARN", f"Piste invalide en édition metadata: @{edit.track_order}") continue @@ -3337,343 
+2553,50 @@ def _build_track_meta_args(self, config: EncodeConfig) -> list[str]: args.extend([disposition_spec, disposition]) return args - def _run_with_metadata_inject(self, config: EncodeConfig) -> TaskSignals: - """ - Workflow d'encodage avec injection DV RPU / HDR10+ en une seule passe de sortie. - Appelé uniquement quand copy_dv ou copy_hdr10plus est actif ET codec ≠ copy. - - Gestion des fichiers de travail : - • Pas de encoded.mkv : la vidéo est encodée directement en HEVC brut (enc.hevc). - • Pas de src.hevc : les extractions RPU/HDR10+ lisent directement la source. - • L'audio copy est pris directement depuis la source via FFmpeg — aucun - traitement intermédiaire. - • La source n'est jamais modifiée. - • Tous les HEVC intermédiaires sont écrits dans le dossier process - dédié au job courant (sous work_dir). - • Chaque intermédiaire HEVC est supprimé immédiatement dès que l'étape - suivante l'a consommé. - • Le dossier temporaire process est supprimé en fin de workflow. - - Ordre d'injection (contrainte HDR10+ avant DV) : - HDR10+ en premier : hdr10plus_tool ne tolère pas les NAL RPU DV existants. - dovi_tool préserve tous les types de NAL. - - Étapes : - 1. Extraction RPU DoVi (dovi_tool extract-rpu) si copy_dv - 2. Extraction HDR10+ (hdr10plus_tool extract) si copy_hdr10plus - 3. Encodage vidéo seule → enc.hevc (HEVC brut, sans container) - Mode SIZE : deux passes (analyse + encodage direct en .hevc) - 4. Injection HDR10+ si applicable → nouveau current_hevc, ancien supprimé - 5. Injection RPU DV si applicable → nouveau current_hevc, ancien supprimé - 6. Reconstitution finale via ffmpeg (une seule commande, depuis la source) : - ffmpeg -i current_hevc -i source - -map 0:v:0 -c:v copy (vidéo injectée) - -map 1:stream_idx [codec args] (audio depuis source, copy ou réencodage) - -map 1:s? -c:s copy (subs depuis source) - -map_metadata/-map_chapters/... (tags/chapitres/track-meta) - output.mkv - Pas de fichier audio intermédiaire. 
La source n'est jamais modifiée. - """ - signals = TaskSignals() - executor = ThreadPoolExecutor(max_workers=1) - - def _task() -> None: - work = config.work_dir or Path(tempfile.gettempdir()) - work.mkdir(parents=True, exist_ok=True) - tmp_dir = tempfile.mkdtemp( - prefix="mediarecode_encode_", - dir=str(work), - ) - tmp = Path(tmp_dir) - # Conservé pour compatibilité : les intermédiaires sont désormais - # forcés dans tmp, donc cette liste reste vide. - ext_files: list[Path] = [] - live_sync_session: LiveSyncSession | None = None - sync_cleanup_paths: list[Path] = [] - - def _run(cmd: list[str]) -> str: - return self._runner._run_cmd( - cmd, signals=signals, cwd=tmp, - progress_cb=lambda line: signals.progress.emit(line), - ) - - def _check() -> None: - if signals._cancel_event.is_set(): - raise TaskCancelledError() - - def _alloc(name: str, ref_size: int) -> Path: - """ - Alloue un chemin de travail HEVC. - Tous les intermédiaires sont forcés dans le dossier process. - """ - _ = ref_size - return tmp / name - - def _free(path: Path) -> None: - """ - Supprime immédiatement un fichier intermédiaire. - Le retire de ext_files si présent. - Silencieux sur toute erreur OS. - """ - try: - path.unlink(missing_ok=True) - except OSError: - pass - try: - ext_files.remove(path) - except ValueError: - pass # chemin disque, pas dans ext_files - - try: - src_size_est = config.source.stat().st_size - # ── 1. RPU Dolby Vision ────────────────────────────────── - self._log_step(5, "Extraction des métadonnées dynamiques (DoVi/HDR10+)") - rpu_bin = tmp / "rpu.bin" - if config.copy_dv: - signals.progress.emit("Extraction RPU Dolby Vision…") - _run([ - self._bins["dovi_tool"], "extract-rpu", - "-i", str(self._video_source_path(config)), "-o", str(rpu_bin), - ]) - _check() - - # ── 2. 
HDR10+ ──────────────────────────────────────────── - hdr10p_json = tmp / "hdr10p.json" - if config.copy_hdr10plus: - signals.progress.emit("Extraction métadonnées HDR10+…") - _run([ - self._bins["hdr10plus_tool"], "extract", - str(self._video_source_path(config)), "-o", str(hdr10p_json), - ]) - _check() - - # ── 3. Encodage vidéo → enc.hevc brut ─────────────────── - # Encodage direct en HEVC sans container ni audio. - # Élimine encoded.mkv et garde un seul intermédiaire vidéo utile. - # La taille estimée = taille source (approximation conservative). - self._log_step(6, "Encodage vidéo seule (HEVC brut)") - enc_hevc = _alloc("enc.hevc", src_size_est) - signals.progress.emit("Encodage vidéo…") - if config.video.quality_mode == QualityMode.SIZE: - v_cmds = self._build_video_only_two_pass(config, enc_hevc) - self.log_message.emit("INFO", "Passe 1/2 (analyse)…") - _run(v_cmds[0]) - _check() - self.log_message.emit("INFO", "Passe 2/2 (encodage)…") - _run(v_cmds[1]) - else: - _run(self._build_video_only_cmd(config, enc_hevc)) - _check() - current_hevc = enc_hevc - - # ── 4. Injection HDR10+ ────────────────────────────────── - # HDR10+ avant DV : hdr10plus_tool ne tolère pas les NAL RPU DV. - self._log_step(7, "Injection HDR10+ puis DoVi (si demandé)") - if config.copy_hdr10plus and hdr10p_json.exists(): - cur_size = current_hevc.stat().st_size - out_hdr10p = _alloc("enc_hdr10p.hevc", cur_size) - signals.progress.emit("Injection métadonnées HDR10+…") - _run([ - self._bins["hdr10plus_tool"], "inject", - "-i", str(current_hevc), - "-j", str(hdr10p_json), - "-o", str(out_hdr10p), - ]) - _free(current_hevc) # libère enc.hevc immédiatement - current_hevc = out_hdr10p - _check() - - # ── 5. 
Injection RPU DV ────────────────────────────────── - if config.copy_dv and rpu_bin.exists(): - cur_size = current_hevc.stat().st_size - out_dv = _alloc("enc_dv.hevc", cur_size) - signals.progress.emit("Injection RPU Dolby Vision…") - _run([ - self._bins["dovi_tool"], - "-m", config.dovi_profile, - "inject-rpu", - "-i", str(current_hevc), - "-r", str(rpu_bin), - "-o", str(out_dv), - ]) - _free(current_hevc) # libère enc_hdr10p.hevc (ou enc.hevc) - current_hevc = out_dv - _check() - - # ── 6. Encapsulation FFmpeg-only du HEVC injecté ────────── - self._log_step(8, "Encapsulation timeline vidéo injectée") - wrapped_video = _alloc("enc_wrapped.mkv", current_hevc.stat().st_size) - signals.progress.emit("Encapsulation vidéo injectée…") - _run( - self._wrap_injected_hevc_for_reconstruction( - source=config.source, - hevc_input=current_hevc, - mkv_output=wrapped_video, - ) - ) - _free(current_hevc) - current_video_input = wrapped_video - _check() - - # ── 7. Reconstitution finale ───────────────────────────── - # ffmpeg multi-input : - # input 0 = vidéo encapsulée et horodatée, input 1 = source principale, - # input 2+ = sources audio / sous-titres / attachements supplémentaires. 
- self._log_step(9, "Reconstruction finale du conteneur MKV") - signals.progress.emit("Reconstitution finale…") - all_sources = self._collect_all_sources(config) - extra_sources = all_sources[1:] - recon_source_idx = self._source_input_index_map(all_sources, start_index=1) - sync_remap: dict[tuple[Path, int, str], tuple[int, int]] = {} - sync_inputs: list[Path | str] = [] - strict_interleave = False - - recon_cmd = [self._ffmpeg, "-hide_banner", "-y", - *self._ffmpeg_progress_args(), - "-i", str(current_video_input), - "-i", str(config.source)] - for sp in extra_sources: - recon_cmd.extend(["-i", str(sp)]) - - sync_remap, sync_inputs, live_sync_session, strict_interleave = self._prepare_multisource_sync( - config=config, - all_sources=all_sources, - sync_base_input_idx=2 + len(extra_sources), - work_dir=tmp, + def _run_with_metadata_inject( + self, + config: EncodeConfig, + *, + prep_signals: TaskSignals | None = None, + plan: _EncodePlan | None = None, + ) -> TaskSignals: + return _MetadataInjectRunner( + _MetadataInjectRunnerCallbacks( + ffmpeg_bin=self._ffmpeg, + bins=dict(self._bins), + log_step=self._log_step, + log_info=lambda message: self.log_message.emit("INFO", message), + check_cancelled=self._check_cancelled, + video_source_path=self._video_source_path, + build_video_only_two_pass=self._build_video_only_two_pass, + build_video_only_cmd=self._build_video_only_cmd, + wrap_injected_hevc_for_reconstruction=self._wrap_injected_hevc_for_reconstruction, + build_encode_plan=self._build_encode_plan, + source_input_index_map=lambda sources: _source_input_index_map_plan(sources, start_index=1), + prepare_multisource_sync=self._prepare_multisource_sync, + sync_cleanup_paths=_common_sync_cleanup_paths, + append_sync_inputs=self._append_sync_inputs, + prepare_container_metadata_inputs=self._prepare_container_metadata_inputs, + ffmpeg_thread_args=self._ffmpeg_thread_args, + ffmpeg_progress_args=self._ffmpeg_progress_args, + video_map_key=self._video_map_key, + 
append_offset_aux_inputs=self._append_offset_aux_inputs, + build_offset_specs=lambda config, **kwargs: _build_offset_specs_plan(config, **kwargs), + video_map_arg=_video_map_arg_plan, + append_stream_maps_and_attachments=self._append_stream_maps_and_attachments, + append_strict_interleave_mux_flags=self._append_strict_interleave_mux_flags, + append_container_metadata_args=self._append_container_metadata_args, + run_cmd=lambda cmd, signals, cwd, progress_cb: self._runner._run_cmd( + cmd, signals=signals, - allow_live=True, - ) - sync_cleanup_paths = self._sync_cleanup_paths(sync_inputs) - self._append_sync_inputs(recon_cmd, sync_inputs) - if live_sync_session is not None: - for proc in live_sync_session.processes: - signals._register_proc(proc) - - next_input_index, chapter_input_index, tag_input_index = self._prepare_container_metadata_inputs( - recon_cmd, - config, - source_idx=recon_source_idx, - next_input_index=2 + len(extra_sources) + len(sync_inputs), - chapter_materialize_dir=tmp, - chapter_probe_source=config.source, - ) - - recon_cmd.extend(self._ffmpeg_thread_args()) - resolved_subtitle_tracks, subtitles_resolved = self._resolved_subtitle_tracks_for_encode( - config, - all_sources, - ) - - offset_lookup = self._track_time_offset_lookup(config) - track_input_paths: list[Path | str] = [current_video_input, *all_sources, *sync_inputs] - - def _input_path(idx: int, fallback: Path | str) -> Path | str: - if 0 <= idx < len(track_input_paths): - return track_input_paths[idx] - return fallback - - video_key = self._video_map_key(config) - video_default_map = (0, 0) - track_mappings: list[tuple[tuple[Path, int, str], Path | str, int]] = [ - (video_key, _input_path(0, current_video_input), 0) - ] - - for a in config.audio_tracks: - src_path = Path(a.source_path or config.source) - remapped = sync_remap.get((src_path, int(a.stream_index), "audio")) - if remapped is not None: - inp, stream_idx = remapped - else: - inp = recon_source_idx.get(src_path) - if inp is 
None: - inp = recon_source_idx.get(config.source, 1) - stream_idx = int(a.stream_index) - track_mappings.append(((src_path, int(a.stream_index), "audio"), _input_path(int(inp), src_path), int(stream_idx))) - - for src_path_raw, stream_idx_raw in resolved_subtitle_tracks: - src_path = Path(src_path_raw) - remapped = sync_remap.get((src_path, int(stream_idx_raw), "subtitle")) - if remapped is not None: - inp, stream_idx = remapped - else: - inp = recon_source_idx.get(src_path) - if inp is None: - continue - stream_idx = int(stream_idx_raw) - track_mappings.append(((src_path, int(stream_idx_raw), "subtitle"), _input_path(int(inp), src_path), int(stream_idx))) - - next_input_index, offset_remap = self._append_offset_aux_inputs( - recon_cmd, - self._build_offset_specs( - config, - track_mappings=track_mappings, - offset_lookup=offset_lookup, - ), - start_input_index=next_input_index, - ) - _ = next_input_index - - video_map_key = video_key - recon_cmd.extend([ - "-map", - self._video_map_arg( - video_default_map, - offset_remap=offset_remap, - map_key=video_map_key, - ), - "-c:v", - "copy", - ]) - self._append_stream_maps_and_attachments( - recon_cmd, - config, - source_idx=recon_source_idx, - subtitle_copy_input_indices=list(range(1, 2 + len(extra_sources))), - sync_remap=sync_remap, - offset_remap=offset_remap, - subtitle_tracks_override=resolved_subtitle_tracks, - force_copy_subtitles_wildcard=(config.copy_subtitles and not subtitles_resolved), - ) - if strict_interleave: - self._append_strict_interleave_mux_flags(recon_cmd) - - self._append_container_metadata_args( - recon_cmd, - config, - default_metadata_input_index=0, - default_chapter_input_index=1, - chapter_input_index=chapter_input_index, - tag_input_index=tag_input_index, - ) - recon_cmd.append(str(config.output)) - _run(recon_cmd) - - signals.finished.emit(f"Encodage terminé → {config.output.name}") - - except TaskCancelledError: - signals.cancelled.emit() - except Exception as exc: - 
signals.failed.emit(str(exc), exc) - finally: - executor.shutdown(wait=False) - if live_sync_session is not None: - for proc in live_sync_session.processes: - signals._unregister_proc(proc) - live_sync_session.close() - for path in sync_cleanup_paths: - try: - remove_path(path) - except OSError: - pass - # Compatibilité : ext_files est vide en mode "tout dans work_dir". - for p in list(ext_files): - try: - p.unlink(missing_ok=True) - except OSError: - pass - shutil.rmtree(tmp_dir, ignore_errors=True) - - executor.submit(_task) - self._bind_matroska_segment_muxing_patch(signals, config.output) - self._bind_nfo_write(signals, config.output) - return signals + cwd=cwd, + progress_cb=progress_cb, + ), + bind_matroska_segment_muxing_patch=self._bind_matroska_segment_muxing_patch, + bind_nfo_write=self._bind_nfo_write, + ) + ).run( + config, + prep_signals=prep_signals, + plan=plan, + ) diff --git a/core/workflows/remux.py b/core/workflows/remux.py index f999e5a..08fed35 100644 --- a/core/workflows/remux.py +++ b/core/workflows/remux.py @@ -16,22 +16,16 @@ from __future__ import annotations -import json from typing import Callable import os -import shutil import subprocess import tempfile from concurrent.futures import ThreadPoolExecutor -from dataclasses import dataclass, replace +from dataclasses import replace from pathlib import Path from PySide6.QtCore import QObject, Signal -from core.inspector import AttachmentInfo -from core.lang_tags import Rfc5646LanguageTags as LangTags from core.runner import TaskCancelledError, TaskSignals, ToolRunner -from core.subprocess_utils import subprocess_text_kwargs -from core.subtitle_codec import plan_subtitle_codec from core.version import APP_VERSION_LABEL from core.workdir import ( download_tmdb_cover, @@ -41,6 +35,43 @@ ) from core.workflows.matroska_header_editor import MatroskaMuxingAppPostAction from core.workflows.matroska_language_editor import MatroskaLanguagePostAction +from core.workflows.common.chapters import ( + 
probe_media_duration_seconds, + write_ffmetadata_chapters, +) +from core.workflows.common.ffmpeg_runtime import ( + cli_path as _cli_path, + ffmpeg_progress_args, + ffmpeg_thread_args as _common_ffmpeg_thread_args, + normalize_ffmpeg_thread_count as _normalize_ffmpeg_thread_count, +) +from core.workflows.common.timeline_sync import needs_strict_interleave as _common_needs_strict_interleave +from core.workflows.common.metadata import STREAM_SPEC_BY_TRACK_TYPE as _STREAM_SPEC_BY_TYPE +from core.workflows.remux_attachments import ( + attachment_names as _attachment_names_helper, + build_attachment_mapping as _build_attachment_mapping_helper, + extract_attached_pics as _extract_attached_pics_helper, + write_mediainfo_nfo as _write_mediainfo_nfo_helper, +) +from core.workflows.remux_command import ( + build_remux_command as _build_remux_command_helper, + preview_remux_command as _preview_remux_command_helper, +) +from core.workflows.remux_mapping import ( + append_offset_inputs as _append_offset_inputs_helper, + chapter_map_value as _chapter_map_value_helper, + disposition_value_for_track as _disposition_value_helper, + is_dir_writable as _is_dir_writable_helper, + MappedTrack as _MappedTrack, + metadata_map_value as _metadata_map_value_helper, + normalized_language_value as _normalized_language_value_helper, + offset_input_specs_from_mapped_tracks as _offset_input_specs_from_mapped_tracks_helper, + offset_seconds as _offset_seconds_helper, + requires_file_sync_fallback_for_offsets as _requires_file_sync_fallback_for_offsets_helper, + resolve_mapped_tracks as _resolve_mapped_tracks_helper, + resolved_global_tags as _resolved_global_tags_helper, + track_order_parts as _track_order_parts_helper, +) from core.workflows.remux_models import ( RemuxConfig, RemuxError, @@ -48,6 +79,11 @@ TrackEntry, tracks_from_file_info, ) +from core.workflows.remux_sync import ( + bind_temp_cleanup as _bind_temp_cleanup_helper, + decide_strict_interleave_with_prescan as 
_decide_strict_interleave_with_prescan_helper, + prepare_timeline_sync_inputs as _prepare_timeline_sync_inputs_helper, +) from core.workflows.remux_timeline_sync import ( LiveSyncSession, FfmpegTimelineSync, @@ -55,110 +91,6 @@ TimelineSyncFallbackHelper, ) - -_MIME_BY_EXT: dict[str, str] = { - ".jpg": "image/jpeg", - ".jpeg": "image/jpeg", - ".png": "image/png", - ".gif": "image/gif", - ".bmp": "image/bmp", - ".webp": "image/webp", - ".tif": "image/tiff", - ".tiff": "image/tiff", - ".ttf": "application/x-truetype-font", - ".otf": "font/otf", - ".woff": "font/woff", - ".woff2": "font/woff2", - ".txt": "text/plain", - ".xml": "application/xml", -} - -_EXT_BY_MIME: dict[str, str] = { - "image/jpeg": ".jpg", - "image/png": ".png", - "image/gif": ".gif", - "image/bmp": ".bmp", - "image/webp": ".webp", - "image/tiff": ".tiff", - "application/x-truetype-font": ".ttf", - "font/ttf": ".ttf", - "font/otf": ".otf", - "font/woff": ".woff", - "font/woff2": ".woff2", - "text/plain": ".txt", - "application/xml": ".xml", -} - -_STREAM_SPEC_BY_TYPE: dict[str, str] = { - "video": "v", - "audio": "a", - "subtitle": "s", -} - - -@dataclass(frozen=True) -class _MappedTrack: - source_input_idx: int - source_file_index: int - source_path: Path | str - stream_index: int - track: TrackEntry - out_type_index: int - - -@dataclass(frozen=True) -class _OffsetInputSpec: - map_key: tuple[int, int, str, int] - input_path: Path | str - input_stream_index: int - offset_ms: int - - -@dataclass(frozen=True) -class _AttachmentSpec: - path: Path - filename: str - mimetype: str - - -def _cli_path(path: Path | str) -> str: - if isinstance(path, str): - return path - text = str(path) - if text.startswith("\\\\.\\pipe\\"): - return text - return path.as_posix() - - -def _mime_for(path: Path) -> str: - return _MIME_BY_EXT.get(path.suffix.lower(), "application/octet-stream") - - -def _sanitize_filename(name: str, fallback: str) -> str: - clean = Path((name or "").strip()).name - return clean or fallback - - 
-def _ffmeta_escape(value: str) -> str: - text = str(value).replace("\\", "\\\\") - text = text.replace("\n", " ") - text = text.replace(";", "\\;").replace("#", "\\#").replace("=", "\\=") - return text - - -def _default_ffmpeg_thread_count() -> int: - """Default FFmpeg thread count: logical CPU count x 0.75, rounded up.""" - cpu_count = os.cpu_count() or 1 - return max(1, (cpu_count * 3 + 3) // 4) - - -def _normalize_ffmpeg_thread_count(value: int | None) -> int: - """Return a safe FFmpeg thread count, preserving 0 as ffmpeg auto mode.""" - if value is None or value < 0: - return _default_ffmpeg_thread_count() - return value - - # ============================================================================= # NFO helper # ============================================================================= @@ -169,20 +101,12 @@ def write_mediainfo_nfo( mediainfo_bin: str = "mediainfo", ) -> None: """Génère un fichier .nfo (même nom que le MKV) avec la sortie brute de mediainfo.""" - output_path = Path(output_path).expanduser() - if not output_path.is_absolute(): - output_path = output_path.resolve() - nfo_path = output_path.with_suffix(".nfo") - try: - result = subprocess.run( - [mediainfo_bin, str(output_path)], - capture_output=True, - **subprocess_text_kwargs(), - ) - nfo_path.write_text(result.stdout, encoding="utf-8") - log_cb("OK", f"NFO généré : {nfo_path.name}") - except Exception as exc: - log_cb("WARN", f"Impossible de générer le NFO : {exc}") + _write_mediainfo_nfo_helper( + output_path, + log_cb=log_cb, + mediainfo_bin=mediainfo_bin, + run_cmd=subprocess.run, + ) # ============================================================================= @@ -248,11 +172,7 @@ def set_mediainfo_bin(self, mediainfo_bin: str) -> None: self._mediainfo_bin = mediainfo_bin def _ffmpeg_thread_args(self) -> list[str]: - return ["-threads", str(self._ffmpeg_threads)] - - @staticmethod - def _ffmpeg_progress_args() -> list[str]: - return ["-progress", "pipe:1", "-nostats"] + return 
_common_ffmpeg_thread_args(self._ffmpeg_threads) # ------------------------------------------------------------------ # Validation @@ -373,80 +293,7 @@ def validate(self, config: RemuxConfig) -> list[str]: @staticmethod def _is_dir_writable(path: Path) -> bool: - try: - with tempfile.NamedTemporaryFile( - mode="wb", - dir=path, - prefix="mrecode_write_probe_", - delete=True, - ): - pass - return True - except OSError: - return False - - @staticmethod - def _offset_seconds(offset_ms: int) -> str: - return f"{abs(int(offset_ms)) / 1000.0:.3f}" - - def _offset_input_specs_from_mapped_tracks( - self, - mapped_tracks: list[_MappedTrack], - ) -> list[_OffsetInputSpec]: - specs: list[_OffsetInputSpec] = [] - for mt in mapped_tracks: - offset_ms = int(getattr(mt.track, "time_shift_ms", 0) or 0) - if offset_ms == 0: - continue - if mt.track.track_type == "video" and offset_ms < 0: - raise RemuxError( - "Décalage vidéo négatif interdit : " - f"file_index={mt.source_file_index}, stream={mt.stream_index}, offset={offset_ms} ms" - ) - specs.append(_OffsetInputSpec( - map_key=( - int(mt.source_file_index), - int(mt.stream_index), - str(mt.track.track_type), - int(mt.out_type_index), - ), - input_path=mt.source_path, - input_stream_index=int(mt.stream_index), - offset_ms=offset_ms, - )) - return specs - - def _append_offset_inputs( - self, - cmd: list[str], - specs: list[_OffsetInputSpec], - *, - start_input_index: int, - ) -> tuple[int, dict[tuple[int, int, str, int], tuple[int, int]]]: - next_input_index = start_input_index - input_by_key: dict[tuple[str, int, int, str], int] = {} - remap: dict[tuple[int, int, str, int], tuple[int, int]] = {} - - for spec in specs: - input_key = ( - _cli_path(spec.input_path), - int(spec.input_stream_index), - int(spec.offset_ms), - str(spec.map_key[2]), - ) - input_idx = input_by_key.get(input_key) - if input_idx is None: - if int(spec.offset_ms) > 0: - cmd.extend(["-itsoffset", self._offset_seconds(spec.offset_ms), "-i", 
_cli_path(spec.input_path)]) - else: - cmd.extend(["-ss", self._offset_seconds(spec.offset_ms), "-i", _cli_path(spec.input_path)]) - input_idx = next_input_index - input_by_key[input_key] = input_idx - next_input_index += 1 - - remap[spec.map_key] = (int(input_idx), int(spec.input_stream_index)) - - return next_input_index, remap + return _is_dir_writable_helper(path) # ------------------------------------------------------------------ # Construction de commande @@ -463,135 +310,24 @@ def build_command( strict_interleave_override: bool | None = None, mapped_tracks_override: list[_MappedTrack] | None = None, ) -> list[str]: - mapped_tracks = ( - mapped_tracks_override - if mapped_tracks_override is not None - else self._resolve_mapped_tracks(config) - ) - needs_strict_interleave = ( - self._needs_strict_interleave(mapped_tracks) - if strict_interleave_override is None - else strict_interleave_override - ) - - cmd: list[str] = [self._ffmpeg, "-hide_banner", "-y"] - cmd.extend(self._ffmpeg_progress_args()) - cmd.extend(self._ffmpeg_thread_args()) - - for src in config.sources: - cmd.extend(["-i", _cli_path(src.path)]) - for i, p in enumerate(sync_inputs or []): - fmt = (sync_input_formats or [])[i] if sync_input_formats and i < len(sync_input_formats) else "matroska" - cmd.extend(["-f", fmt, "-i", _cli_path(p)]) - for p in (extra_inputs or []): - cmd.extend(["-i", _cli_path(p)]) - - sync_count = len(sync_inputs or []) - extra_count = len(extra_inputs or []) - offset_specs = self._offset_input_specs_from_mapped_tracks(mapped_tracks) - _, offset_remap = self._append_offset_inputs( - cmd, - offset_specs, - start_input_index=len(config.sources) + sync_count + extra_count, - ) - - for mt in mapped_tracks: - map_key = ( - int(mt.source_file_index), - int(mt.stream_index), - str(mt.track.track_type), - int(mt.out_type_index), - ) - remapped = offset_remap.get(map_key) - if remapped is None: - cmd.extend(["-map", f"{mt.source_input_idx}:{mt.stream_index}"]) - else: - 
cmd.extend(["-map", f"{remapped[0]}:{remapped[1]}"]) - - cmd.extend(["-c", "copy", "-default_mode", "passthrough"]) - - # Sortie MKV : certains codecs de sous-titres (mov_text, eia_608, …) - # ne sont pas copyables en Matroska → conversion vers srt par piste. - for mt in mapped_tracks: - if mt.track.track_type != "subtitle": - continue - try: - codec_arg, _ = plan_subtitle_codec(mt.track.codec) - except ValueError as exc: - raise RemuxError(str(exc)) from exc - if codec_arg != "copy": - cmd.extend([f"-c:s:{mt.out_type_index}", codec_arg]) - if needs_strict_interleave: - cmd.extend(["-max_interleave_delta", "0"]) - cmd.extend(["-max_muxing_queue_size", "9999"]) - chapter_map = self._chapter_map_value(config, chapter_input_index) - cmd.extend(["-map_metadata", self._metadata_map_value(config, chapter_input_index, chapter_map)]) - cmd.extend(["-map_chapters", chapter_map]) - - cmd.extend(["-metadata", "encoder=", "-metadata", "creation_time="]) - - for key, value in self._resolved_global_tags(config).items(): - cmd.extend(["-metadata", f"{key}={value}"]) - - for mt in mapped_tracks: - stream_spec = _STREAM_SPEC_BY_TYPE[mt.track.track_type] - out_idx = mt.out_type_index - - lang_value = self._normalized_language_value(mt.track) - cmd.extend([f"-metadata:s:{stream_spec}:{out_idx}", f"language={lang_value}"]) - cmd.extend([f"-metadata:s:{stream_spec}:{out_idx}", "language-ietf="]) - - cmd.extend([f"-metadata:s:{stream_spec}:{out_idx}", f"title={mt.track.title or ''}"]) - cmd.extend([f"-disposition:{stream_spec}:{out_idx}", self._disposition_value(mt.track)]) - - cmd.extend(self._build_attachment_mapping(config)) - att_t_idx = 0 - for src in config.sources: - for att in sorted(src.selected_attachments, key=lambda a: a.local_index): - if att.is_attached_pic: - continue - meta_name = self._attachment_names(att)[0] - mimetype = (att.mimetype or "").strip() or _mime_for(Path(meta_name)) - cmd.extend([f"-metadata:s:t:{att_t_idx}", f"mimetype={mimetype}"]) - 
cmd.extend([f"-metadata:s:t:{att_t_idx}", f"filename={meta_name}"]) - att_t_idx += 1 - - for att_path in config.extra_attachments: - cmd.extend(["-attach", _cli_path(att_path)]) - cmd.extend([f"-metadata:s:t:{att_t_idx}", f"mimetype={_mime_for(att_path)}"]) - cmd.extend([f"-metadata:s:t:{att_t_idx}", f"filename={att_path.name}"]) - att_t_idx += 1 - - cmd.append(_cli_path(config.output)) - return cmd - - def preview_command(self, config: RemuxConfig) -> str: - extra_inputs: list[Path | str] = [] - chapter_input_index: int | None = None - if config.chapter_overrides: - extra_inputs.append(Path("")) - chapter_input_index = len(config.sources) - - parts = self.build_command( + return _build_remux_command_helper( config, + ffmpeg_bin=self._ffmpeg, + ffmpeg_progress_args=ffmpeg_progress_args(), + ffmpeg_thread_args=self._ffmpeg_thread_args(), + cli_path=_cli_path, + sync_inputs=sync_inputs, + sync_input_formats=sync_input_formats, extra_inputs=extra_inputs, chapter_input_index=chapter_input_index, + strict_interleave_override=strict_interleave_override, + mapped_tracks_override=mapped_tracks_override, + resolve_mapped_tracks_fn=_resolve_mapped_tracks_helper, + needs_strict_interleave_fn=_common_needs_strict_interleave, ) - if not parts: - return "" - - lines: list[str] = [parts[0]] - i = 1 - while i < len(parts): - p = parts[i] - if p.startswith("-") and i + 1 < len(parts) and not parts[i + 1].startswith("-"): - lines.append(f" {p} {parts[i + 1]}") - i += 2 - else: - lines.append(f" {p}") - i += 1 - return " \\\n".join(lines) + def preview_command(self, config: RemuxConfig) -> str: + return _preview_remux_command_helper(config, build_command=self.build_command) # ------------------------------------------------------------------ # Exécution @@ -660,28 +396,44 @@ def _task() -> None: try: extra_inputs: list[Path | str] = [] self._log_step(3, "Analyse du mapping pistes + pr\u00e9-scan de risque") - mapped_tracks = self._resolve_mapped_tracks(run_config) - strict_interleave 
= self._decide_strict_interleave_with_prescan(run_config) + mapped_tracks = _resolve_mapped_tracks_helper(run_config) + strict_interleave = _decide_strict_interleave_with_prescan_helper( + run_config, + resolve_mapped_tracks=_resolve_mapped_tracks_helper, + log_cb=self.log_message.emit, + ) - extracted_pics = self._extract_attached_pics(run_config, tmp_dir, signals) + extracted_pics = _extract_attached_pics_helper( + run_config, + tmp_dir, + signals, + ffmpeg_bin=self._ffmpeg, + cli_path=_cli_path, + log_cb=self.log_message.emit, + ) if extracted_pics: run_config = replace(run_config, extra_attachments=[*run_config.extra_attachments, *extracted_pics]) if strict_interleave: self._log_step(4, "Synchronisation timeline multi-source (live/fallback)") allow_live_sync = True - if self._requires_file_sync_fallback_for_offsets(mapped_tracks): + if _requires_file_sync_fallback_for_offsets_helper(mapped_tracks): allow_live_sync = False self.log_message.emit( "INFO", "D\u00e9calage sur piste \u00e9trang\u00e8re d\u00e9tect\u00e9 : sync live d\u00e9sactiv\u00e9, fallback fichier forc\u00e9.", ) - mapped_tracks, sync_prepared, live_sync_session = self._prepare_timeline_sync_inputs( + mapped_tracks, sync_prepared, live_sync_session = _prepare_timeline_sync_inputs_helper( run_config, mapped_tracks, tmp_dir, signals, allow_live=allow_live_sync, + ffmpeg_bin=self._ffmpeg, + ffmpeg_thread_args=self._ffmpeg_thread_args(), + log_cb=self.log_message.emit, + syncer_factory=FfmpegTimelineSync, + fallback_helper_factory=TimelineSyncFallbackHelper, ) sync_cleanup_paths = [p for p in (item.path for item in sync_prepared) if isinstance(p, Path)] else: @@ -693,8 +445,8 @@ def _task() -> None: chapter_input_index: int | None = None if run_config.chapter_overrides: self._log_step(5, "Mat\u00e9rialisation des chapitres FFMetadata") - duration_s = self._probe_duration_seconds(run_config.sources[0].path) - chapter_meta_file = self._write_ffmetadata_chapters( + duration_s = 
probe_media_duration_seconds(self._ffprobe, run_config.sources[0].path) + chapter_meta_file = write_ffmetadata_chapters( entries=run_config.chapter_overrides, out_dir=tmp_dir, duration_s=duration_s, @@ -759,325 +511,27 @@ def _write_nfo(self, output_path: Path) -> None: def _bind_temp_cleanup(self, signals: TaskSignals, cleanup_paths: list[Path]) -> None: """Supprime les dossiers temporaires du workflow quand le traitement se termine.""" - if not cleanup_paths: - return - - done = {"cleaned": False} - - def _cleanup(*_args) -> None: - if done["cleaned"]: - return - done["cleaned"] = True - for path in cleanup_paths: - try: - remove_path(path) - except OSError: - pass - - signals.finished.connect(_cleanup) - signals.failed.connect(_cleanup) - signals.cancelled.connect(_cleanup) - - # ------------------------------------------------------------------ - # Helpers: mapping pistes / tags / langues / flags - # ------------------------------------------------------------------ - - @staticmethod - def _needs_strict_interleave(mapped_tracks: list[_MappedTrack]) -> bool: - """ - Déclenche l'interleave strict si : - - remux multi-source effectif, - - sous-titres en sortie (flux clairsemés), - - au moins une piste audio ou sous-titre venant d'une source étrangère. - """ - if len({mt.source_file_index for mt in mapped_tracks}) < 2: - return False - - if not any(mt.track.track_type == "subtitle" for mt in mapped_tracks): - return False - - primary_video = next((mt for mt in mapped_tracks if mt.track.track_type == "video"), None) - if primary_video is None: - return False - - return any( - mt.track.track_type in {"audio", "subtitle"} - and mt.source_file_index != primary_video.source_file_index - for mt in mapped_tracks - ) - - @staticmethod - def _requires_file_sync_fallback_for_offsets(mapped_tracks: list[_MappedTrack]) -> bool: - """ - Le mode live (FIFO/pipe) n'est pas robuste pour les offsets piste-par-piste - sur des flux \u00e9trangers. 
On force alors un fallback fichier explicite. - """ - primary_video = next((mt for mt in mapped_tracks if mt.track.track_type == "video"), None) - if primary_video is None: - return False - - return any( - mt.track.track_type in {"audio", "subtitle"} - and mt.source_file_index != primary_video.source_file_index - and int(getattr(mt.track, "time_shift_ms", 0) or 0) != 0 - for mt in mapped_tracks - ) - - def _decide_strict_interleave_with_prescan(self, config: RemuxConfig) -> bool: - mapped_tracks = self._resolve_mapped_tracks(config) - if self._requires_file_sync_fallback_for_offsets(mapped_tracks): - self.log_message.emit( - "INFO", - "D\u00e9calage sur piste \u00e9trang\u00e8re d\u00e9tect\u00e9 : sync timeline activ\u00e9.", - ) - return True - - base_risk = self._needs_strict_interleave(mapped_tracks) - if not base_risk: - return False - - # Piste audio étrangère détectée + sous-titres en sortie → sync obligatoire, - # pas besoin de prescan ffprobe (coûteux sur gros fichiers). - self.log_message.emit( - "INFO", - "Piste audio \u00e9trang\u00e8re + sous-titres d\u00e9tect\u00e9s : interleave strict activ\u00e9.", - ) - return True - - def _prepare_timeline_sync_inputs( - self, - config: RemuxConfig, - mapped_tracks: list[_MappedTrack], - tmp_dir: Path, - signals: TaskSignals, - *, - allow_live: bool = True, - ) -> tuple[list[_MappedTrack], list[SyncPreparedInput], LiveSyncSession | None]: - """ - D\u00e9l\u00e8gue la normalisation des flux \u00e9trangers \u00e0 un utilitaire d\u00e9di\u00e9 afin de - conserver une logique testable et r\u00e9utilisable hors workflow. 
- """ - syncer = FfmpegTimelineSync( - ffmpeg_bin=self._ffmpeg, - ffmpeg_thread_args=self._ffmpeg_thread_args(), - log_cb=lambda msg: self.log_message.emit("INFO", msg), - ) - prepared_result = TimelineSyncFallbackHelper( - syncer=syncer, - work_dir=tmp_dir, - ram_dir=TimelineSyncFallbackHelper.default_ram_dir(), - log_cb=lambda msg: self.log_message.emit("INFO", msg), - ).prepare( - mapped_tracks=mapped_tracks, - sources=config.sources, - base_input_idx=len(config.sources), - allow_live=allow_live, - cancel_cb=signals._cancel_event.is_set, - ) - live_session = prepared_result.live_session - prepared = prepared_result.prepared_inputs - if live_session is not None: - for proc in live_session.processes: - signals._register_proc(proc) - - if not prepared: - return mapped_tracks, [], live_session - - remap = {item.key: item for item in prepared} - remapped: list[_MappedTrack] = [] - for mt in mapped_tracks: - key = (mt.source_file_index, mt.stream_index, mt.track.track_type) - hit = remap.get(key) - if hit is None: - remapped.append(mt) - continue - remapped.append(replace( - mt, - source_input_idx=hit.input_idx, - source_path=hit.path, - stream_index=0, - )) - - return remapped, prepared, live_session - - def _resolve_mapped_tracks(self, config: RemuxConfig) -> list[_MappedTrack]: - file_index_to_input_idx = { - src.file_index: i - for i, src in enumerate(config.sources) - } - - track_map_by_id = { - (src.file_index, t.entry_id): (src.path, t) - for src in config.sources - for t in src.tracks - } - track_map_by_pair: dict[tuple[int, int], list[tuple[Path, TrackEntry]]] = {} - for src in config.sources: - for track in src.tracks: - track_map_by_pair.setdefault((src.file_index, track.mkv_tid), []).append( - (src.path, track) - ) - - type_counters: dict[str, int] = {"video": 0, "audio": 0, "subtitle": 0} - mapped: list[_MappedTrack] = [] - - for order_item in config.track_order: - file_index, mkv_tid, entry_id = self._track_order_parts(order_item) - input_idx = 
file_index_to_input_idx.get(file_index) - if input_idx is None: - raise RemuxError(f"Source inconnue dans track_order : file_index={file_index}") - - found = ( - track_map_by_id.get((file_index, entry_id)) - if entry_id - else next(iter(track_map_by_pair.get((file_index, mkv_tid), [])), None) - ) - if found is None: - raise RemuxError( - "Piste introuvable dans track_order : " - f"file_index={file_index}, stream={mkv_tid}" - ) - src_path, track = found - if track.track_type not in _STREAM_SPEC_BY_TYPE: - raise RemuxError( - "Type de piste non support\u00e9 en remux FFmpeg : " - f"{track.track_type} (file_index={file_index}, stream={mkv_tid})" - ) - - out_type_index = type_counters[track.track_type] - type_counters[track.track_type] += 1 - mapped.append(_MappedTrack( - source_input_idx=input_idx, - source_file_index=file_index, - source_path=src_path, - stream_index=mkv_tid, - track=track, - out_type_index=out_type_index, - )) - - return mapped + _bind_temp_cleanup_helper(signals, cleanup_paths) @staticmethod def _track_order_parts( item: tuple[int, int] | tuple[int, int, str], ) -> tuple[int, int, str | None]: - if len(item) >= 3: - file_index, mkv_tid, entry_id = item[0], item[1], str(item[2] or "").strip() - return int(file_index), int(mkv_tid), entry_id or None - return int(item[0]), int(item[1]), None - - @staticmethod - def _chapter_map_value(config: RemuxConfig, chapter_input_index: int | None) -> str: - if config.chapter_overrides is not None: - if config.chapter_overrides and chapter_input_index is not None: - return str(chapter_input_index) - return "-1" - return "0" if config.keep_chapters else "-1" - - @staticmethod - def _metadata_map_value( - config: RemuxConfig, - chapter_input_index: int | None, - chapter_map: str | None = None, - ) -> str: - if config.tag_overrides is not None: - if config.chapter_overrides and chapter_input_index is not None: - return str(chapter_input_index) - if chapter_map is not None and chapter_map not in ("-1", ""): - return 
chapter_map - return "-1" - for input_idx, src in enumerate(config.sources): - if src.copy_tags: - return str(input_idx) - # Sans copy_tags explicite : aligner -map_metadata sur la source des chapitres - # pour que FFmpeg préserve les titres de chapitres (les tags globaux parasites - # sont neutralisés par les -metadata key= ajoutés par l'appelant). - if chapter_map is not None and chapter_map not in ("-1", ""): - return chapter_map - return "-1" - - def _resolved_global_tags(self, config: RemuxConfig) -> dict[str, str]: - tags: dict[str, str] = {} - - if config.tag_overrides is not None: - for key, value in config.tag_overrides.items(): - key_s = str(key).strip() - value_s = str(value).strip() - if not key_s or not value_s: - continue - tags[key_s] = value_s - - if config.file_title.strip(): - tags["title"] = config.file_title.strip() + return _track_order_parts_helper(item) - return tags - - @staticmethod - def _normalized_language_value(track: TrackEntry) -> str: - raw = (track.language or "").strip() or "und" - canonical = LangTags.normalize(raw) or raw - - if canonical.lower() == "und": - return "und" - - regional = LangTags.regionalize_track_language(canonical, track.title) or canonical - if LangTags.is_valid(regional) and regional.lower() != "und": - return regional - if LangTags.is_valid(canonical) and canonical.lower() != "und": - return canonical - return "und" - - @staticmethod - def _disposition_value(track: TrackEntry) -> str: - flags: list[str] = [] - if track.flag_default: - flags.append("default") - if track.flag_forced: - flags.append("forced") - if track.flag_hearing_impaired: - flags.append("hearing_impaired") - if track.flag_visual_impaired: - flags.append("visual_impaired") - if track.flag_original: - flags.append("original") - if track.flag_commentary: - flags.append("comment") - return "+".join(flags) if flags else "0" + def _decide_strict_interleave_with_prescan(self, config: RemuxConfig) -> bool: + return 
_decide_strict_interleave_with_prescan_helper( + config, + resolve_mapped_tracks=_resolve_mapped_tracks_helper, + log_cb=self.log_message.emit, + ) # ------------------------------------------------------------------ - # Helpers: chapitres (ffmetadata) + # Helpers: chapitres (contrat interne encode -> remux) # ------------------------------------------------------------------ def _probe_duration_seconds(self, source: Path) -> float | None: - cmd = [ - self._ffprobe, - "-v", "quiet", - "-print_format", "json", - "-show_format", - str(source), - ] - try: - result = subprocess.run( - cmd, - capture_output=True, - check=False, - timeout=20, - **subprocess_text_kwargs(), - ) - except FileNotFoundError: - return None - - if result.returncode != 0: - return None - try: - payload = json.loads(result.stdout or "{}") - raw = (payload.get("format") or {}).get("duration") - if raw is None: - return None - value = float(raw) - return value if value > 0 else None - except Exception: - return None + return probe_media_duration_seconds(self._ffprobe, source) def _write_ffmetadata_chapters( self, @@ -1085,110 +539,4 @@ def _write_ffmetadata_chapters( out_dir: Path, duration_s: float | None, ) -> Path: - sorted_entries = sorted(entries, key=lambda e: float(getattr(e, "timecode_s", 0.0))) - - if duration_s is None: - duration_s = max((float(getattr(e, "timecode_s", 0.0)) for e in sorted_entries), default=0.0) + 1.0 - total_ms = max(1, int(round(duration_s * 1000.0))) - - lines: list[str] = [";FFMETADATA1"] - for idx, chapter in enumerate(sorted_entries): - start_ms = max(0, int(round(float(getattr(chapter, "timecode_s", 0.0)) * 1000.0))) - if idx + 1 < len(sorted_entries): - end_ms = max(start_ms + 1, int(round(float(getattr(sorted_entries[idx + 1], "timecode_s", 0.0)) * 1000.0))) - else: - end_ms = max(start_ms + 1, total_ms) - - lines.extend([ - "", - "[CHAPTER]", - "TIMEBASE=1/1000", - f"START={start_ms}", - f"END={end_ms}", - f"title={_ffmeta_escape(str(getattr(chapter, 'name', 
'') or ''))}", - ]) - - ffmeta_path = out_dir / "chapters.ffmetadata" - ffmeta_path.write_text("\n".join(lines) + "\n", encoding="utf-8") - return ffmeta_path - - # ------------------------------------------------------------------ - # Helpers: attachements - # ------------------------------------------------------------------ - - def _build_attachment_mapping(self, config: RemuxConfig) -> list[str]: - """ - G\u00e9n\u00e8re les arguments FFmpeg pour mapper les attachements sources directement. - """ - args: list[str] = [] - for input_idx, src in enumerate(config.sources): - for att in sorted(src.selected_attachments, key=lambda a: a.local_index): - if att.is_attached_pic: - continue - args.extend(["-map", f"{input_idx}:{att.index}"]) - if args: - args.extend(["-c:t", "copy"]) - args.extend(["-map_metadata:s:t", "-1"]) - return args - - def _extract_attached_pics( - self, - config: RemuxConfig, - tmp_dir: Path, - signals: TaskSignals, - ) -> list[Path]: - """ - Extrait les flux ``attached_pic`` (couvertures MKV) vers des fichiers temporaires. 
- """ - paths: list[Path] = [] - for src in config.sources: - for att in sorted(src.selected_attachments, key=lambda a: a.local_index): - if not att.is_attached_pic: - continue - if signals._cancel_event.is_set(): - raise TaskCancelledError() - - raw_name = _sanitize_filename(att.filename, f"attachment_{att.index}") - suffix = Path(raw_name).suffix.lower() - if not suffix: - suffix = _EXT_BY_MIME.get((att.mimetype or "").strip().lower(), ".jpg") - stem = Path(raw_name).stem or f"attachment_{att.index}" - out_path = tmp_dir / f"{stem}{suffix}" - counter = 1 - while out_path.exists(): - out_path = tmp_dir / f"{stem}_{counter}{suffix}" - counter += 1 - - cmd = [ - self._ffmpeg, "-hide_banner", "-y", - "-i", _cli_path(src.path), - "-map", f"0:{att.index}", - "-threads", "1", - "-frames:v", "1", - _cli_path(out_path), - ] - self.log_message.emit("INFO", "$ " + " ".join(str(c) for c in cmd)) - result = subprocess.run( - cmd, - capture_output=True, - check=False, - timeout=60, - **subprocess_text_kwargs(), - ) - if result.returncode != 0 or not out_path.exists() or out_path.stat().st_size == 0: - stderr = (result.stderr or "").strip() - raise RemuxError( - f"Extraction attached_pic \u00e9chou\u00e9e " - f"(source={src.path.name}, stream={att.index}): {stderr}" - ) - paths.append(out_path) - return paths - - def _attachment_names(self, att: AttachmentInfo) -> tuple[str, str]: - raw_name = _sanitize_filename(att.filename, f"attachment_{att.index}") - if not Path(raw_name).suffix: - mime = (att.mimetype or "").strip().lower() - ext = _EXT_BY_MIME.get(mime, ".bin") - raw_name = f"{raw_name}{ext}" - - return raw_name, raw_name + return write_ffmetadata_chapters(entries, out_dir, duration_s) diff --git a/core/workflows/remux_attachments.py b/core/workflows/remux_attachments.py new file mode 100644 index 0000000..238d5c1 --- /dev/null +++ b/core/workflows/remux_attachments.py @@ -0,0 +1,119 @@ +"""Attachment and NFO helpers for the remux workflow.""" + +from __future__ import 
annotations + +import subprocess +from pathlib import Path +from typing import Callable + +from core.inspector import AttachmentInfo +from core.runner import TaskCancelledError, TaskSignals +from core.subprocess_utils import subprocess_text_kwargs +from core.workflows.common.attachments import ATTACHMENT_EXT_BY_MIME, mime_for_path, sanitize_filename +from core.workflows.remux_models import RemuxConfig, RemuxError + + +def write_mediainfo_nfo( + output_path: Path, + log_cb: Callable[[str, str], None], + mediainfo_bin: str = "mediainfo", + *, + run_cmd: Callable[..., subprocess.CompletedProcess[str]] = subprocess.run, +) -> None: + output_path = Path(output_path).expanduser() + if not output_path.is_absolute(): + output_path = output_path.resolve() + nfo_path = output_path.with_suffix(".nfo") + try: + result = run_cmd( + [mediainfo_bin, str(output_path)], + capture_output=True, + **subprocess_text_kwargs(), + ) + nfo_path.write_text(result.stdout, encoding="utf-8") + log_cb("OK", f"NFO généré : {nfo_path.name}") + except Exception as exc: + log_cb("WARN", f"Impossible de générer le NFO : {exc}") + + +def build_attachment_mapping(config: RemuxConfig) -> list[str]: + args: list[str] = [] + for input_idx, source in enumerate(config.sources): + for attachment in sorted(source.selected_attachments, key=lambda item: item.local_index): + if attachment.is_attached_pic: + continue + args.extend(["-map", f"{input_idx}:{attachment.index}"]) + if args: + args.extend(["-c:t", "copy"]) + args.extend(["-map_metadata:s:t", "-1"]) + return args + + +def attachment_names(attachment: AttachmentInfo) -> tuple[str, str]: + raw_name = sanitize_filename(attachment.filename, f"attachment_{attachment.index}") + if not Path(raw_name).suffix: + mime = (attachment.mimetype or "").strip().lower() + ext = ATTACHMENT_EXT_BY_MIME.get(mime, ".bin") + raw_name = f"{raw_name}{ext}" + return raw_name, raw_name + + +def extract_attached_pics( + config: RemuxConfig, + tmp_dir: Path, + signals: 
TaskSignals, + *, + ffmpeg_bin: str, + cli_path: Callable[[Path | str], str], + log_cb: Callable[[str, str], None], +) -> list[Path]: + paths: list[Path] = [] + for source in config.sources: + for attachment in sorted(source.selected_attachments, key=lambda item: item.local_index): + if not attachment.is_attached_pic: + continue + if signals._cancel_event.is_set(): + raise TaskCancelledError() + + raw_name = sanitize_filename(attachment.filename, f"attachment_{attachment.index}") + suffix = Path(raw_name).suffix.lower() + if not suffix: + suffix = ATTACHMENT_EXT_BY_MIME.get((attachment.mimetype or "").strip().lower(), ".jpg") + stem = Path(raw_name).stem or f"attachment_{attachment.index}" + out_path = tmp_dir / f"{stem}{suffix}" + counter = 1 + while out_path.exists(): + out_path = tmp_dir / f"{stem}_{counter}{suffix}" + counter += 1 + + cmd = [ + ffmpeg_bin, "-hide_banner", "-y", + "-i", cli_path(source.path), + "-map", f"0:{attachment.index}", + "-threads", "1", + "-frames:v", "1", + cli_path(out_path), + ] + log_cb("INFO", "$ " + " ".join(str(token) for token in cmd)) + result = subprocess.run( + cmd, + capture_output=True, + check=False, + timeout=60, + **subprocess_text_kwargs(), + ) + if result.returncode != 0 or not out_path.exists() or out_path.stat().st_size == 0: + stderr = (result.stderr or "").strip() + raise RemuxError( + f"Extraction attached_pic échouée (source={source.path.name}, stream={attachment.index}): {stderr}" + ) + paths.append(out_path) + return paths + + +__all__ = [ + "attachment_names", + "build_attachment_mapping", + "extract_attached_pics", + "write_mediainfo_nfo", +] diff --git a/core/workflows/remux_command.py b/core/workflows/remux_command.py new file mode 100644 index 0000000..9529f5c --- /dev/null +++ b/core/workflows/remux_command.py @@ -0,0 +1,172 @@ +"""Command construction helpers for the remux workflow.""" + +from __future__ import annotations + +from collections.abc import Sequence +from pathlib import Path +from typing 
import Callable + +from core.subtitle_codec import plan_subtitle_codec +from core.workflows.common.attachments import mime_for_path +from core.workflows.common.track_types import TimelineMappedTrack +from core.workflows.common.metadata import STREAM_SPEC_BY_TRACK_TYPE as STREAM_SPEC_BY_TYPE +from core.workflows.remux_attachments import attachment_names, build_attachment_mapping +from core.workflows.remux_mapping import ( + MappedTrack, + append_offset_inputs, + disposition_value_for_track, + metadata_context, + normalized_language_value, + offset_input_specs_from_mapped_tracks, + resolve_mapped_tracks, +) +from core.workflows.remux_models import RemuxConfig, RemuxError + + +def build_remux_command( + config: RemuxConfig, + *, + ffmpeg_bin: str, + ffmpeg_progress_args: list[str], + ffmpeg_thread_args: list[str], + cli_path: Callable[[Path | str], str], + sync_inputs: list[Path | str] | None = None, + sync_input_formats: list[str] | None = None, + extra_inputs: list[Path | str] | None = None, + chapter_input_index: int | None = None, + strict_interleave_override: bool | None = None, + mapped_tracks_override: list[MappedTrack] | None = None, + resolve_mapped_tracks_fn: Callable[[RemuxConfig], list[MappedTrack]] = resolve_mapped_tracks, + needs_strict_interleave_fn: Callable[[Sequence[TimelineMappedTrack]], bool] | None = None, +) -> list[str]: + mapped_tracks = mapped_tracks_override if mapped_tracks_override is not None else resolve_mapped_tracks_fn(config) + needs_strict_interleave_impl = needs_strict_interleave_fn or (lambda tracks: False) + needs_strict_interleave = ( + needs_strict_interleave_impl(mapped_tracks) + if strict_interleave_override is None + else strict_interleave_override + ) + + cmd: list[str] = [ffmpeg_bin, "-hide_banner", "-y"] + cmd.extend(ffmpeg_progress_args) + cmd.extend(ffmpeg_thread_args) + + for source in config.sources: + cmd.extend(["-i", cli_path(source.path)]) + for index, path in enumerate(sync_inputs or []): + fmt = 
(sync_input_formats or [])[index] if sync_input_formats and index < len(sync_input_formats) else "matroska" + cmd.extend(["-f", fmt, "-i", cli_path(path)]) + for path in (extra_inputs or []): + cmd.extend(["-i", cli_path(path)]) + + sync_count = len(sync_inputs or []) + extra_count = len(extra_inputs or []) + offset_specs = offset_input_specs_from_mapped_tracks(mapped_tracks) + _, offset_remap = append_offset_inputs( + cmd, + offset_specs, + start_input_index=len(config.sources) + sync_count + extra_count, + cli_path=cli_path, + ) + + for mapped_track in mapped_tracks: + map_key = ( + int(mapped_track.source_file_index), + int(mapped_track.stream_index), + str(mapped_track.track.track_type), + int(mapped_track.out_type_index), + ) + remapped = offset_remap.get(map_key) + if remapped is None: + cmd.extend(["-map", f"{mapped_track.source_input_idx}:{mapped_track.stream_index}"]) + else: + cmd.extend(["-map", f"{remapped[0]}:{remapped[1]}"]) + + cmd.extend(["-c", "copy", "-default_mode", "passthrough"]) + + for mapped_track in mapped_tracks: + if mapped_track.track.track_type != "subtitle": + continue + try: + codec_arg, _ = plan_subtitle_codec(mapped_track.track.codec) + except ValueError as exc: + raise RemuxError(str(exc)) from exc + if codec_arg != "copy": + cmd.extend([f"-c:s:{mapped_track.out_type_index}", codec_arg]) + if needs_strict_interleave: + cmd.extend(["-max_interleave_delta", "0"]) + cmd.extend(["-max_muxing_queue_size", "9999"]) + + meta = metadata_context(config, chapter_input_index) + cmd.extend(["-map_metadata", meta.metadata_map]) + cmd.extend(["-map_chapters", meta.chapter_map]) + cmd.extend(["-metadata", "encoder=", "-metadata", "creation_time="]) + + for key, value in meta.global_tags.items(): + cmd.extend(["-metadata", f"{key}={value}"]) + + for mapped_track in mapped_tracks: + stream_spec = STREAM_SPEC_BY_TYPE[mapped_track.track.track_type] + out_idx = mapped_track.out_type_index + lang_value = normalized_language_value(mapped_track.track) + 
cmd.extend([f"-metadata:s:{stream_spec}:{out_idx}", f"language={lang_value}"]) + cmd.extend([f"-metadata:s:{stream_spec}:{out_idx}", "language-ietf="]) + cmd.extend([f"-metadata:s:{stream_spec}:{out_idx}", f"title={mapped_track.track.title or ''}"]) + cmd.extend([f"-disposition:{stream_spec}:{out_idx}", disposition_value_for_track(mapped_track.track)]) + + cmd.extend(build_attachment_mapping(config)) + att_t_idx = 0 + for source in config.sources: + for attachment in sorted(source.selected_attachments, key=lambda item: item.local_index): + if attachment.is_attached_pic: + continue + meta_name = attachment_names(attachment)[0] + mimetype = (attachment.mimetype or "").strip() or mime_for_path(Path(meta_name)) + cmd.extend([f"-metadata:s:t:{att_t_idx}", f"mimetype={mimetype}"]) + cmd.extend([f"-metadata:s:t:{att_t_idx}", f"filename={meta_name}"]) + att_t_idx += 1 + + for attachment_path in config.extra_attachments: + cmd.extend(["-attach", cli_path(attachment_path)]) + cmd.extend([f"-metadata:s:t:{att_t_idx}", f"mimetype={mime_for_path(attachment_path)}"]) + cmd.extend([f"-metadata:s:t:{att_t_idx}", f"filename={attachment_path.name}"]) + att_t_idx += 1 + + cmd.append(cli_path(config.output)) + return cmd + + +def preview_remux_command( + config: RemuxConfig, + *, + build_command: Callable[..., list[str]], +) -> str: + extra_inputs: list[Path | str] = [] + chapter_input_index: int | None = None + if config.chapter_overrides: + extra_inputs.append(Path("")) + chapter_input_index = len(config.sources) + + parts = build_command( + config, + extra_inputs=extra_inputs, + chapter_input_index=chapter_input_index, + ) + if not parts: + return "" + + lines: list[str] = [parts[0]] + index = 1 + while index < len(parts): + token = parts[index] + if token.startswith("-") and index + 1 < len(parts) and not parts[index + 1].startswith("-"): + lines.append(f" {token} {parts[index + 1]}") + index += 2 + else: + lines.append(f" {token}") + index += 1 + + return " \\\n".join(lines) + + 
+__all__ = ["build_remux_command", "preview_remux_command"] diff --git a/core/workflows/remux_mapping.py b/core/workflows/remux_mapping.py new file mode 100644 index 0000000..5c71d6b --- /dev/null +++ b/core/workflows/remux_mapping.py @@ -0,0 +1,277 @@ +"""Shared mapping and metadata helpers for the remux workflow.""" + +from __future__ import annotations + +import tempfile +from dataclasses import dataclass +from pathlib import Path +from typing import Callable + +from core.workflows.common.metadata import ( + STREAM_SPEC_BY_TRACK_TYPE as STREAM_SPEC_BY_TYPE, + disposition_value, + normalize_track_language_from_track, + resolve_global_tags, +) +from core.workflows.common.timeline_sync import needs_strict_interleave as common_needs_strict_interleave +from core.workflows.remux_models import RemuxConfig, RemuxError, SourceInput, TrackEntry + + +@dataclass(frozen=True) +class MappedTrack: + source_input_idx: int + source_file_index: int + source_path: Path | str + stream_index: int + track: TrackEntry + out_type_index: int + + +@dataclass(frozen=True) +class OffsetInputSpec: + map_key: tuple[int, int, str, int] + input_path: Path | str + input_stream_index: int + offset_ms: int + + +@dataclass(frozen=True) +class RemuxMetadataContext: + chapter_map: str + metadata_map: str + global_tags: dict[str, str] + + +def is_dir_writable(path: Path) -> bool: + try: + with tempfile.NamedTemporaryFile( + mode="wb", + dir=path, + prefix="mrecode_write_probe_", + delete=True, + ): + pass + return True + except OSError: + return False + + +def offset_seconds(offset_ms: int) -> str: + return f"{abs(int(offset_ms)) / 1000.0:.3f}" + + +def track_order_parts( + item: tuple[int, int] | tuple[int, int, str], +) -> tuple[int, int, str | None]: + if len(item) >= 3: + file_index, mkv_tid, entry_id = item[0], item[1], str(item[2] or "").strip() + return int(file_index), int(mkv_tid), entry_id or None + return int(item[0]), int(item[1]), None + + +def resolve_mapped_tracks(config: RemuxConfig) 
-> list[MappedTrack]: + file_index_to_input_idx = { + src.file_index: i + for i, src in enumerate(config.sources) + } + + track_map_by_id = { + (src.file_index, track.entry_id): (src.path, track) + for src in config.sources + for track in src.tracks + } + track_map_by_pair: dict[tuple[int, int], list[tuple[Path, TrackEntry]]] = {} + for src in config.sources: + for track in src.tracks: + track_map_by_pair.setdefault((src.file_index, track.mkv_tid), []).append((src.path, track)) + + type_counters: dict[str, int] = {"video": 0, "audio": 0, "subtitle": 0} + mapped: list[MappedTrack] = [] + + for order_item in config.track_order: + file_index, mkv_tid, entry_id = track_order_parts(order_item) + input_idx = file_index_to_input_idx.get(file_index) + if input_idx is None: + raise RemuxError(f"Source inconnue dans track_order : file_index={file_index}") + + found = ( + track_map_by_id.get((file_index, entry_id)) + if entry_id + else next(iter(track_map_by_pair.get((file_index, mkv_tid), [])), None) + ) + if found is None: + raise RemuxError( + "Piste introuvable dans track_order : " + f"file_index={file_index}, stream={mkv_tid}" + ) + src_path, track = found + if track.track_type not in STREAM_SPEC_BY_TYPE: + raise RemuxError( + "Type de piste non supporté en remux FFmpeg : " + f"{track.track_type} (file_index={file_index}, stream={mkv_tid})" + ) + + out_type_index = type_counters[track.track_type] + type_counters[track.track_type] += 1 + mapped.append(MappedTrack( + source_input_idx=input_idx, + source_file_index=file_index, + source_path=src_path, + stream_index=mkv_tid, + track=track, + out_type_index=out_type_index, + )) + + return mapped + + +def offset_input_specs_from_mapped_tracks(mapped_tracks: list[MappedTrack]) -> list[OffsetInputSpec]: + specs: list[OffsetInputSpec] = [] + for mapped_track in mapped_tracks: + offset_ms = int(getattr(mapped_track.track, "time_shift_ms", 0) or 0) + if offset_ms == 0: + continue + if mapped_track.track.track_type == "video" and 
offset_ms < 0: + raise RemuxError( + "Décalage vidéo négatif interdit : " + f"file_index={mapped_track.source_file_index}, stream={mapped_track.stream_index}, offset={offset_ms} ms" + ) + specs.append(OffsetInputSpec( + map_key=( + int(mapped_track.source_file_index), + int(mapped_track.stream_index), + str(mapped_track.track.track_type), + int(mapped_track.out_type_index), + ), + input_path=mapped_track.source_path, + input_stream_index=int(mapped_track.stream_index), + offset_ms=offset_ms, + )) + return specs + + +def append_offset_inputs( + cmd: list[str], + specs: list[OffsetInputSpec], + *, + start_input_index: int, + cli_path: Callable[[Path | str], str], +) -> tuple[int, dict[tuple[int, int, str, int], tuple[int, int]]]: + next_input_index = start_input_index + input_by_key: dict[tuple[str, int, int, str], int] = {} + remap: dict[tuple[int, int, str, int], tuple[int, int]] = {} + + for spec in specs: + input_key = ( + cli_path(spec.input_path), + int(spec.input_stream_index), + int(spec.offset_ms), + str(spec.map_key[2]), + ) + input_idx = input_by_key.get(input_key) + if input_idx is None: + if int(spec.offset_ms) > 0: + cmd.extend(["-itsoffset", offset_seconds(spec.offset_ms), "-i", cli_path(spec.input_path)]) + else: + cmd.extend(["-ss", offset_seconds(spec.offset_ms), "-i", cli_path(spec.input_path)]) + input_idx = next_input_index + input_by_key[input_key] = input_idx + next_input_index += 1 + + remap[spec.map_key] = (int(input_idx), int(spec.input_stream_index)) + + return next_input_index, remap + + +def needs_strict_interleave(mapped_tracks: list[MappedTrack]) -> bool: + return common_needs_strict_interleave(mapped_tracks) + + +def requires_file_sync_fallback_for_offsets(mapped_tracks: list[MappedTrack]) -> bool: + primary_video = next((mapped_track for mapped_track in mapped_tracks if mapped_track.track.track_type == "video"), None) + if primary_video is None: + return False + + return any( + mapped_track.track.track_type in {"audio", "subtitle"} + 
and mapped_track.source_file_index != primary_video.source_file_index + and int(getattr(mapped_track.track, "time_shift_ms", 0) or 0) != 0 + for mapped_track in mapped_tracks + ) + + +def chapter_map_value(config: RemuxConfig, chapter_input_index: int | None) -> str: + if config.chapter_overrides is not None: + if config.chapter_overrides and chapter_input_index is not None: + return str(chapter_input_index) + return "-1" + return "0" if config.keep_chapters else "-1" + + +def metadata_map_value( + config: RemuxConfig, + chapter_input_index: int | None, + chapter_map: str | None = None, +) -> str: + if config.tag_overrides is not None: + if config.chapter_overrides and chapter_input_index is not None: + return str(chapter_input_index) + if chapter_map is not None and chapter_map not in ("-1", ""): + return chapter_map + return "-1" + for input_idx, source in enumerate(config.sources): + if source.copy_tags: + return str(input_idx) + if chapter_map is not None and chapter_map not in ("-1", ""): + return chapter_map + return "-1" + + +def resolved_global_tags(config: RemuxConfig) -> dict[str, str]: + return resolve_global_tags(config.tag_overrides, config.file_title) + + +def normalized_language_value(track: TrackEntry) -> str: + return normalize_track_language_from_track(track) + + +def disposition_value_for_track(track: TrackEntry) -> str: + return disposition_value( + flag_default=track.flag_default, + flag_forced=track.flag_forced, + flag_hearing_impaired=track.flag_hearing_impaired, + flag_visual_impaired=track.flag_visual_impaired, + flag_original=track.flag_original, + flag_commentary=track.flag_commentary, + allow_partial=True, + ) or "0" + + +def metadata_context(config: RemuxConfig, chapter_input_index: int | None) -> RemuxMetadataContext: + chap_map = chapter_map_value(config, chapter_input_index) + return RemuxMetadataContext( + chapter_map=chap_map, + metadata_map=metadata_map_value(config, chapter_input_index, chap_map), + 
global_tags=resolved_global_tags(config), + ) + + +__all__ = [ + "MappedTrack", + "OffsetInputSpec", + "RemuxMetadataContext", + "append_offset_inputs", + "chapter_map_value", + "disposition_value_for_track", + "is_dir_writable", + "metadata_context", + "metadata_map_value", + "needs_strict_interleave", + "normalized_language_value", + "offset_input_specs_from_mapped_tracks", + "offset_seconds", + "requires_file_sync_fallback_for_offsets", + "resolve_mapped_tracks", + "resolved_global_tags", + "track_order_parts", +] diff --git a/core/workflows/remux_models.py b/core/workflows/remux_models.py index d2a9344..2303aa6 100644 --- a/core/workflows/remux_models.py +++ b/core/workflows/remux_models.py @@ -11,6 +11,7 @@ from __future__ import annotations +from collections.abc import Sequence from dataclasses import dataclass, field, replace from pathlib import Path from uuid import uuid4 @@ -50,6 +51,10 @@ class TrackEntry: orig_title: str = field(default="", repr=False) orig_codec: str = field(default="", repr=False) orig_display_info: str = field(default="", repr=False) + encode_plan_codec: str = field(default="", repr=False) + encode_plan_summary: str = field(default="", repr=False) + encode_plan_hdr_badges: tuple[str, ...] = field(default_factory=tuple, repr=False) + encode_plan_modified: bool = field(default=False, repr=False) # Flags MKV éditables (transmis à FFmpeg si modifiés) flag_enabled: bool = field(default=True, repr=False) # --track-enabled-flag @@ -194,7 +199,7 @@ class RemuxConfig: sources: list[SourceInput] output: Path - track_order: list[tuple[int, int] | tuple[int, int, str]] + track_order: Sequence[tuple[int, int] | tuple[int, int, str]] keep_chapters: bool = True #: None → FFmpeg recopie les chapitres des sources (comportement par défaut). 
#: list → un fichier ffmetadata temporaire est généré depuis ces entrées ; @@ -254,7 +259,7 @@ def _flags_from_disp(raw: dict) -> dict: for v in info.video_tracks: parts: list[str] = [v.resolution] if v.hdr_type != HDRType.NONE: - parts.append(v.hdr_type.label()) + parts.append(v.hdr_label) if v.frame_rate: fr = v.frame_rate if "/" in fr: diff --git a/core/workflows/remux_sync.py b/core/workflows/remux_sync.py new file mode 100644 index 0000000..f9ad8cb --- /dev/null +++ b/core/workflows/remux_sync.py @@ -0,0 +1,124 @@ +"""Timeline sync helpers for the remux workflow.""" + +from __future__ import annotations + +from dataclasses import replace +from pathlib import Path +from typing import Callable + +from core.runner import TaskSignals +from core.workdir import remove_path +from core.workflows.remux_models import RemuxConfig +from core.workflows.remux_mapping import MappedTrack, needs_strict_interleave, requires_file_sync_fallback_for_offsets +from core.workflows.remux_timeline_sync import LiveSyncSession, SyncPreparedInput + + +def decide_strict_interleave_with_prescan( + config: RemuxConfig, + *, + resolve_mapped_tracks: Callable[[RemuxConfig], list[MappedTrack]], + log_cb: Callable[[str, str], None], +) -> bool: + mapped_tracks = resolve_mapped_tracks(config) + if requires_file_sync_fallback_for_offsets(mapped_tracks): + log_cb( + "INFO", + "Décalage sur piste étrangère détecté : sync timeline activé.", + ) + return True + + base_risk = needs_strict_interleave(mapped_tracks) + if not base_risk: + return False + + log_cb( + "INFO", + "Piste audio étrangère + sous-titres détectés : interleave strict activé.", + ) + return True + + +def prepare_timeline_sync_inputs( + config: RemuxConfig, + mapped_tracks: list[MappedTrack], + tmp_dir: Path, + signals: TaskSignals, + *, + allow_live: bool, + ffmpeg_bin: str, + ffmpeg_thread_args: list[str], + log_cb: Callable[[str, str], None], + syncer_factory, + fallback_helper_factory, +) -> tuple[list[MappedTrack], 
list[SyncPreparedInput], LiveSyncSession | None]: + syncer = syncer_factory( + ffmpeg_bin=ffmpeg_bin, + ffmpeg_thread_args=ffmpeg_thread_args, + log_cb=lambda msg: log_cb("INFO", msg), + ) + prepared_result = fallback_helper_factory( + syncer=syncer, + work_dir=tmp_dir, + ram_dir=fallback_helper_factory.default_ram_dir(), + log_cb=lambda msg: log_cb("INFO", msg), + ).prepare( + mapped_tracks=mapped_tracks, + sources=config.sources, + base_input_idx=len(config.sources), + allow_live=allow_live, + cancel_cb=signals._cancel_event.is_set, + ) + live_session = prepared_result.live_session + prepared = prepared_result.prepared_inputs + if live_session is not None: + for proc in live_session.processes: + signals._register_proc(proc) + + if not prepared: + return mapped_tracks, [], live_session + + remap = {item.key: item for item in prepared} + remapped: list[MappedTrack] = [] + for mapped_track in mapped_tracks: + key = (mapped_track.source_file_index, mapped_track.stream_index, mapped_track.track.track_type) + hit = remap.get(key) + if hit is None: + remapped.append(mapped_track) + continue + remapped.append(replace( + mapped_track, + source_input_idx=hit.input_idx, + source_path=hit.path, + stream_index=0, + )) + + return remapped, prepared, live_session + + +def bind_temp_cleanup(signals: TaskSignals, cleanup_paths: list[Path]) -> None: + if not cleanup_paths: + return + + done = {"cleaned": False} + + def _cleanup(*_args) -> None: + if done["cleaned"]: + return + done["cleaned"] = True + for path in cleanup_paths: + try: + remove_path(path) + except OSError: + pass + + signals.finished.connect(_cleanup) + signals.failed.connect(_cleanup) + signals.cancelled.connect(_cleanup) + + +__all__ = [ + "bind_temp_cleanup", + "decide_strict_interleave_with_prescan", + "prepare_timeline_sync_inputs", + "requires_file_sync_fallback_for_offsets", +] diff --git a/core/workflows/remux_timeline_sync.py b/core/workflows/remux_timeline_sync.py index b00daba..cef1b48 100644 --- 
a/core/workflows/remux_timeline_sync.py +++ b/core/workflows/remux_timeline_sync.py @@ -22,18 +22,10 @@ from core.runner import TaskCancelledError from core.subprocess_utils import subprocess_text_kwargs +from core.workflows.common.ffmpeg_runtime import cli_path as _cli_path from core.workflows.remux_models import RemuxError, SourceInput -def _cli_path(path: Path | str) -> str: - if isinstance(path, str): - return path - text = str(path) - if text.startswith("\\\\.\\pipe\\"): - return text - return path.as_posix() - - class _TrackLike(Protocol): @property def track_type(self) -> str: diff --git a/locales.json b/locales.json index 84c0749..158ffdb 100644 --- a/locales.json +++ b/locales.json @@ -115,6 +115,58 @@ "fra": "Nombre maximum de lignes conservées dans le panneau de log.", "eng": "Maximum number of lines kept in the log panel." }, + "Activer le logging fichier": { + "fra": "Activer le logging fichier", + "eng": "Enable file logging" + }, + "Si activé, les logs applicatifs sont aussi écrits dans un fichier texte sous app_data/logs/.": { + "fra": "Si activé, les logs applicatifs sont aussi écrits dans un fichier texte sous app_data/logs/.", + "eng": "When enabled, application logs are also written to a text file under app_data/logs/." + }, + "Niveau de logging fichier": { + "fra": "Niveau de logging fichier", + "eng": "File logging level" + }, + "Standard écrit le flux visible dans la fenêtre. Verbose ajoute les sorties techniques détaillées des outils.": { + "fra": "Standard écrit le flux visible dans la fenêtre. Verbose ajoute les sorties techniques détaillées des outils.", + "eng": "Standard writes the output visible in the window. Verbose also adds detailed technical tool output." + }, + "Dossier des logs fichier": { + "fra": "Dossier des logs fichier", + "eng": "File logs directory" + }, + "Dossier où écrire les logs fichier. Prérempli par défaut avec le chemin complet actuel.": { + "fra": "Dossier où écrire les logs fichier. 
Prérempli par défaut avec le chemin complet actuel.", + "eng": "Directory where file logs are written. Pre-filled by default with the full current path." + }, + "Standard": { + "fra": "Standard", + "eng": "Standard" + }, + "Verbose": { + "fra": "Verbose", + "eng": "Verbose" + }, + "Le dossier des logs fichier doit être renseigné quand l'option est activée.": { + "fra": "Le dossier des logs fichier doit être renseigné quand l'option est activée.", + "eng": "The file logs directory must be set when this option is enabled." + }, + "Impossible d'écrire le fichier de logging.": { + "fra": "Impossible d'écrire le fichier de logging.", + "eng": "Unable to write the logging file." + }, + "Logging fichier activé ({level}) : {path}": { + "fra": "Logging fichier activé ({level}) : {path}", + "eng": "File logging enabled ({level}): {path}" + }, + "Logging fichier désactivé.": { + "fra": "Logging fichier désactivé.", + "eng": "File logging disabled." + }, + "Niveau de logging fichier : {level}": { + "fra": "Niveau de logging fichier : {level}", + "eng": "File logging level: {level}" + }, "Thème": { "fra": "Thème", "eng": "Theme" diff --git a/main.py b/main.py index 1a886c1..fae9e47 100755 --- a/main.py +++ b/main.py @@ -111,7 +111,8 @@ def main() -> int: from ui.main_window import MainWindow window = MainWindow(config) if startup_paths: - QTimer.singleShot(0, lambda: window.open_startup_paths(startup_paths)) + startup_items: list[Path | str] = list(startup_paths) + QTimer.singleShot(0, lambda: window.open_startup_paths(startup_items)) window.show() return app.exec() diff --git a/package.py b/package.py index 8294d2c..439a17d 100755 --- a/package.py +++ b/package.py @@ -57,7 +57,7 @@ import urllib.request import zipfile from pathlib import Path -from typing import Iterable +from typing import Any, Iterable from core.file_types import ACCEPTED_EXTENSIONS, build_desktop_mime_type_string from core.version import APP_NAME, APP_VERSION @@ -92,16 +92,22 @@ _APPIMAGE_UPDATE_OWNER = 
os.environ.get("MEDIARECODE_APPIMAGE_UPDATE_OWNER", "Hydro74000").strip() or "Hydro74000" _APPIMAGE_UPDATE_REPO = os.environ.get("MEDIARECODE_APPIMAGE_UPDATE_REPO", "mediarecode").strip() or "mediarecode" _APPIMAGE_UPDATE_RELEASE = os.environ.get("MEDIARECODE_APPIMAGE_UPDATE_RELEASE", "latest").strip() or "latest" -_MSIX_IDENTITY = os.environ.get("MEDIARECODE_MSIX_IDENTITY", "Hydro74000.Mediarecode").strip() or "Hydro74000.Mediarecode" -_MSIX_PUBLISHER = os.environ.get("MEDIARECODE_MSIX_PUBLISHER", "CN=Hydro74000").strip() or "CN=Hydro74000" -_MSIX_PUBLISHER_DISPLAY_NAME = os.environ.get("MEDIARECODE_MSIX_PUBLISHER_DISPLAY_NAME", "Hydro74000").strip() or "Hydro74000" -_MSIX_DESCRIPTION = os.environ.get("MEDIARECODE_MSIX_DESCRIPTION", "Mediarecode video workflow").strip() or "Mediarecode video workflow" +_MSIX_IDENTITY = os.environ.get("MEDIARECODE_MSIX_IDENTITY", "AOTR.Mediarecode").strip() or "AOTR.Mediarecode" +_MSIX_PUBLISHER = os.environ.get("MEDIARECODE_MSIX_PUBLISHER", "CN=AOTR").strip() or "CN=AOTR" +_MSIX_PUBLISHER_DISPLAY_NAME = os.environ.get("MEDIARECODE_MSIX_PUBLISHER_DISPLAY_NAME", "AOTR").strip() or "AOTR" +_MSIX_DESCRIPTION = os.environ.get("MEDIARECODE_MSIX_DESCRIPTION", "AOTR Mediarecode is an independent video workflow tool. This software is not affiliated with, authorized, or endorsed by Nero AG or any other media software providers.").strip() or "AOTR Mediarecode is an independent video workflow tool. This software is not affiliated with, authorized, or endorsed by Nero AG or any other media software providers." 
_MSIX_CERT_PFX = os.environ.get("MEDIARECODE_MSIX_CERT_PFX", "").strip() _MSIX_CERT_PASSWORD = os.environ.get("MEDIARECODE_MSIX_CERT_PASSWORD", "").strip() _MSIX_TIMESTAMP_URL = os.environ.get("MEDIARECODE_MSIX_TIMESTAMP_URL", "http://timestamp.digicert.com").strip() or "http://timestamp.digicert.com" _MSIX_STORE_CONFIG = os.environ.get("MEDIARECODE_MSIX_STORE_CONFIG", "").strip() _WINDOWS_SDK_WINGET_ID = os.environ.get("MEDIARECODE_WINDOWS_SDK_WINGET_ID", "Microsoft.WindowsSDK").strip() or "Microsoft.WindowsSDK" _WINDOWS_SDK_INSTALLER = os.environ.get("MEDIARECODE_WINDOWS_SDK_INSTALLER", "").strip() +# Exception MSIX : nom technique distinct du branding/artefacts classiques. +_MSIX_PACKAGE_NAME = re.sub( + r"\s+", + "", + os.environ.get("MEDIARECODE_MSIX_PACKAGE_NAME", "AOTRMediarecode").strip(), +) or "AOTRMediarecode" # ── Modules Python exclus du bundle ────────────────────────────────────────── @@ -1756,10 +1762,10 @@ def _stage_msix_layout( assets_dir = layout_dir / "Assets" _build_msix_assets(assets_dir) - vfs_root = layout_dir / "VFS" / _msix_vfs_root_dir() / APP_NAME + vfs_root = layout_dir / "VFS" / _msix_vfs_root_dir() / _MSIX_PACKAGE_NAME shutil.copytree(bundle_dir, vfs_root) - executable = f"VFS\\{_msix_vfs_root_dir()}\\{APP_NAME}\\mediarecode.exe" + executable = f"VFS\\{_msix_vfs_root_dir()}\\{_MSIX_PACKAGE_NAME}\\mediarecode.exe" manifest_path = layout_dir / "AppxManifest.xml" manifest_path.write_text( _msix_manifest_content(version_tag, executable, metadata=metadata), @@ -1808,7 +1814,7 @@ def _build_msix_package( layout_dir = _stage_msix_layout(bundle_dir, version_tag, metadata=metadata) makeappx = _ensure_windows_sdk_tool("makeappx.exe") - output = _versioned_output_path(ROOT / f"{APP_NAME}.msix", version_tag) + output = _versioned_output_path(ROOT / f"{_MSIX_PACKAGE_NAME}.msix", version_tag) if output.exists(): output.unlink() @@ -1992,8 +1998,8 @@ def _ensure_windows_icu_runtime_cache() -> list[Path]: if not normalized.lower().endswith(".dll"): 
continue target = cache_dir / Path(normalized).name - with archive.open(member) as src, target.open("wb") as dst: - shutil.copyfileobj(src, dst) + with archive.open(member) as src_f, target.open("wb") as dst_f: + shutil.copyfileobj(src_f, dst_f) extracted_any = True # --- AJOUT : création des alias ICU non suffixés --- @@ -2017,18 +2023,18 @@ def _ensure_windows_icu_runtime_cache() -> list[Path]: # créer les alias - for plain, src in ( + for plain, src_dll in ( ("icuuc.dll", alias_map["icuuc"]), ("icuin.dll", alias_map["icuin"]), ("icudt.dll", alias_map["icudt"] or alias_map["icu"]), ("icu.dll", alias_map["icu"] or alias_map["icudt"]), ): - if src is None: + if src_dll is None: continue dst = cache_dir / plain if not dst.exists(): - shutil.copy2(src, dst) - _ok(f"ICU alias créé : {dst.name} -> {src.name}") + shutil.copy2(src_dll, dst) + _ok(f"ICU alias créé : {dst.name} -> {src_dll.name}") if not extracted_any: @@ -2557,19 +2563,24 @@ def _build_icns_from_png(src_png: Path, dest_icns: Path) -> Path | None: _warn(f"icon.png absent : {src_png} — .icns non généré") return None + pil_image: Any | None = None + qimage_cls: Any | None = None + qt_namespace: Any | None = None + use_qt_backend = False + try: - from PIL import Image # type: ignore[import-not-found] + from PIL import Image as _PILImage # type: ignore[import-not-found] + pil_image = _PILImage except ImportError: try: - from PySide6.QtGui import QImage # type: ignore[import-not-found] - qt_backend = True + from PySide6.QtCore import Qt as _Qt # type: ignore[import-not-found] + from PySide6.QtGui import QImage as _QImage # type: ignore[import-not-found] + qt_namespace = _Qt + qimage_cls = _QImage + use_qt_backend = True except ImportError: _warn("Ni PIL ni PySide6.QtGui disponibles — .icns non généré") return None - else: - qt_backend = True - else: - qt_backend = False iconset = dest_icns.with_suffix(".iconset") if iconset.exists(): @@ -2584,17 +2595,22 @@ def _build_icns_from_png(src_png: Path, dest_icns: 
Path) -> Path | None: continue suffix = "" if scale == 1 else "@2x" out = iconset / f"icon_{size}x{size}{suffix}.png" - if qt_backend: - img = QImage(str(src_png)) - scaled = img.scaled( - px, px, - aspectRatioMode=1, # Qt.AspectRatioMode.KeepAspectRatio - transformMode=1, # Qt.TransformationMode.SmoothTransformation + if use_qt_backend: + assert qimage_cls is not None and qt_namespace is not None + qimg: Any = qimage_cls(str(src_png)) + scaled = qimg.scaled( + px, + px, + qt_namespace.AspectRatioMode.KeepAspectRatio, + qt_namespace.TransformationMode.SmoothTransformation, ) scaled.save(str(out), "PNG") else: - img = Image.open(src_png).convert("RGBA") - img.resize((px, px), Image.LANCZOS).save(out, "PNG") + assert pil_image is not None + pil_img: Any = pil_image.open(src_png).convert("RGBA") + resampling = getattr(pil_image, "Resampling", None) + lanczos = resampling.LANCZOS if resampling is not None else getattr(pil_image, "LANCZOS", 1) + pil_img.resize((px, px), lanczos).save(out, "PNG") dest_icns.parent.mkdir(parents=True, exist_ok=True) _run(["iconutil", "-c", "icns", str(iconset), "-o", str(dest_icns)]) diff --git a/release-v1.2.1.md b/release-v1.2.1.md deleted file mode 100644 index a026d15..0000000 --- a/release-v1.2.1.md +++ /dev/null @@ -1,56 +0,0 @@ -# Mediarecode v1.2.1 - -Date: 2026-04-09 - -PR associee: `#12` -Merge commit: `c8a349d94f08583fa1b165dd78802dfda95b29b5` - -Depuis `v1.2` (commit `96967a69207088d2b5a20d75b586c855f60504da`), cette version regroupe **5 commits** (dont 1 merge), avec **13 fichiers modifies** (`+793 / -99`). - -## Point principal - -Cette iteration est principalement une mise en compatibilite avec **mkvmerge v98**. - -Le changement majeur confirme dans l'historique Git et dans le diff est un **breaking change sur la gestion des balises de langue IETF** (`language-ietf`). 
Cette release adapte le remux, l'edition des langues et la detection de version de l'outillage pour continuer a reemettre correctement les langues dans les fichiers MKV. - -## Points forts - -- Compatibilite avec **mkvmerge v98** et son nouveau comportement autour de `language-ietf`. -- Correction de la reemission des langues lors des operations de remux. -- Mise a jour du build et de la configuration pour tenir compte des differences de version des outils externes. -- Correctif Wine / cross-build pour recuperer les DLL ICU Windows manquantes. - -## Correctifs inclus - -- Correction du remux lorsque `mkvmerge v98` modifie la mecanique des tags `language-ietf`. -- Correction de la reemission des langues de pistes apres edition ou reorganisation. -- Correction d'un cas de **titre vide** dans le workflow remux. -- Renforcement du packaging Windows cross-platform pour mieux gerer le runtime ICU sous Wine. - -## Impact technique - -- `core/config.py` introduit une detection de version majeure des outils externes afin d'adapter le comportement en fonction de la version de `mkvmerge`. -- `core/workflows/remux.py` porte l'essentiel de l'adaptation au breaking change `language-ietf`. -- `core/workflows/encode/workflow.py` est ajuste pour garder un comportement coherent lors des editions de langues. -- `package.py` est renforce pour fiabiliser les builds Windows / Wine lies aux DLL ICU. - -## Tests et validation - -Verification ciblee lancee avec: - -```sh -python3 -m pytest -q tests/test_remux.py tests/test_encode_workflow.py tests/test_setup_and_config.py tests/test_package.py -``` - -Resultat: - -- `408 passed` -- `2 failed` - -Les 2 echecs restants sont lies a des tests Windows qui patchent `ctypes.windll`, indisponible dans ce runtime Linux. - -## Notes de release - -- Cette release est associee au PR [#12](https://github.com/Hydro74000/mediarecode/pull/12). -- Conformement a la consigne de release, **aucun nouveau tag GitHub n'a ete cree** pour cette fusion. 
-- Le tag GitHub conserve pour la release publique reste **`v1.2`**. diff --git a/release-v1.2.md b/release-v1.2.md deleted file mode 100644 index bcbd963..0000000 --- a/release-v1.2.md +++ /dev/null @@ -1,75 +0,0 @@ -# Mediarecode v1.2 - -Date: 2026-04-08 - -Depuis `v1.1` (commit `aee1c107586806719bd5775f563d30037e3e5760`), cette version regroupe **22 commits** (dont 1 merge), avec **49 fichiers modifiés** (`+13 191 / -706`). - -## Points forts - -- Interface localisée (FR/EN) avec gestion centralisée des traductions (`locales.json`) et détection initiale de langue. -- Nouveau panneau **Paramètres** pour éditer `config.ini` dans l’application (UI, chemins, outils externes, encodage, TMDB). -- Ajout des thèmes **dark/light** appliqués dynamiquement sans redémarrage. -- Workflow enrichi avec intégration **TMDB/IMDb** (recherche, tags, titre conteneur, synopsis, cover auto). -- Gros renforcement du packaging Windows/Linux (PyInstaller, AppImage, NSIS, cross-build Wine). - -## Nouvelles fonctionnalités - -- **Localisation complète de l’UI** -- **Panneau de démarrage configurable** (dashboard/remux/encode/dovi/settings). -- **Recherche TMDB/IMDb intégrée** dans le remux: - - pré-remplissage de requête depuis le nom de fichier, - - détection saison/épisode, - - injection de métadonnées dans les tags MKV, - - remplacement automatique du titre conteneur selon le résultat TMDB. -- **Gestion automatique des pochettes** via TMDB (`auto-cover.jpg`) dans le workflow remux. -- **Sélecteur de bitrate audio amélioré** (valeurs par défaut + disponibles selon codec). -- **Threading FFmpeg** configurable avec nouvelle valeur par défaut basée sur les CPU logiques. -- **Métadonnées de version centralisées** (`core/version.py`) pour harmoniser version label/user-agent/writing-app. - -## Améliorations techniques - -- Détection matérielle encodeurs accélérée (probes runtime optimisées). 
-- Normalisation des langues renforcée (RFC 5646 / ISO 639, gestion des codes 2 caractères et variantes). -- Normalisation des requêtes TMDB améliorée pour mieux extraire titre/année/saison/épisode. -- `setup.py` renforcé: - - meilleur support multi-plateforme, - - auto-remplissage des chemins outils, - - options `--dry-run` / `--force`, - - traitement charset/IO Windows, - - gestion des chemins `config.ini` cross-platform. -- `package.py` / `package_appimage.py` refondus: - - packaging `.exe` natif, - - installateur NSIS, - - cross-build Windows depuis Linux via Wine, - - récupération des DLL ICU manquantes en environnement Wine. - -## Correctifs principaux - -- Correction des labels/boutons localisés. -- Correction de la création parasite d’une piste MJPEG lors de l’attachement `cover.jpg` en encode. -- Correction de la conversion audio TrueHD (downmix/cas de conversion ciblés). -- Correction de la normalisation TMDB et des cas synopsis d’épisode absent selon locale (fallback plus robuste). -- Correction du tag de version pour la release 1.2. -- Correction des doubles backslashes lors d’éditions successives de `config.ini` sous Windows. -- Correctifs packaging Windows (sécurité, encodage des sorties, robustesse install/build). - -## Tests et fiabilité - -Extension importante de la couverture avec nouveaux tests ciblant notamment: - -- i18n, -- settings/setup/config, -- launcher, -- package / packaging cross-platform, -- media_info_fetcher (TMDB), -- encode workflow (audio bitrate, progression, matériel), -- remux. - -## Notes de migration - -- Le chemin de `config.ini` est désormais strictement résolu selon plateforme/contexte (XDG Linux/macOS, `%APPDATA%` en Windows packagé). -- Pour Windows, une configuration de sécurité peut être requise (Controlled Folder Access) pour autoriser certains exports. - -## Remerciements - -Merci à toutes les personnes ayant contribué aux retours, tests et validations de cette itération `v1.2`. 
diff --git a/release-v2.0.0.md b/release-v2.0.0.md new file mode 100644 index 0000000..f209009 --- /dev/null +++ b/release-v2.0.0.md @@ -0,0 +1,28 @@ +# Multi-track encode orchestration, verbose logging and remux hardening + +This release brings a major upgrade to the encode pipeline with **multi-track video orchestration**, stronger **hardware encoder handling**, richer **tool logging**, and a solid **remux workflow refactor**. + +From encode planning to final muxing, Mediarecode now handles complex video setups more cleanly, with better per-track visibility, safer resource management, and improved metadata consistency across the workflow. + +## Highlights + +- Add **multi-track video encode orchestration** with per-track plans, previews and final mux ordering +- Improve parallel video preparation with **resource and RAM guards** +- Add clearer **per-track progress reporting** and UI plan badges +- Strengthen hardware encoder support for **VAAPI, QSV, NVENC and AMF** +- Improve HDR detection and keep HDR-related options scoped per video track +- Add **verbose file logging**, rotation and external tool output capture +- Refactor remux internals to better align attachments, metadata and offsets +- Expand test coverage across encode, remux, config, packaging and runtime services + +## Why it matters + +- Better support for advanced projects with multiple video tracks +- Easier troubleshooting when ffmpeg, mkvmerge or hardware tools behave unexpectedly +- More consistent results between encode preparation and remux output +- Stronger confidence in future changes thanks to much broader automated test coverage + +## Included commits + +- `eaa2b45` feat: add multi-track video encode orchestration and verbose tool logging +- `da4260e` feat: Add tests for encode runtime services and big refactor remux workflow diff --git a/scripts/windows_pytest_bootstrap.py b/scripts/windows_pytest_bootstrap.py index 7d7be00..c515cfb 100644 --- a/scripts/windows_pytest_bootstrap.py +++ 
b/scripts/windows_pytest_bootstrap.py @@ -5,6 +5,7 @@ import sys import types from pathlib import Path +from typing import Any, cast class _BoundSignal: @@ -71,9 +72,9 @@ def _install_fake_pyside6() -> None: if "PySide6" in sys.modules: return - pkg = types.ModuleType("PySide6") - qtcore = types.ModuleType("PySide6.QtCore") - qtwidgets = types.ModuleType("PySide6.QtWidgets") + pkg = cast(Any, types.ModuleType("PySide6")) + qtcore = cast(Any, types.ModuleType("PySide6.QtCore")) + qtwidgets = cast(Any, types.ModuleType("PySide6.QtWidgets")) qtcore.QObject = _QObject qtcore.Signal = _SignalDescriptor diff --git a/tests/integration/test_encode_video_track_selection.py b/tests/integration/test_encode_video_track_selection.py index 5e7c052..0ea8390 100644 --- a/tests/integration/test_encode_video_track_selection.py +++ b/tests/integration/test_encode_video_track_selection.py @@ -154,3 +154,55 @@ def test_encode_transcode_uses_selected_video_stream(tmp_path: Path) -> None: assert state["failed"] is None, f"Encode failed: {state['failed']}" assert out.exists() and out.stat().st_size > 0 assert _video_dims(out) == [(128, 96)] + + +def test_encode_multi_video_keeps_copy_and_transcoded_tracks(tmp_path: Path) -> None: + """La sortie multi-vidéo garde l'ordre remux avec copy + réencodage.""" + src = tmp_path / "multi-mixed.mkv" + make_multi_video_mkv( + src, + videos=[ + (64, 48, "red"), + (128, 96, "blue"), + ], + ) + + out = tmp_path / "multi-mixed-out.mkv" + cfg = EncodeConfig( + source=src, + output=out, + video=VideoEncodeSettings( + codec="copy", + source_path=src, + stream_index=0, + track_entry_id="video-first", + ), + video_tracks=[ + VideoEncodeSettings( + codec="copy", + source_path=src, + stream_index=0, + track_entry_id="video-first", + ), + VideoEncodeSettings( + codec="libx264", + quality_mode=QualityMode.CRF, + crf=35, + preset="ultrafast", + source_path=src, + stream_index=1, + track_entry_id="video-second", + ), + ], + audio_tracks=[], + copy_subtitles=False, 
+ keep_chapters=False, + duration_s=1.0, + ) + + wf = _workflow() + state = wait_task(wf.run(cfg), timeout=120.0) + + assert state["failed"] is None, f"Encode failed: {state['failed']}" + assert out.exists() and out.stat().st_size > 0 + assert _video_dims(out) == [(64, 48), (128, 96)] diff --git a/tests/test_dashboard_page.py b/tests/test_dashboard_page.py index a3d6cbc..a6739bb 100644 --- a/tests/test_dashboard_page.py +++ b/tests/test_dashboard_page.py @@ -5,6 +5,7 @@ from __future__ import annotations from types import SimpleNamespace +from typing import Any, cast from unittest.mock import MagicMock, patch from ui.main_window import DashboardPage @@ -40,7 +41,7 @@ def test_dashboard_hw_detection_uses_configured_ffmpeg_path(): ) with patch("core.workflows.encode.hardware.HardwareEncoderDetector.detect", return_value={"hevc_nvenc"}) as mock_detect: - DashboardPage._run_hw_detection(fake_page) + DashboardPage._run_hw_detection(cast(Any, fake_page)) mock_detect.assert_called_once_with(custom_ffmpeg) assert emitted == [{"hevc_nvenc"}] diff --git a/tests/test_encode_audio_bitrate_ui.py b/tests/test_encode_audio_bitrate_ui.py index 9efcff4..bd09254 100644 --- a/tests/test_encode_audio_bitrate_ui.py +++ b/tests/test_encode_audio_bitrate_ui.py @@ -27,6 +27,7 @@ from __future__ import annotations from pathlib import Path +from typing import Any, cast from PySide6.QtCore import Qt from PySide6.QtWidgets import QComboBox, QLineEdit @@ -71,7 +72,7 @@ def _at( def _bitrate_editor(table: _AudioTable, row: int): editor = table.cellWidget(row, _AudioTable.COL_BITRATE) assert editor is not None - return editor + return cast(Any, editor) def _codec_combo(table: _AudioTable, row: int) -> QComboBox: @@ -185,7 +186,7 @@ def test_eac3_stereo_range_uses_two_channel_output_limit(self, qt_app): table.close() def test_codec_switch_uses_configured_aac_default_per_channel(self, qt_app): - table = _AudioTable(config=_Cfg(aac=160, eac3=96)) + table = _AudioTable(config=cast(Any, _Cfg(aac=160, 
eac3=96))) table.load_tracks([(_at(channels=6, bit_rate=2_000_000), _COLOR, _PATH_A)], default_codec="copy") _set_codec(table, 0, "aac") @@ -194,7 +195,7 @@ def test_codec_switch_uses_configured_aac_default_per_channel(self, qt_app): table.close() def test_codec_switch_from_copy_ignores_source_floor_for_configured_aac_default(self, qt_app): - table = _AudioTable(config=_Cfg(aac=160, eac3=96)) + table = _AudioTable(config=cast(Any, _Cfg(aac=160, eac3=96))) table.load_tracks([(_at(channels=6, bit_rate=640_000), _COLOR, _PATH_A)], default_codec="copy") _set_codec(table, 0, "aac") @@ -203,7 +204,7 @@ def test_codec_switch_from_copy_ignores_source_floor_for_configured_aac_default( table.close() def test_codec_switch_uses_configured_eac3_default_per_channel(self, qt_app): - table = _AudioTable(config=_Cfg(aac=96, eac3=224)) + table = _AudioTable(config=cast(Any, _Cfg(aac=96, eac3=224))) table.load_tracks([(_at(channels=6, bit_rate=2_000_000), _COLOR, _PATH_A)], default_codec="copy") _set_codec(table, 0, "eac3") @@ -212,7 +213,7 @@ def test_codec_switch_uses_configured_eac3_default_per_channel(self, qt_app): table.close() def test_codec_switch_from_copy_ignores_source_floor_for_configured_eac3_default(self, qt_app): - table = _AudioTable(config=_Cfg(aac=96, eac3=224)) + table = _AudioTable(config=cast(Any, _Cfg(aac=96, eac3=224))) table.load_tracks([(_at(channels=6, bit_rate=640_000), _COLOR, _PATH_A)], default_codec="copy") _set_codec(table, 0, "eac3") @@ -336,7 +337,7 @@ def test_dialog_flac_prefills_selected_track_source_bitrate(self, qt_app): def test_dialog_codec_switch_from_copy_uses_configured_defaults(self, qt_app): surround = _at(index=1, channels=6, channel_layout="5.1", bit_rate=640_000, title="Surround") - dialog = _AudioSourceDialog([(surround, _COLOR, _PATH_A)], config=_Cfg(aac=160, eac3=224)) + dialog = _AudioSourceDialog([(surround, _COLOR, _PATH_A)], config=cast(Any, _Cfg(aac=160, eac3=224))) aac_idx = next(i for i in range(dialog._codec_combo.count()) if 
dialog._codec_combo.itemData(i) == "aac") dialog._codec_combo.setCurrentIndex(aac_idx) @@ -354,7 +355,7 @@ def test_dialog_codec_switch_uses_selected_track_for_default_calculation(self, q surround = _at(index=2, channels=6, channel_layout="5.1", bit_rate=640_000, title="Surround") dialog = _AudioSourceDialog( [(stereo, _COLOR, _PATH_A), (surround, _COLOR, _PATH_A)], - config=_Cfg(aac=96, eac3=224), + config=cast(Any, _Cfg(aac=96, eac3=224)), ) dialog._track_list.setCurrentRow(1) diff --git a/tests/test_encode_catalog.py b/tests/test_encode_catalog.py new file mode 100644 index 0000000..05c2639 --- /dev/null +++ b/tests/test_encode_catalog.py @@ -0,0 +1,58 @@ +from core.workflows.encode.catalog import ( + AudioCodecSpec, + VideoCodecFamily, + VideoCodecSpec, + audio_codec_spec, + encoder_badge, + is_h264_video_codec, + is_hardware_video_codec, + supports_dynamic_hdr, + supports_force_8bit, + video_codec_family, + video_codec_spec, +) + + +class TestVideoCodecSpecs: + def test_libx264_spec_exposes_family_and_force_8bit(self): + spec = video_codec_spec("libx264") + assert isinstance(spec, VideoCodecSpec) + assert spec.family is VideoCodecFamily.SOFTWARE + assert spec.is_h264 is True + assert spec.supports_force_8bit is True + + def test_hevc_nvenc_spec_exposes_hardware_family_and_hdr(self): + spec = video_codec_spec("hevc_nvenc") + assert isinstance(spec, VideoCodecSpec) + assert spec.family is VideoCodecFamily.NVENC + assert spec.is_hardware is True + assert spec.supports_dynamic_hdr is True + + def test_unknown_video_codec_family_falls_back_to_other(self): + assert video_codec_spec("unknown_codec") is None + assert video_codec_family("unknown_codec") is VideoCodecFamily.OTHER + + def test_catalog_helpers_dispatch_from_specs(self): + assert is_h264_video_codec("h264_qsv") is True + assert is_hardware_video_codec("h264_qsv") is True + assert supports_force_8bit("h264_qsv") is True + assert supports_dynamic_hdr("hevc_qsv") is True + assert encoder_badge("hevc_qsv") == 
"QSV" + + def test_copy_keeps_dynamic_hdr_passthrough_capability(self): + assert supports_dynamic_hdr("copy") is True + + +class TestAudioCodecSpecs: + def test_copy_audio_spec_is_passthrough(self): + spec = audio_codec_spec("copy") + assert isinstance(spec, AudioCodecSpec) + assert spec.passthrough is True + assert spec.supports_bitrate is False + assert spec.supports_truehd_core_bsf is True + + def test_flac_audio_spec_is_lossless(self): + spec = audio_codec_spec("flac") + assert isinstance(spec, AudioCodecSpec) + assert spec.lossless is True + assert spec.supports_bitrate is False diff --git a/tests/test_encode_hardware.py b/tests/test_encode_hardware.py index 6a4a6be..f28e0bf 100644 --- a/tests/test_encode_hardware.py +++ b/tests/test_encode_hardware.py @@ -51,6 +51,71 @@ def fake_run(cmd, **_kwargs): assert used_ff == "ffmpeg.exe" assert all(cmd[0] != "nvidia-smi" for cmd in calls) + def test_detect_windows_qsv_uses_explicit_adapter_index(self): + encoders = "V....D hevc_qsv Intel Quick Sync Video\n" + probe_cmds: list[list[str]] = [] + + def fake_run(cmd, **_kwargs): + if cmd == ["ffmpeg.exe", "-hide_banner", "-encoders"]: + return _completed(stdout=encoders) + probe_cmds.append(list(cmd)) + return _completed(returncode=0) + + with patch("core.workflows.encode.hardware.subprocess.run", side_effect=fake_run), \ + patch.object(HardwareEncoderDetector, "_qsv_device", return_value="1"), \ + _no_system_ff: + detected, used_ff = HardwareEncoderDetector().detect("ffmpeg.exe") + + assert detected == {"hevc_qsv"} + assert used_ff == "ffmpeg.exe" + assert len(probe_cmds) == 1 + cmd = probe_cmds[0] + assert "-qsv_device" in cmd + assert cmd[cmd.index("-qsv_device") + 1] == "1" + + def test_detect_windows_amf_uses_explicit_d3d11_adapter(self): + encoders = "V....D h264_amf AMD AMF H.264 Encoder\n" + probe_cmds: list[list[str]] = [] + + def fake_run(cmd, **_kwargs): + if cmd == ["ffmpeg.exe", "-hide_banner", "-encoders"]: + return _completed(stdout=encoders) + 
probe_cmds.append(list(cmd)) + return _completed(returncode=0) + + with patch("core.workflows.encode.hardware.subprocess.run", side_effect=fake_run), \ + patch.object(HardwareEncoderDetector, "_amf_device", return_value="2"), \ + _no_system_ff: + detected, used_ff = HardwareEncoderDetector().detect("ffmpeg.exe") + + assert detected == {"h264_amf"} + assert used_ff == "ffmpeg.exe" + cmd = probe_cmds[0] + assert "-init_hw_device" in cmd + assert cmd[cmd.index("-init_hw_device") + 1] == "d3d11va=mre_amf_probe2:2" + assert "-filter_hw_device" in cmd + + def test_detect_windows_nvenc_uses_explicit_gpu_index(self): + encoders = "V....D h264_nvenc NVIDIA NVENC H.264 encoder\n" + probe_cmds: list[list[str]] = [] + + def fake_run(cmd, **_kwargs): + if cmd == ["ffmpeg.exe", "-hide_banner", "-encoders"]: + return _completed(stdout=encoders) + probe_cmds.append(list(cmd)) + return _completed(returncode=0) + + with patch("core.workflows.encode.hardware.subprocess.run", side_effect=fake_run), \ + patch.object(HardwareEncoderDetector, "_nvenc_device", return_value="1"), \ + _no_system_ff: + detected, used_ff = HardwareEncoderDetector().detect("ffmpeg.exe") + + assert detected == {"h264_nvenc"} + assert used_ff == "ffmpeg.exe" + cmd = probe_cmds[0] + assert "-gpu" in cmd + assert cmd[cmd.index("-gpu") + 1] == "1" + def test_detect_linux_nvenc_keeps_nvidia_shortcut_for_distrobox(self): """ Linux : si le signal NVIDIA bas niveau est present, on garde la logique @@ -145,6 +210,7 @@ def fake_run(cmd, **_kwargs): with patch("core.workflows.encode.hardware.subprocess.run", side_effect=fake_run), \ patch.object(HardwareEncoderDetector, "_vaapi_device", return_value=None), \ + patch.object(HardwareEncoderDetector, "_qsv_device", return_value="/dev/dri/renderD129"), \ _no_system_ff: detected, used_ff = HardwareEncoderDetector().detect("ffmpeg") @@ -152,6 +218,48 @@ def fake_run(cmd, **_kwargs): assert all("hevc_vaapi" not in cmd for cmd in probe_cmds) assert any("h264_qsv" in cmd for cmd 
in probe_cmds) + def test_detect_keeps_qsv_specific_probe_on_linux(self): + """ + Linux multi-GPU : QSV doit cibler explicitement le render node Intel + pour éviter le choix implicite du mauvais device. + """ + encoders = "V....D hevc_qsv H.265 / HEVC (Intel Quick Sync Video acceleration)\n" + probe_cmds: list[list[str]] = [] + + def fake_run(cmd, **_kwargs): + if cmd == ["ffmpeg", "-hide_banner", "-encoders"]: + return _completed(stdout=encoders) + probe_cmds.append(list(cmd)) + return _completed(returncode=0) + + with patch("core.workflows.encode.hardware.subprocess.run", side_effect=fake_run), \ + patch.object(HardwareEncoderDetector, "_qsv_device", return_value="/dev/dri/renderD129"), \ + _no_system_ff: + detected, used_ff = HardwareEncoderDetector().detect("ffmpeg") + + assert detected == {"hevc_qsv"} + assert used_ff == "ffmpeg" + assert len(probe_cmds) == 1 + cmd = probe_cmds[0] + assert "-qsv_device" in cmd + assert cmd[cmd.index("-qsv_device") + 1] == "/dev/dri/renderD129" + + def test_detect_skips_qsv_when_device_missing(self): + """Sans render node Intel resolu, QSV ne doit pas etre annonce.""" + encoders = "V....D h264_qsv H.264 / AVC (Intel Quick Sync Video acceleration)\n" + + def fake_run(cmd, **_kwargs): + if cmd == ["ffmpeg", "-hide_banner", "-encoders"]: + return _completed(stdout=encoders) + raise AssertionError(f"Commande inattendue: {cmd}") + + with patch("core.workflows.encode.hardware.subprocess.run", side_effect=fake_run), \ + patch.object(HardwareEncoderDetector, "_qsv_device", return_value=None), \ + _no_system_ff: + detected, _ff = HardwareEncoderDetector().detect("ffmpeg") + + assert detected == set() + def test_failed_probe_does_not_mark_codec_as_available(self): """Un codec compile mais non utilisable ne doit pas etre expose a l'UI.""" encoders = "V....D h264_nvenc NVIDIA NVENC H.264 encoder\n" diff --git a/tests/test_encode_hw_devices.py b/tests/test_encode_hw_devices.py new file mode 100644 index 0000000..f507ba9 --- /dev/null +++ 
b/tests/test_encode_hw_devices.py @@ -0,0 +1,83 @@ +from __future__ import annotations + +from unittest.mock import patch + +from core.workflows.encode.hw_devices import LinuxRenderNodeInfo, select_linux_hwaccel_device +from core.workflows.encode import hw_devices as hw_devices_mod + + +class TestSelectLinuxHwaccelDevice: + + def test_vaapi_prefers_amd_over_nvidia(self): + nodes = ( + LinuxRenderNodeInfo("/dev/dri/renderD128", vendor_id="0x10de", driver="nvidia"), + LinuxRenderNodeInfo("/dev/dri/renderD129", vendor_id="0x1002", driver="amdgpu"), + ) + + assert select_linux_hwaccel_device("hevc_vaapi", nodes=nodes) == "/dev/dri/renderD129" + + def test_vaapi_falls_back_to_intel_when_no_amd_exists(self): + nodes = ( + LinuxRenderNodeInfo("/dev/dri/renderD128", vendor_id="0x10de", driver="nvidia"), + LinuxRenderNodeInfo("/dev/dri/renderD129", vendor_id="0x8086", driver="i915"), + ) + + assert select_linux_hwaccel_device("h264_vaapi", nodes=nodes) == "/dev/dri/renderD129" + + def test_qsv_requires_intel_render_node(self): + nodes = ( + LinuxRenderNodeInfo("/dev/dri/renderD128", vendor_id="0x10de", driver="nvidia"), + LinuxRenderNodeInfo("/dev/dri/renderD129", vendor_id="0x8086", driver="xe"), + ) + + assert select_linux_hwaccel_device("av1_qsv", nodes=nodes) == "/dev/dri/renderD129" + + def test_qsv_returns_none_without_intel_node(self): + nodes = ( + LinuxRenderNodeInfo("/dev/dri/renderD128", vendor_id="0x10de", driver="nvidia"), + LinuxRenderNodeInfo("/dev/dri/renderD129", vendor_id="0x1002", driver="amdgpu"), + ) + + assert select_linux_hwaccel_device("hevc_qsv", nodes=nodes) is None + + +class TestSelectWindowsHwaccelDevice: + + def setup_method(self): + hw_devices_mod.select_windows_hwaccel_device.cache_clear() + + def test_windows_qsv_picks_first_successful_adapter(self): + calls: list[list[str]] = [] + + def fake_run(cmd, **_kwargs): + calls.append(list(cmd)) + class Result: + def __init__(self, returncode: int): + self.returncode = returncode + return Result(0 
if cmd[cmd.index("-qsv_device") + 1] == "1" else 1) + + with patch("core.workflows.encode.hw_devices.subprocess.run", side_effect=fake_run): + selected = hw_devices_mod.select_windows_hwaccel_device("hevc_qsv", ffmpeg_bin="ffmpeg.exe") + + assert selected == "1" + assert any("-qsv_device" in cmd and cmd[cmd.index("-qsv_device") + 1] == "0" for cmd in calls) + assert any("-qsv_device" in cmd and cmd[cmd.index("-qsv_device") + 1] == "1" for cmd in calls) + + def test_windows_amf_uses_hwupload_probe(self): + calls: list[list[str]] = [] + + def fake_run(cmd, **_kwargs): + calls.append(list(cmd)) + class Result: + returncode = 0 + return Result() + + with patch("core.workflows.encode.hw_devices.subprocess.run", side_effect=fake_run): + selected = hw_devices_mod.select_windows_hwaccel_device("h264_amf", ffmpeg_bin="ffmpeg.exe") + + assert selected == "0" + cmd = calls[0] + assert "-init_hw_device" in cmd + assert "-filter_hw_device" in cmd + assert "-vf" in cmd + assert cmd[cmd.index("-vf") + 1] == "format=nv12,hwupload" diff --git a/tests/test_encode_models.py b/tests/test_encode_models.py index df2b83a..9209313 100644 --- a/tests/test_encode_models.py +++ b/tests/test_encode_models.py @@ -42,6 +42,7 @@ EncodePreset, QualityMode, TrackTimeOffset, + VideoTrackEncodePlan, VideoEncodeSettings, presets_for_codec, ) @@ -169,9 +170,13 @@ def test_defaults(self): assert vs.target_size_mb == 4000 assert vs.preset == "slow" assert vs.extra_params == "" + assert vs.force_8bit is False assert vs.inject_hdr_meta is False assert vs.master_display == "" assert vs.max_cll == "" + assert vs.copy_dv is False + assert vs.copy_hdr10plus is False + assert vs.dovi_profile == "0" assert vs.tonemap_to_sdr is False assert vs.tonemap_algorithm == "hable" @@ -228,6 +233,7 @@ def test_required_fields(self, tmp_path): cfg = EncodeConfig(source=src, output=out, video=vs, audio_tracks=[]) assert cfg.source == src assert cfg.output == out + assert cfg.video_tracks == [vs] assert cfg.copy_subtitles is 
True assert cfg.copy_dv is False assert cfg.copy_hdr10plus is False @@ -244,6 +250,20 @@ def test_duration_default_none(self, tmp_path): ) assert cfg.duration_s is None + def test_video_tracks_can_define_primary_legacy_flags(self, tmp_path): + first = VideoEncodeSettings(codec="libx265", copy_dv=True, dovi_profile="2") + second = VideoEncodeSettings(codec="copy", copy_hdr10plus=True) + cfg = EncodeConfig( + source=tmp_path / "s.mkv", + output=tmp_path / "o.mkv", + video_tracks=[first, second], + audio_tracks=[], + ) + assert cfg.video is first + assert cfg.copy_dv is True + assert cfg.copy_hdr10plus is False + assert cfg.dovi_profile == "2" + # =========================================================================== # TrackTimeOffset @@ -259,6 +279,16 @@ def test_fields(self, tmp_path): assert tto.offset_ms == -80 +class TestVideoTrackEncodePlan: + def test_defaults(self): + plan = VideoTrackEncodePlan(track_entry_id="video-1", codec_summary="Copy") + assert plan.track_entry_id == "video-1" + assert plan.codec_summary == "Copy" + assert plan.target_codec == "copy" + assert plan.hdr_badges == () + assert plan.is_modified is False + + # =========================================================================== # EncodePreset # =========================================================================== diff --git a/tests/test_encode_panel_widgets.py b/tests/test_encode_panel_widgets.py index e757f41..fb83e53 100644 --- a/tests/test_encode_panel_widgets.py +++ b/tests/test_encode_panel_widgets.py @@ -56,8 +56,10 @@ from __future__ import annotations +from collections.abc import Generator from pathlib import Path from types import SimpleNamespace +from typing import Any, cast import pytest from PySide6.QtCore import Qt @@ -99,7 +101,7 @@ def _at( @pytest.fixture -def table(qt_app) -> _AudioTable: +def table(qt_app) -> Generator[_AudioTable, None, None]: t = _AudioTable() yield t t.close() @@ -120,7 +122,7 @@ def _codec_combo(table: _AudioTable, row: int) -> 
QComboBox: def _bitrate_editor(table: _AudioTable, row: int): editor = table.cellWidget(row, _AudioTable.COL_BITRATE) assert editor is not None - return editor + return cast(Any, editor) def _set_codec(table: _AudioTable, row: int, codec_id: str) -> None: @@ -158,7 +160,7 @@ def _remux_entry(entry_id: str = "entry-a") -> TrackEntry: ) -def _video_track(index: int, hdr_type: HDRType = HDRType.NONE) -> VideoTrack: +def _video_track(index: int, hdr_type: HDRType = HDRType.NONE, bit_depth: int = 10) -> VideoTrack: return VideoTrack( index=index, codec="hevc", @@ -166,7 +168,7 @@ def _video_track(index: int, hdr_type: HDRType = HDRType.NONE) -> VideoTrack: width=3840, height=2160, frame_rate="23.976", - bit_depth=10, + bit_depth=bit_depth, color_space=None, color_primaries=None, color_transfer=None, @@ -589,7 +591,7 @@ def test_selected_track_hdr10plus_is_used_even_when_container_hdr_is_sdr(self, q assert panel._copy_hdr10plus_cb.isChecked() is True panel.close() - def test_dynamic_hdr_settings_are_independent_per_video_entry(self, qt_app): + def test_apply_all_video_settings_is_disabled_by_default(self, qt_app): panel = EncodePanel(AppConfig()) info = _file_info( _PATH_A, @@ -607,6 +609,7 @@ def test_dynamic_hdr_settings_are_independent_per_video_entry(self, qt_app): (info, dovi_entry, _COLOR), (info, sdr_entry, _COLOR), ]) + assert panel._apply_all_video_cb.isChecked() is False assert panel._copy_dv_cb.isChecked() is True panel._video_list.setCurrentRow(1) @@ -617,7 +620,285 @@ def test_dynamic_hdr_settings_are_independent_per_video_entry(self, qt_app): assert panel._copy_dv_cb.isChecked() is True panel.close() - def test_manual_dynamic_hdr_choice_is_kept_per_video_entry(self, qt_app): + def test_video_row_shows_encoder_badge_and_bold_only_when_not_copy(self, qt_app): + panel = EncodePanel(AppConfig()) + entry = _video_entry(0) + entry.entry_id = "video-encoder-badge" + panel.set_video_tracks([(_file_info(_PATH_A, [_video_track(0)]), entry, _COLOR)]) + + row_item = 
panel._video_list.item(0) + assert row_item is not None + assert "Enc:" not in row_item.text() + assert "[x265]" not in row_item.text() + assert row_item.font().bold() is False + + idx_x265 = next( + i for i in range(panel._codec_combo.count()) + if panel._codec_combo.itemData(i) == "libx265" + ) + panel._codec_combo.setCurrentIndex(idx_x265) + row_item = panel._video_list.item(0) + assert row_item is not None + assert "[x265]" in row_item.text() + assert "Enc:" not in row_item.text() + assert row_item.font().bold() is True + + idx_copy = next( + i for i in range(panel._codec_combo.count()) + if panel._codec_combo.itemData(i) == "copy" + ) + panel._codec_combo.setCurrentIndex(idx_copy) + row_item = panel._video_list.item(0) + assert row_item is not None + assert "[x265]" not in row_item.text() + assert row_item.font().bold() is False + panel.close() + + def test_video_row_shows_dv_and_hdr10plus_badges_from_track(self, qt_app): + panel = EncodePanel(AppConfig()) + entry = _video_entry(0) + entry.entry_id = "video-with-hdr-badges" + info = _file_info(_PATH_A, [_video_track(0, HDRType.DOLBY_VISION_HDR10PLUS)]) + + panel.set_video_tracks([(info, entry, _COLOR)]) + + row_item = panel._video_list.item(0) + assert row_item is not None + text = row_item.text().lower() + assert "[dv]" in text + assert "[10+]" in text + assert "enc:" not in text + panel.close() + + def test_video_row_dv_badge_is_isolated_per_track_on_initial_load(self, qt_app): + panel = EncodePanel(AppConfig()) + dv_entry = _video_entry(0) + dv_entry.entry_id = "video-dv" + sdr_entry = _video_entry(0) + sdr_entry.entry_id = "video-sdr" + dv_info = _file_info(_PATH_A, [_video_track(0, HDRType.DOLBY_VISION)]) + sdr_info = _file_info(_PATH_B, [_video_track(0, HDRType.NONE)]) + + panel.set_video_tracks([ + (dv_info, dv_entry, _COLOR), + (sdr_info, sdr_entry, _COLOR), + ]) + + first = panel._video_list.item(0) + second = panel._video_list.item(1) + assert first is not None + assert second is not None + assert 
"[dv]" in first.text().lower() + assert "[dv]" not in second.text().lower() + panel.close() + + def test_video_row_h264_codec_removes_dv_badge_for_dv_source(self, qt_app): + panel = EncodePanel(AppConfig()) + entry = _video_entry(0) + entry.entry_id = "video-dv-h264" + info = _file_info(_PATH_A, [_video_track(0, HDRType.DOLBY_VISION)]) + panel.set_video_tracks([(info, entry, _COLOR)]) + + idx_x264 = next( + i for i in range(panel._codec_combo.count()) + if panel._codec_combo.itemData(i) == "libx264" + ) + panel._codec_combo.setCurrentIndex(idx_x264) + row_item = panel._video_list.item(0) + assert row_item is not None + text = row_item.text().lower() + assert "[dv]" not in text + assert "[10+]" not in text + panel.close() + + def test_collect_config_keeps_dynamic_hdr_flags_scoped_per_track(self, qt_app): + panel = EncodePanel(AppConfig()) + panel.set_output_provider(lambda: Path("/tmp/out.mkv")) + dv_entry = _video_entry(0) + dv_entry.entry_id = "video-dv" + sdr_entry = _video_entry(0) + sdr_entry.entry_id = "video-sdr" + dv_info = _file_info(_PATH_A, [_video_track(0, HDRType.DOLBY_VISION)]) + sdr_info = _file_info(_PATH_B, [_video_track(0, HDRType.NONE)]) + panel.set_video_tracks([ + (dv_info, dv_entry, _COLOR), + (sdr_info, sdr_entry, _COLOR), + ]) + + cfg = panel.collect_config() + assert cfg is not None + by_entry = {str(track.track_entry_id): track for track in cfg.video_tracks} + assert by_entry["video-dv"].copy_dv is True + assert by_entry["video-sdr"].copy_dv is False + panel.close() + + def test_new_video_track_does_not_inherit_previous_track_settings_when_apply_all_disabled(self, qt_app): + panel = EncodePanel(AppConfig()) + panel.set_output_provider(lambda: Path("/tmp/out.mkv")) + first_entry = _video_entry(0) + first_entry.entry_id = "video-first" + second_entry = _video_entry(1) + second_entry.entry_id = "video-second" + info = _file_info( + _PATH_A, + [ + _video_track(0, HDRType.NONE), + _video_track(1, HDRType.NONE), + ], + ) + + 
panel.set_video_tracks([(info, first_entry, _COLOR)]) + panel._apply_all_video_cb.setChecked(False) + idx_x265 = next( + i for i in range(panel._codec_combo.count()) + if panel._codec_combo.itemData(i) == "libx265" + ) + panel._codec_combo.setCurrentIndex(idx_x265) + + panel.set_video_tracks([ + (info, first_entry, _COLOR), + (info, second_entry, _COLOR), + ]) + + cfg = panel.collect_config() + assert cfg is not None + by_entry = {str(video.track_entry_id): video for video in cfg.video_tracks} + assert by_entry["video-first"].codec == "libx265" + assert by_entry["video-second"].codec == "copy" + panel.close() + + def test_new_video_track_inherits_settings_only_when_apply_all_and_non_copy(self, qt_app): + panel = EncodePanel(AppConfig()) + panel.set_output_provider(lambda: Path("/tmp/out.mkv")) + first_entry = _video_entry(0) + first_entry.entry_id = "video-first" + second_entry = _video_entry(1) + second_entry.entry_id = "video-second" + info = _file_info( + _PATH_A, + [ + _video_track(0, HDRType.NONE), + _video_track(1, HDRType.NONE), + ], + ) + + panel.set_video_tracks([(info, first_entry, _COLOR)]) + panel._apply_all_video_cb.setChecked(True) + idx_x265 = next( + i for i in range(panel._codec_combo.count()) + if panel._codec_combo.itemData(i) == "libx265" + ) + panel._codec_combo.setCurrentIndex(idx_x265) + + panel.set_video_tracks([ + (info, first_entry, _COLOR), + (info, second_entry, _COLOR), + ]) + + cfg = panel.collect_config() + assert cfg is not None + by_entry = {str(video.track_entry_id): video for video in cfg.video_tracks} + assert by_entry["video-first"].codec == "libx265" + assert by_entry["video-second"].codec == "libx265" + panel.close() + + def test_video_row_shows_sdr_badge_when_tonemap_enabled(self, qt_app): + panel = EncodePanel(AppConfig()) + entry = _video_entry(0) + entry.entry_id = "video-tonemap" + info = _file_info(_PATH_A, [_video_track(0, HDRType.HDR10)]) + panel.set_video_tracks([(info, entry, _COLOR)]) + + 
panel._tonemap_cb.setChecked(True) + row_item = panel._video_list.item(0) + assert row_item is not None + text = row_item.text().lower() + assert "[sdr]" in text + assert "[dv]" not in text + assert "[10+]" not in text + panel.close() + + def test_video_row_shows_hdr_badge_when_metadata_injection_enabled(self, qt_app): + panel = EncodePanel(AppConfig()) + entry = _video_entry(0) + entry.entry_id = "video-hdr-injection" + info = _file_info(_PATH_A, [_video_track(0, HDRType.NONE)]) + panel.set_video_tracks([(info, entry, _COLOR)]) + + panel._inject_hdr_cb.setChecked(True) + row_item = panel._video_list.item(0) + assert row_item is not None + text = row_item.text().lower() + assert "[hdr]" in text + panel.close() + + def test_h264_precheck_forces_8bit_and_logs_switch(self, qt_app): + panel = EncodePanel(AppConfig()) + entry = _video_entry(0) + entry.entry_id = "video-h264-8bit" + info = _file_info(_PATH_A, [_video_track(0, HDRType.NONE, bit_depth=10)]) + logs: list[tuple[str, str]] = [] + panel.log_message.connect(lambda lvl, msg: logs.append((str(lvl), str(msg)))) + panel.set_video_tracks([(info, entry, _COLOR)]) + + idx_x264 = next( + i for i in range(panel._codec_combo.count()) + if panel._codec_combo.itemData(i) == "libx264" + ) + panel._codec_combo.setCurrentIndex(idx_x264) + + row_item = panel._video_list.item(0) + assert row_item is not None + assert "[8-bit]" in row_item.text() + settings = panel._current_video_settings() + assert settings.force_8bit is True + assert any("bascule auto en 8-bit" in message for _lvl, message in logs) + + idx_x265 = next( + i for i in range(panel._codec_combo.count()) + if panel._codec_combo.itemData(i) == "libx265" + ) + panel._codec_combo.setCurrentIndex(idx_x265) + settings = panel._current_video_settings() + assert settings.force_8bit is False + assert any("retour au mode source" in message for _lvl, message in logs) + panel.close() + + def test_h264_8bit_switch_is_per_track_only(self, qt_app): + panel = 
EncodePanel(AppConfig()) + panel.set_output_provider(lambda: Path("/tmp/out.mkv")) + first_entry = _video_entry(0) + first_entry.entry_id = "video-10bit" + second_entry = _video_entry(1) + second_entry.entry_id = "video-8bit" + info = _file_info( + _PATH_A, + [ + _video_track(0, HDRType.NONE, bit_depth=10), + _video_track(1, HDRType.NONE, bit_depth=8), + ], + ) + + panel.set_video_tracks([(info, first_entry, _COLOR), (info, second_entry, _COLOR)]) + panel._apply_all_video_cb.setChecked(False) + + idx_x264 = next( + i for i in range(panel._codec_combo.count()) + if panel._codec_combo.itemData(i) == "libx264" + ) + panel._video_list.setCurrentRow(0) + panel._codec_combo.setCurrentIndex(idx_x264) + panel._video_list.setCurrentRow(1) + panel._codec_combo.setCurrentIndex(idx_x264) + + cfg = panel.collect_config() + assert cfg is not None + by_entry = {str(video.track_entry_id): video for video in cfg.video_tracks} + assert by_entry["video-10bit"].force_8bit is True + assert by_entry["video-8bit"].force_8bit is False + panel.close() + + def test_dynamic_hdr_settings_are_independent_per_video_entry_when_apply_all_is_disabled(self, qt_app): panel = EncodePanel(AppConfig()) info = _file_info( _PATH_A, @@ -635,6 +916,7 @@ def test_manual_dynamic_hdr_choice_is_kept_per_video_entry(self, qt_app): (info, first_entry, _COLOR), (info, second_entry, _COLOR), ]) + panel._apply_all_video_cb.setChecked(False) panel._copy_dv_cb.setChecked(False) panel._video_list.setCurrentRow(1) @@ -704,7 +986,52 @@ def test_dynamic_hdr_flags_do_not_force_encode_when_video_is_copy(self, qt_app): copy_hdr10plus=True, ) - assert panel.is_pure_copy(config) is True + assert panel.is_pure_copy(cast(Any, config)) is True + panel.close() + + def test_is_pure_copy_is_false_if_any_video_track_requires_encode(self, qt_app): + panel = EncodePanel(AppConfig()) + config = SimpleNamespace( + video=SimpleNamespace(codec="copy", inject_hdr_meta=False, tonemap_to_sdr=False), + video_tracks=[ + 
SimpleNamespace(codec="copy", inject_hdr_meta=False, tonemap_to_sdr=False), + SimpleNamespace(codec="libx265", inject_hdr_meta=False, tonemap_to_sdr=False), + ], + audio_tracks=[SimpleNamespace(codec="copy")], + ) + + assert panel.is_pure_copy(cast(Any, config)) is False + panel.close() + + def test_collect_config_keeps_all_video_tracks_after_addition_in_per_track_mode(self, qt_app): + panel = EncodePanel(AppConfig()) + panel.set_output_provider(lambda: Path("/tmp/out.mkv")) + + info = _file_info( + _PATH_A, + [ + _video_track(0, HDRType.NONE), + _video_track(1, HDRType.NONE), + ], + ) + first_entry = _video_entry(0) + first_entry.entry_id = "video-first" + second_entry = _video_entry(1) + second_entry.entry_id = "video-second" + + panel.set_video_tracks([(info, first_entry, _COLOR)]) + panel._apply_all_video_cb.setChecked(False) + panel.set_video_tracks([ + (info, first_entry, _COLOR), + (info, second_entry, _COLOR), + ]) + + cfg = panel.collect_config() + assert cfg is not None + assert [video.track_entry_id for video in cfg.video_tracks] == [ + "video-first", + "video-second", + ] panel.close() @@ -723,6 +1050,6 @@ def fake_run(cfg, *, validate=True): monkeypatch.setattr(panel._workflow, "run", fake_run) - assert panel.run_operation(config) is expected_signals + assert panel.run_operation(cast(Any, config)) is expected_signals assert captured == {"config": config, "validate": False} panel.close() diff --git a/tests/test_encode_planning.py b/tests/test_encode_planning.py new file mode 100644 index 0000000..21a003c --- /dev/null +++ b/tests/test_encode_planning.py @@ -0,0 +1,522 @@ +from __future__ import annotations + +from pathlib import Path + +import pytest + +from core.workflows.encode.models import ( + AudioTrackSettings, + EncodeConfig, + EncodeError, + VideoEncodeSettings, +) +from core.workflows.encode.planning.encode_plan import build_encode_plan +from core.workflows.encode.planning.command_plan import build_encode_command_selection +from 
core.workflows.encode.planning.metadata_plan import ( + append_container_metadata_args, + build_container_metadata_plan, + container_chapter_map_value, + container_metadata_map_value, + materialize_container_metadata_inputs, + prepare_container_metadata_inputs, +) +from core.workflows.encode.planning.offsets import ( + build_offset_specs, + track_time_offset_lookup, + video_map_arg, +) +from core.workflows.encode.planning.track_assembly import ( + build_track_input_paths, + resolve_track_assembly, +) +from core.workflows.encode.planning.sources import resolve_source_layout +from core.workflows.encode.planning.subtitles import resolve_subtitle_tracks_for_encode +from core.workflows.encode.planning.sync_plan import ( + build_sync_analysis_plan, + build_probe_remux_config, + build_sync_mapped_tracks, + needs_strict_interleave_for_encode, + requires_file_sync_fallback_for_offsets, +) +from core.workflows.encode.planning.preview import format_preview_selection +from core.workflows.encode.runtime_helpers import EncodeSyncMappedTrack, EncodeSyncTrack +from core.workflows.common.track_types import TrackTimeOffset +from core.inspector import ChapterEntry + + +def _make_config(source: Path, output: Path, **kwargs) -> EncodeConfig: + cfg = EncodeConfig( + source=source, + output=output, + video=kwargs.pop("video", VideoEncodeSettings(codec="copy")), + ) + for key, value in kwargs.items(): + setattr(cfg, key, value) + return cfg + + +def test_resolve_source_layout_dedupes_all_encode_inputs(tmp_path): + src = tmp_path / "src.mkv" + alt = tmp_path / "alt.mkv" + src.touch() + alt.touch() + + cfg = _make_config( + src, + tmp_path / "out.mkv", + video=VideoEncodeSettings(codec="copy", source_path=alt), + audio_tracks=[AudioTrackSettings(stream_index=1, codec="copy", source_path=alt)], + subtitle_tracks=[(src, 3), (alt, 4)], + attachment_streams=[(alt, 5)], + ) + + layout = resolve_source_layout(cfg) + + assert layout.sources == (src, alt) + assert layout.source_idx == {src: 0, alt: 
1} + + +def test_build_encode_plan_aggregates_static_encode_resolution(tmp_path): + src = tmp_path / "src.mkv" + alt = tmp_path / "alt.mkv" + src.touch() + alt.touch() + cfg = _make_config( + src, + tmp_path / "out.mkv", + video=VideoEncodeSettings(codec="copy", source_path=alt, stream_index=2), + audio_tracks=[AudioTrackSettings(stream_index=1, codec="copy", source_path=src)], + subtitle_tracks=[(alt, 4)], + track_time_offsets=[ + TrackTimeOffset(track_type="audio", source_path=src, stream_index=1, offset_ms=120), + ], + ) + + plan = build_encode_plan( + cfg, + probe_subtitle_indices=lambda *_args: pytest.fail("explicit subtitles should skip probe"), + resolve_global_tags=lambda _cfg: {"title": "Titre", "GENRE": "Drama"}, + video_tracks=lambda config: list(config.video_tracks) if config.video_tracks else [config.video], + video_source_from_settings=lambda config, video: video.source_path or config.source, + video_source_path=lambda config: config.video.source_path or config.source, + video_stream_index=lambda config: config.video.stream_index, + video_map_key=lambda config: (Path(config.video.source_path or config.source), int(config.video.stream_index), "video"), + ) + + assert plan.all_sources == (src, alt) + assert dict(plan.source_idx) == {src: 0, alt: 1} + assert plan.resolved_subtitle_tracks == ((alt, 4),) + assert plan.subtitles_resolved is True + assert plan.video_source == alt + assert plan.video_stream == 2 + assert plan.video_default_map == (1, 2) + assert len(plan.video_tracks) == 1 + assert plan.video_tracks[0].source == alt + assert plan.video_tracks[0].stream_index == 2 + assert plan.video_tracks[0].codec == "copy" + assert plan.sync_analysis.enabled is True + assert plan.sync_analysis.needs_subtitle_prescan is True + assert plan.sync_analysis.probe_remux_config is not None + assert dict(plan.offset_lookup) == {("audio", src, 1): 120} + assert plan.container_metadata.tag_source is None + assert dict(plan.container_metadata.global_tags) == {"title": 
"Titre", "GENRE": "Drama"} + + +def test_resolve_subtitle_tracks_for_encode_dedupes_explicit_tracks(tmp_path): + src = tmp_path / "src.mkv" + src.touch() + cfg = _make_config( + src, + tmp_path / "out.mkv", + subtitle_tracks=[(src, 3), (src, 3), (src, 4)], + copy_subtitles=True, + ) + + resolved = resolve_subtitle_tracks_for_encode( + cfg, + [src], + probe_indices=lambda *_args: pytest.fail("probe should not be used"), + ) + + assert resolved.complete is True + assert resolved.tracks == ((src, 3), (src, 4)) + + +def test_resolve_subtitle_tracks_for_encode_marks_incomplete_when_probe_fails(tmp_path): + src = tmp_path / "src.mkv" + src.touch() + cfg = _make_config(src, tmp_path / "out.mkv", subtitle_tracks=[], copy_subtitles=True) + + resolved = resolve_subtitle_tracks_for_encode( + cfg, + [src], + probe_indices=lambda *_args: None, + ) + + assert resolved.complete is False + assert resolved.tracks == () + + +def test_build_offset_specs_rejects_negative_video_offsets(tmp_path): + src = tmp_path / "src.mkv" + src.touch() + cfg = _make_config( + src, + tmp_path / "out.mkv", + track_time_offsets=[ + TrackTimeOffset(track_type="video", source_path=src, stream_index=0, offset_ms=-100), + ], + ) + + with pytest.raises(EncodeError): + build_offset_specs( + cfg, + track_mappings=[((src, 0, "video"), src, 0)], + ) + + +def test_build_offset_specs_and_video_map_arg_use_lookup(tmp_path): + src = tmp_path / "src.mkv" + src.touch() + cfg = _make_config( + src, + tmp_path / "out.mkv", + track_time_offsets=[ + TrackTimeOffset(track_type="audio", source_path=src, stream_index=1, offset_ms=125), + ], + ) + + lookup = track_time_offset_lookup(cfg) + specs = build_offset_specs( + cfg, + track_mappings=[((src, 1, "audio"), src, 1)], + offset_lookup=lookup, + ) + + assert len(specs) == 1 + assert specs[0].offset_ms == 125 + assert video_map_arg((0, 0), offset_remap={}, map_key=(src, 0, "video")) == "0:v:0" + assert video_map_arg((0, 0), offset_remap={(src, 0, "video"): (2, 0)}, 
map_key=(src, 0, "video")) == "2:0" + + +def test_prepare_container_metadata_inputs_materializes_chapters_and_extra_tag_input(tmp_path): + src = tmp_path / "src.mkv" + tag = tmp_path / "tag.mkv" + src.touch() + tag.touch() + cfg = _make_config( + src, + tmp_path / "out.mkv", + chapter_overrides=[ChapterEntry(timecode_s=0.0, name="Intro")], + tag_sources=[tag], + ) + cmd: list[str] = [] + expected_chapter_file = tmp_path / "chapters_1_123.ffmeta" + + planned = prepare_container_metadata_inputs( + cmd, + cfg, + source_idx={src: 0}, + next_input_index=1, + chapter_materialize_dir=tmp_path, + probe_duration_seconds=lambda path: 123.0 if path == src else None, + write_ffmetadata_chapters=lambda entries, out_dir, duration_s: out_dir / f"chapters_{len(entries)}_{int(duration_s or 0)}.ffmeta", + ) + + assert planned.chapter_input_index == 1 + assert planned.tag_input_index == 2 + assert planned.next_input_index == 3 + assert cmd == ["-i", str(expected_chapter_file), "-i", str(tag)] + + +def test_materialize_container_metadata_inputs_returns_args_and_indices(tmp_path): + src = tmp_path / "src.mkv" + tag = tmp_path / "tag.mkv" + src.touch() + tag.touch() + cfg = _make_config( + src, + tmp_path / "out.mkv", + chapter_overrides=[ChapterEntry(timecode_s=0.0, name="Intro")], + tag_sources=[tag], + ) + + materialized = materialize_container_metadata_inputs( + cfg, + source_idx={src: 0}, + next_input_index=1, + container_metadata_plan=build_container_metadata_plan( + cfg, + resolve_global_tags=lambda _cfg: {"title": "Titre"}, + ), + chapter_materialize_dir=tmp_path, + probe_duration_seconds=lambda _path: 123.0, + write_ffmetadata_chapters=lambda entries, out_dir, duration_s: out_dir / f"chapters_{len(entries)}_{int(duration_s or 0)}.ffmeta", + ) + + assert materialized.chapter_input_index == 1 + assert materialized.tag_input_index == 2 + assert materialized.next_input_index == 3 + assert materialized.input_args == ( + "-i", + str(tmp_path / "chapters_1_123.ffmeta"), + "-i", + 
str(tag), + ) + + +def test_append_container_metadata_args_applies_maps_and_tags(tmp_path): + src = tmp_path / "src.mkv" + src.touch() + cfg = _make_config( + src, + tmp_path / "out.mkv", + tag_overrides={"GENRE": "Drama"}, + file_title="Titre", + ) + cmd: list[str] = [] + + append_container_metadata_args( + cmd, + cfg, + default_metadata_input_index=0, + default_chapter_input_index=0, + chapter_input_index=None, + tag_input_index=None, + include_copy_video_stream_passthrough=True, + is_video_passthrough=lambda _cfg: True, + resolve_global_tags=lambda _cfg: {"title": "Titre", "GENRE": "Drama"}, + build_track_meta_args=lambda _cfg: ["-metadata:s:a:0", "language=fre"], + ) + + assert cmd[:4] == ["-map_metadata", "0", "-map_metadata:s:v:0", "0:s:v:0"] + assert "-map_chapters" in cmd and cmd[cmd.index("-map_chapters") + 1] == "0" + assert "title=Titre" in cmd + assert "GENRE=Drama" in cmd + assert cmd[-2:] == ["-metadata:s:a:0", "language=fre"] + + +def test_container_metadata_helpers_keep_existing_behavior(tmp_path): + src = tmp_path / "src.mkv" + src.touch() + cfg = _make_config( + src, + tmp_path / "out.mkv", + tag_overrides={}, + chapter_overrides=[], + ) + + chapter_map = container_chapter_map_value( + cfg, + default_chapter_input_index=0, + chapter_input_index=None, + ) + metadata_map = container_metadata_map_value( + cfg, + default_metadata_input_index=0, + chapter_input_index=None, + tag_input_index=None, + include_copy_video_stream_passthrough=False, + is_video_passthrough=lambda _cfg: False, + chapter_map=chapter_map, + ) + + assert chapter_map == "-1" + assert metadata_map == "-1" + + +def test_build_sync_mapped_tracks_keeps_source_order(tmp_path): + src = tmp_path / "src.mkv" + alt = tmp_path / "alt.mkv" + src.touch() + alt.touch() + cfg = _make_config( + src, + tmp_path / "out.mkv", + audio_tracks=[AudioTrackSettings(stream_index=1, codec="copy", source_path=alt)], + ) + + mapped = build_sync_mapped_tracks( + cfg, + {src: 0, alt: 1}, + [(src, 3)], + ) + + 
assert [(track.source_file_index, track.stream_index, track.track.track_type) for track in mapped] == [ + (0, 0, "video"), + (1, 1, "audio"), + (0, 3, "subtitle"), + ] + + +def test_build_probe_remux_config_reuses_resolved_sync_tracks(tmp_path): + src = tmp_path / "src.mkv" + alt = tmp_path / "alt.mkv" + src.touch() + alt.touch() + cfg = _make_config( + src, + tmp_path / "out.mkv", + audio_tracks=[AudioTrackSettings(stream_index=1, codec="copy", source_path=alt)], + keep_chapters=False, + ) + + remux_cfg = build_probe_remux_config( + cfg, + [src, alt], + {src: 0, alt: 1}, + [(src, 3)], + ) + + assert [source.path for source in remux_cfg.sources] == [src, alt] + assert [(track.track_type, track.mkv_tid) for track in remux_cfg.sources[0].tracks] == [("video", 0), ("subtitle", 3)] + assert [(track.track_type, track.mkv_tid) for track in remux_cfg.sources[1].tracks] == [("audio", 1)] + assert remux_cfg.keep_chapters is False + + +def test_requires_file_sync_fallback_for_offsets_detects_foreign_audio_offset(tmp_path): + src = tmp_path / "src.mkv" + alt = tmp_path / "alt.mkv" + src.touch() + alt.touch() + cfg = _make_config(src, tmp_path / "out.mkv") + mapped_tracks = [ + EncodeSyncMappedTrack(source_file_index=0, stream_index=0, track=EncodeSyncTrack("video")), + EncodeSyncMappedTrack(source_file_index=1, stream_index=2, track=EncodeSyncTrack("audio")), + ] + + requires_fallback = requires_file_sync_fallback_for_offsets( + cfg, + mapped_tracks, + {0: src, 1: alt}, + track_offset_ms=lambda lookup, **kwargs: lookup.get((kwargs["track_type"], kwargs["source_path"], kwargs["stream_index"]), 0), + offset_lookup={("audio", alt, 2): 120}, + ) + + assert requires_fallback is True + + +def test_needs_strict_interleave_for_encode_tracks_foreign_audio(): + mapped_tracks = [ + EncodeSyncMappedTrack(source_file_index=0, stream_index=0, track=EncodeSyncTrack("video")), + EncodeSyncMappedTrack(source_file_index=1, stream_index=1, track=EncodeSyncTrack("audio")), + 
EncodeSyncMappedTrack(source_file_index=0, stream_index=3, track=EncodeSyncTrack("subtitle")), + ] + + assert needs_strict_interleave_for_encode(mapped_tracks) is True + + +def test_build_sync_analysis_plan_marks_foreign_offset_as_file_fallback(tmp_path): + src = tmp_path / "src.mkv" + alt = tmp_path / "alt.mkv" + src.touch() + alt.touch() + cfg = _make_config( + src, + tmp_path / "out.mkv", + audio_tracks=[AudioTrackSettings(stream_index=1, codec="copy", source_path=alt)], + subtitle_tracks=[(src, 3)], + track_time_offsets=[ + TrackTimeOffset(track_type="audio", source_path=alt, stream_index=1, offset_ms=125), + ], + ) + + analysis = build_sync_analysis_plan( + cfg, + [src, alt], + {src: 0, alt: 1}, + [(src, 3)], + subtitles_resolved=True, + offset_lookup=track_time_offset_lookup(cfg), + ) + + assert analysis.enabled is True + assert analysis.offset_requires_file_fallback is True + assert analysis.strict_interleave_without_prescan is True + assert analysis.allow_live_sync is False + assert analysis.probe_remux_config is None + + +def test_resolve_track_assembly_reuses_sync_and_video_maps(tmp_path): + src = tmp_path / "src.mkv" + alt = tmp_path / "alt.mkv" + sync_audio = tmp_path / "sync_audio.mka" + src.touch() + alt.touch() + sync_audio.touch() + cfg = _make_config( + src, + tmp_path / "out.mkv", + video=VideoEncodeSettings(codec="copy", source_path=alt, stream_index=2), + audio_tracks=[AudioTrackSettings(stream_index=1, codec="copy", source_path=alt)], + subtitle_tracks=[(src, 3)], + ) + plan = build_encode_plan( + cfg, + probe_subtitle_indices=lambda *_args: pytest.fail("explicit subtitles should skip probe"), + resolve_global_tags=lambda _cfg: {}, + video_tracks=lambda config: list(config.video_tracks) if config.video_tracks else [config.video], + video_source_from_settings=lambda config, video: video.source_path or config.source, + video_source_path=lambda config: config.video.source_path or config.source, + video_stream_index=lambda config: 
config.video.stream_index, + video_map_key=lambda config: (Path(config.video.source_path or config.source), int(config.video.stream_index), "video"), + ) + + assembly = resolve_track_assembly( + cfg, + plan, + source_idx={src: 0, alt: 1}, + track_input_paths=build_track_input_paths(all_sources=[src, alt], sync_inputs=[sync_audio]), + sync_remap={(alt, 1, "audio"): (2, 0)}, + ) + + assert assembly.video_map == (1, 2) + assert ((alt, 2, "video"), alt, 2) in assembly.track_mappings + assert ((alt, 1, "audio"), sync_audio, 0) in assembly.track_mappings + assert ((src, 3, "subtitle"), src, 3) in assembly.track_mappings + + +def test_build_encode_command_selection_picks_pass2_for_preview(tmp_path): + src = tmp_path / "src.mkv" + src.touch() + cfg = _make_config(src, tmp_path / "out.mkv") + plan = build_encode_plan( + cfg, + probe_subtitle_indices=lambda *_args: [], + resolve_global_tags=lambda _cfg: {}, + video_tracks=lambda config: list(config.video_tracks) if config.video_tracks else [config.video], + video_source_from_settings=lambda config, video: video.source_path or config.source, + video_source_path=lambda config: config.video.source_path or config.source, + video_stream_index=lambda config: config.video.stream_index, + video_map_key=lambda config: (Path(config.video.source_path or config.source), int(config.video.stream_index), "video"), + ) + + selection = build_encode_command_selection( + cfg, + plan=plan, + is_multi_video=lambda _cfg: False, + uses_two_pass=lambda _cfg: True, + build_multi_video_preview=lambda *_args, **_kwargs: pytest.fail("multi-video path should not be used"), + build_two_pass=lambda *_args, **_kwargs: [["ffmpeg", "-pass", "1"], ["ffmpeg", "-pass", "2", "out.mkv"]], + build_single_pass=lambda *_args, **_kwargs: pytest.fail("single-pass path should not be used"), + ) + + assert selection.is_two_pass is True + assert selection.is_multi_video is False + assert selection.preview_command == ("ffmpeg", "-pass", "2", "out.mkv") + + +def 
test_format_preview_selection_adds_two_pass_prefix(): + from core.workflows.encode.planning.plan_models import EncodeCommandSelection + + selection = EncodeCommandSelection( + commands=(("ffmpeg", "-pass", "1"), ("ffmpeg", "-pass", "2", "out.mkv")), + preview_index=1, + is_multi_video=False, + is_two_pass=True, + ) + + preview = format_preview_selection(selection) + + assert preview.startswith("# Mode taille cible") + assert "-pass 2" in preview diff --git a/tests/test_encode_runtime_services.py b/tests/test_encode_runtime_services.py new file mode 100644 index 0000000..126e9c1 --- /dev/null +++ b/tests/test_encode_runtime_services.py @@ -0,0 +1,116 @@ +from __future__ import annotations + +import os +from pathlib import Path +from types import SimpleNamespace +from typing import Any, cast + +import pytest + +from core.workflows.encode.models import AudioTrackSettings, EncodeConfig, EncodeError, QualityMode, VideoEncodeSettings +from core.workflows.encode.runtime.attachment_preparation import ( + AttachmentPreparationService, + AttachmentPreparationServiceCallbacks, +) +from core.workflows.encode.runtime.storage_guard import ( + ensure_inject_storage_available, + estimate_inject_storage_requirements, +) + + +def _make_config(source: Path, output: Path, **kwargs) -> EncodeConfig: + config = EncodeConfig( + source=source, + output=output, + video=kwargs.pop( + "video", + VideoEncodeSettings(codec="libx265", quality_mode=QualityMode.BITRATE, bitrate_kbps=8_000), + ), + ) + for key, value in kwargs.items(): + setattr(config, key, value) + return config + + +def test_estimate_inject_storage_requirements_counts_audio_and_attachments(tmp_path): + src = tmp_path / "src.mkv" + src.write_bytes(b"\x00" * 200_000) + attachment = tmp_path / "cover.jpg" + attachment.write_bytes(b"\x01" * 4_096) + cfg = _make_config( + src, + tmp_path / "out.mkv", + audio_tracks=[AudioTrackSettings(stream_index=1, codec="aac", bitrate_kbps=192)], + extra_attachments=[attachment], + ) + + 
work_required, output_required = estimate_inject_storage_requirements( + cfg, + probe_duration_seconds=lambda _source: 10.0, + size_to_bitrate_kbps=lambda _config: pytest.fail("size mode should not be used"), + ) + + assert work_required == (2 * 128 * 1024 * 1024) + (32 * 1024 * 1024) + assert output_required == (128 * 1024 * 1024) + (64 * 1024 * 1024) + 240_000 + 4_096 + + +def test_ensure_inject_storage_available_raises_on_same_fs_shortfall(tmp_path): + src = tmp_path / "src.mkv" + src.write_bytes(b"\x00") + cfg = _make_config(src, tmp_path / "out.mkv", work_dir=tmp_path / "work") + logs: list[str] = [] + + with pytest.raises(EncodeError, match="Espace disque insuffisant"): + ensure_inject_storage_available( + cfg, + estimate_requirements=lambda _config: (10_000, 20_000), + log_info=logs.append, + ram_buffer_enabled=False, + ram_buffer_dir=lambda: None, + disk_usage=cast(Any, lambda _path: SimpleNamespace(total=100_000, used=90_000, free=25_000)), + stat=cast(Any, lambda _path: SimpleNamespace(st_dev=42)), + temp_dir=lambda: str(tmp_path / "tmp"), + ) + + assert any("Estimation espace injection" in message for message in logs) + + +def test_attachment_preparation_service_extracts_only_attached_pics(tmp_path): + src = tmp_path / "src.mkv" + src.touch() + cover = tmp_path / "cover.png" + font = tmp_path / "font.ttf" + extracted: list[tuple[Path, int, Path]] = [] + cfg = _make_config( + src, + tmp_path / "out.mkv", + attachment_streams=[(src, 5), (src, 6)], + extra_attachments=[font], + ) + + def _extract_attached_pic(source: Path, stream_idx: int, dest: Path, _signals: Any) -> None: + extracted.append((source, stream_idx, dest)) + dest.write_bytes(b"img") + + service = AttachmentPreparationService( + AttachmentPreparationServiceCallbacks( + check_cancelled=lambda _signals: None, + describe_attachment_stream=lambda _source, stream_idx: ( + {"is_attached_pic": True, "filename": "cover.png", "mimetype": "image/png"} + if stream_idx == 5 + else {"is_attached_pic": 
False, "filename": "font.ttf", "mimetype": "font/ttf"} + ), + attachment_filename=lambda meta, _stream_idx: str(meta["filename"]), + unique_attachment_path=lambda tmp_dir, filename: tmp_dir / filename, + extract_attached_pic=_extract_attached_pic, + ) + ) + + prepared, cleanup_dir = service.prepare(cfg, work_dir=tmp_path) + + assert cleanup_dir is not None + assert prepared.attachment_streams == [(src, 6)] + assert prepared.extra_attachments[0].name == "cover.png" + assert prepared.extra_attachments[1] == font + assert extracted == [(src, 5, Path(cleanup_dir) / "cover.png")] + assert (Path(cleanup_dir) / "cover.png").exists() diff --git a/tests/test_encode_workflow.py b/tests/test_encode_workflow.py index 50ae30a..2daed2e 100644 --- a/tests/test_encode_workflow.py +++ b/tests/test_encode_workflow.py @@ -105,9 +105,13 @@ from unittest.mock import MagicMock, patch, call import tempfile import os +from typing import Any, cast import pytest from PySide6.QtCore import QCoreApplication, Qt +import core.workflows.encode.workflow as encode_workflow_mod +from core.workflows.encode.runtime import ram_buffer as _ram_buffer_mod +from core.workflows.encode.domain.codecs import hdr_meta_args as _hdr_meta_args _app: QCoreApplication | None = None @@ -153,7 +157,7 @@ def _make_video_settings(**kw) -> VideoEncodeSettings: max_cll="", tonemap_to_sdr=False, tonemap_algorithm="hable", ) defaults.update(kw) - return VideoEncodeSettings(**defaults) + return VideoEncodeSettings(**cast(Any, defaults)) def _make_config(source: Path, output: Path, **kw) -> EncodeConfig: @@ -163,16 +167,27 @@ def _make_config(source: Path, output: Path, **kw) -> EncodeConfig: copy_dv=False, copy_hdr10plus=False, dovi_profile="0", work_dir=None, ) defaults.update(kw) - return EncodeConfig(**defaults) + return EncodeConfig(**cast(Any, defaults)) -def _make_workflow(enabled=True, threshold=15, ffmpeg_threads=None) -> EncodeWorkflow: +def _as_single_command(cmd: list[str] | list[list[str]]) -> list[str]: + 
assert cmd and isinstance(cmd[0], str) + return cast(list[str], cmd) + + +def _make_workflow( + enabled=True, + threshold=15, + ffmpeg_threads=None, + max_parallel_video_encodes=1, +) -> EncodeWorkflow: return EncodeWorkflow( ffmpeg_bin="ffmpeg", dovi_tool_bin="dovi_tool", hdr10plus_bin="hdr10plus_tool", ram_buffer_enabled=enabled, ram_buffer_threshold_pct=threshold, ffmpeg_threads=ffmpeg_threads, + max_parallel_video_encodes=max_parallel_video_encodes, ) @@ -212,6 +227,147 @@ def test_two_pass_build_includes_threads_on_both_passes(self, tmp_path): assert cmds[1][cmds[1].index("-threads") + 1] == "14" +class TestMaxParallelVideoEncodes: + + def test_default_is_one(self): + wf = _make_workflow() + assert wf._max_parallel_video_encodes == 1 + + def test_setter_normalizes_to_minimum_one(self): + wf = _make_workflow() + wf.set_max_parallel_video_encodes(4) + assert wf._max_parallel_video_encodes == 4 + wf.set_max_parallel_video_encodes(0) + assert wf._max_parallel_video_encodes == 1 + + +class TestVideoTrackPreparationOrchestrator: + + def test_ram_guard_serializes_disjoint_resources_when_budget_is_tight(self): + available_ram = [10] + first_started = threading.Event() + second_started = threading.Event() + release_first = threading.Event() + active = 0 + max_active = 0 + gate = threading.Lock() + results: list[tuple[int, dict[str, object], list[Path]]] = [] + + def _task(order: int): + def _run(): + nonlocal active, max_active + with gate: + active += 1 + max_active = max(max_active, active) + try: + if order == 0: + first_started.set() + available_ram[0] = 5 + assert release_first.wait(1.0) + available_ram[0] = 10 + else: + second_started.set() + prepared: dict[str, object] = {"path": Path(f"/tmp/video_{order}.mkv")} + cleanup: list[Path] = [] + return prepared, cleanup + finally: + with gate: + active -= 1 + return _run + + orchestrator = encode_workflow_mod._VideoTrackPreparationOrchestrator( + max_parallel=2, + cancel_cb=lambda: None, + min_available_ram_bytes=4, 
+ available_ram_cb=lambda: available_ram[0], + ) + tasks = [ + encode_workflow_mod._VideoTrackPrepTask( + order=0, + resource_key="cpu", + estimated_ram_bytes=3, + run=_task(0), + ), + encode_workflow_mod._VideoTrackPrepTask( + order=1, + resource_key="gpu:nvenc", + estimated_ram_bytes=3, + run=_task(1), + ), + ] + + worker = threading.Thread(target=lambda: results.extend(orchestrator.execute(tasks))) + worker.start() + assert first_started.wait(1.0) + time.sleep(0.1) + assert not second_started.is_set() + release_first.set() + worker.join(1.0) + + assert not worker.is_alive() + assert second_started.is_set() + assert max_active == 1 + assert [order for order, _prepared, _cleanup in sorted(results)] == [0, 1] + + def test_ram_guard_allows_overlap_when_budget_is_sufficient(self): + available_ram = [20] + both_started = threading.Event() + release_tasks = threading.Event() + active = 0 + max_active = 0 + gate = threading.Lock() + results: list[tuple[int, dict[str, object], list[Path]]] = [] + + def _task(order: int): + def _run(): + nonlocal active, max_active + with gate: + active += 1 + max_active = max(max_active, active) + if active >= 2: + both_started.set() + try: + assert release_tasks.wait(1.0) + prepared: dict[str, object] = {"path": Path(f"/tmp/video_{order}.mkv")} + cleanup: list[Path] = [] + return prepared, cleanup + finally: + with gate: + active -= 1 + return _run + + orchestrator = encode_workflow_mod._VideoTrackPreparationOrchestrator( + max_parallel=2, + cancel_cb=lambda: None, + min_available_ram_bytes=4, + available_ram_cb=lambda: available_ram[0], + ) + tasks = [ + encode_workflow_mod._VideoTrackPrepTask( + order=0, + resource_key="cpu", + estimated_ram_bytes=3, + run=_task(0), + ), + encode_workflow_mod._VideoTrackPrepTask( + order=1, + resource_key="gpu:nvenc", + estimated_ram_bytes=3, + run=_task(1), + ), + ] + + worker = threading.Thread(target=lambda: results.extend(orchestrator.execute(tasks))) + worker.start() + assert 
both_started.wait(1.0) + release_tasks.set() + worker.join(1.0) + + assert not worker.is_alive() + assert max_active >= 2 + assert [order for order, _prepared, _cleanup in sorted(results)] == [0, 1] + + class TestFfmpegProgressArgs: def test_single_pass_build_includes_machine_progress_flags(self, tmp_path): @@ -252,6 +408,55 @@ def test_video_only_build_includes_machine_progress_flags(self, tmp_path): assert cmd[cmd.index("-progress") + 1] == "pipe:1" assert "-nostats" in cmd + +class TestTwoPassPerTrackPasslogfile: + + def test_multi_video_track_two_pass_uses_track_specific_passlogfile(self, tmp_path): + src = tmp_path / "source.mkv" + src.touch() + out = tmp_path / "video_1.mkv" + passlog_prefix = tmp_path / "ffmpeg2pass-video_1" + wf = _make_workflow() + video = _make_video_settings(codec="libx265", quality_mode=QualityMode.SIZE) + cfg = _make_config(src, tmp_path / "out.mkv", video=video, duration_s=3600.0) + + cmds = wf._build_multi_video_track_encode_commands( + cfg, + video, + src, + out, + passlog_prefix=passlog_prefix, + ) + + assert len(cmds) == 2 + assert "-passlogfile" in cmds[0] + assert cmds[0][cmds[0].index("-passlogfile") + 1] == str(passlog_prefix) + assert "-passlogfile" in cmds[1] + assert cmds[1][cmds[1].index("-passlogfile") + 1] == str(passlog_prefix) + + def test_video_only_two_pass_for_track_uses_track_specific_passlogfile(self, tmp_path): + src = tmp_path / "source.mkv" + src.touch() + out = tmp_path / "video_1.hevc" + passlog_prefix = tmp_path / "ffmpeg2pass-video_1" + wf = _make_workflow() + video = _make_video_settings(codec="libx265", quality_mode=QualityMode.SIZE) + cfg = _make_config(src, tmp_path / "out.mkv", video=video, duration_s=3600.0) + + cmds = wf._build_video_only_two_pass_for_track( + cfg, + video, + src, + out, + passlog_prefix=passlog_prefix, + ) + + assert len(cmds) == 2 + assert "-passlogfile" in cmds[0] + assert cmds[0][cmds[0].index("-passlogfile") + 1] == str(passlog_prefix) + assert "-passlogfile" in cmds[1] + assert 
cmds[1][cmds[1].index("-passlogfile") + 1] == str(passlog_prefix) + # =========================================================================== # _total_ram_bytes — cross-platform # =========================================================================== @@ -263,49 +468,49 @@ def test_linux_reads_memtotal(self): fake = "MemTotal: 16384000 kB\nMemFree: 4096000 kB\n" with patch("sys.platform", "linux"), \ patch.object(Path, "read_text", return_value=fake): - assert EncodeWorkflow._total_ram_bytes() == 16_384_000 * 1024 + assert _ram_buffer_mod.total_ram_bytes() == 16_384_000 * 1024 def test_linux_returns_zero_if_memtotal_absent(self): fake = "MemFree: 4096000 kB\n" with patch("sys.platform", "linux"), \ patch.object(Path, "read_text", return_value=fake): - assert EncodeWorkflow._total_ram_bytes() == 0 + assert _ram_buffer_mod.total_ram_bytes() == 0 def test_linux_returns_zero_on_ioerror(self): with patch("sys.platform", "linux"), \ patch.object(Path, "read_text", side_effect=OSError): - assert EncodeWorkflow._total_ram_bytes() == 0 + assert _ram_buffer_mod.total_ram_bytes() == 0 def test_macos_parses_sysctl(self): """macOS : sysctl hw.memsize retourne RAM totale en octets.""" with patch("sys.platform", "darwin"), \ patch("subprocess.run") as mock_run: mock_run.return_value = MagicMock(returncode=0, stdout="17179869184\n") - assert EncodeWorkflow._total_ram_bytes() == 17_179_869_184 + assert _ram_buffer_mod.total_ram_bytes() == 17_179_869_184 def test_macos_returns_zero_on_sysctl_failure(self): with patch("sys.platform", "darwin"), \ patch("subprocess.run") as mock_run: mock_run.return_value = MagicMock(returncode=1, stdout="") - assert EncodeWorkflow._total_ram_bytes() == 0 + assert _ram_buffer_mod.total_ram_bytes() == 0 def test_windows_uses_ctypes(self): """Windows : lit ullTotalPhys via GlobalMemoryStatusEx.""" fake_stat = MagicMock() fake_stat.ullTotalPhys = 17_179_869_184 with patch("sys.platform", "win32"), \ - patch.object(EncodeWorkflow, 
"_win_mem_status", return_value=fake_stat): - assert EncodeWorkflow._total_ram_bytes() == 17_179_869_184 + patch.object(_ram_buffer_mod, "_win_mem_status", return_value=fake_stat): + assert _ram_buffer_mod.total_ram_bytes() == 17_179_869_184 def test_windows_returns_zero_if_ctypes_unavailable(self): """Windows : fallback à 0 si ctypes/_ctypes est indisponible.""" with patch("sys.platform", "win32"), \ - patch.object(EncodeWorkflow, "_win_mem_status", side_effect=ImportError): - assert EncodeWorkflow._total_ram_bytes() == 0 + patch.object(_ram_buffer_mod, "_win_mem_status", side_effect=ImportError): + assert _ram_buffer_mod.total_ram_bytes() == 0 def test_unknown_platform_returns_zero(self): with patch("sys.platform", "freebsd"): - assert EncodeWorkflow._total_ram_bytes() == 0 + assert _ram_buffer_mod.total_ram_bytes() == 0 # =========================================================================== @@ -319,46 +524,46 @@ def test_linux_reads_memavailable(self): fake = "MemTotal: 16384000 kB\nMemAvailable: 8192000 kB\n" with patch("sys.platform", "linux"), \ patch.object(Path, "read_text", return_value=fake): - assert EncodeWorkflow._available_ram_bytes() == 8_192_000 * 1024 + assert _ram_buffer_mod.available_ram_bytes() == 8_192_000 * 1024 def test_linux_returns_zero_when_memavailable_absent(self): fake = "MemTotal: 16384000 kB\nMemFree: 4096000 kB\n" with patch("sys.platform", "linux"), \ patch.object(Path, "read_text", return_value=fake): - assert EncodeWorkflow._available_ram_bytes() == 0 + assert _ram_buffer_mod.available_ram_bytes() == 0 def test_linux_returns_zero_on_ioerror(self): with patch("sys.platform", "linux"), \ patch.object(Path, "read_text", side_effect=OSError): - assert EncodeWorkflow._available_ram_bytes() == 0 + assert _ram_buffer_mod.available_ram_bytes() == 0 def test_linux_parses_large_values(self): fake = "MemAvailable: 67108864 kB\n" with patch("sys.platform", "linux"), \ patch.object(Path, "read_text", return_value=fake): - assert 
EncodeWorkflow._available_ram_bytes() == 67_108_864 * 1024 + assert _ram_buffer_mod.available_ram_bytes() == 67_108_864 * 1024 def test_macos_delegates_to_macos_available_ram(self): """macOS : délègue à _macos_available_ram.""" with patch("sys.platform", "darwin"), \ - patch.object(EncodeWorkflow, "_macos_available_ram", return_value=4_000_000_000): - assert EncodeWorkflow._available_ram_bytes() == 4_000_000_000 + patch.object(_ram_buffer_mod, "macos_available_ram", return_value=4_000_000_000): + assert _ram_buffer_mod.available_ram_bytes() == 4_000_000_000 def test_windows_reads_ullavailphys(self): fake_stat = MagicMock() fake_stat.ullAvailPhys = 8_589_934_592 with patch("sys.platform", "win32"), \ - patch.object(EncodeWorkflow, "_win_mem_status", return_value=fake_stat): - assert EncodeWorkflow._available_ram_bytes() == 8_589_934_592 + patch.object(_ram_buffer_mod, "_win_mem_status", return_value=fake_stat): + assert _ram_buffer_mod.available_ram_bytes() == 8_589_934_592 def test_windows_available_returns_zero_if_ctypes_unavailable(self): with patch("sys.platform", "win32"), \ - patch.object(EncodeWorkflow, "_win_mem_status", side_effect=ImportError): - assert EncodeWorkflow._available_ram_bytes() == 0 + patch.object(_ram_buffer_mod, "_win_mem_status", side_effect=ImportError): + assert _ram_buffer_mod.available_ram_bytes() == 0 def test_unknown_platform_returns_zero(self): with patch("sys.platform", "freebsd"): - assert EncodeWorkflow._available_ram_bytes() == 0 + assert _ram_buffer_mod.available_ram_bytes() == 0 # =========================================================================== @@ -382,14 +587,14 @@ def test_parses_free_inactive_speculative_purgeable(self): """Additionne free+inactive+speculative+purgeable avec la bonne taille de page.""" with patch("subprocess.run") as mock_run: mock_run.return_value = MagicMock(returncode=0, stdout=self._VM_STAT_SAMPLE) - result = EncodeWorkflow._macos_available_ram() + result = 
_ram_buffer_mod.macos_available_ram() expected = (1234 + 2000 + 500 + 300) * 16384 assert result == expected def test_returns_zero_on_subprocess_failure(self): with patch("subprocess.run") as mock_run: mock_run.return_value = MagicMock(returncode=1, stdout="") - assert EncodeWorkflow._macos_available_ram() == 0 + assert _ram_buffer_mod.macos_available_ram() == 0 def test_uses_default_page_size_4096_when_not_parseable(self): """Taille de page par défaut = 4096 si non parseable.""" @@ -401,7 +606,7 @@ def test_uses_default_page_size_4096_when_not_parseable(self): ) with patch("subprocess.run") as mock_run: mock_run.return_value = MagicMock(returncode=0, stdout=vm_stat_no_pagesize) - result = EncodeWorkflow._macos_available_ram() + result = _ram_buffer_mod.macos_available_ram() assert result == (1000 + 500 + 200 + 100) * 4096 @@ -415,33 +620,33 @@ def test_linux_returns_dev_shm_when_writable(self): with patch("sys.platform", "linux"), \ patch.object(Path, "is_dir", return_value=True), \ patch("os.access", return_value=True): - assert EncodeWorkflow._ram_buffer_dir() == Path("/dev/shm") + assert _ram_buffer_mod.ram_buffer_dir() == Path("/dev/shm") def test_linux_returns_none_when_shm_not_dir(self): with patch("sys.platform", "linux"), \ patch.object(Path, "is_dir", return_value=False): - assert EncodeWorkflow._ram_buffer_dir() is None + assert _ram_buffer_mod.ram_buffer_dir() is None def test_linux_returns_none_when_shm_not_writable(self): with patch("sys.platform", "linux"), \ patch.object(Path, "is_dir", return_value=True), \ patch("os.access", return_value=False): - assert EncodeWorkflow._ram_buffer_dir() is None + assert _ram_buffer_mod.ram_buffer_dir() is None def test_macos_returns_dev_shm_when_available(self): with patch("sys.platform", "darwin"), \ patch.object(Path, "is_dir", return_value=True), \ patch("os.access", return_value=True): - assert EncodeWorkflow._ram_buffer_dir() == Path("/dev/shm") + assert _ram_buffer_mod.ram_buffer_dir() == Path("/dev/shm") 
def test_windows_always_returns_none(self): """Windows n'a pas de répertoire RAM standard.""" with patch("sys.platform", "win32"): - assert EncodeWorkflow._ram_buffer_dir() is None + assert _ram_buffer_mod.ram_buffer_dir() is None def test_unknown_platform_returns_none(self): with patch("sys.platform", "freebsd11"): - assert EncodeWorkflow._ram_buffer_dir() is None + assert _ram_buffer_mod.ram_buffer_dir() is None # =========================================================================== @@ -459,9 +664,9 @@ def _wf(self, enabled=True, threshold=15) -> EncodeWorkflow: def _patch_ram(self, available: int, total: int) -> ExitStack: stack = ExitStack() - stack.enter_context(patch.object(EncodeWorkflow, "_available_ram_bytes", return_value=available)) - stack.enter_context(patch.object(EncodeWorkflow, "_total_ram_bytes", return_value=total)) - stack.enter_context(patch.object(EncodeWorkflow, "_ram_buffer_dir", return_value=Path("/dev/shm"))) + stack.enter_context(patch.object(_ram_buffer_mod, "available_ram_bytes", return_value=available)) + stack.enter_context(patch.object(_ram_buffer_mod, "total_ram_bytes", return_value=total)) + stack.enter_context(patch.object(_ram_buffer_mod, "ram_buffer_dir", return_value=Path("/dev/shm"))) return stack # ── formule correcte ───────────────────────────────────────────────────── @@ -531,9 +736,9 @@ def test_disabled_config_always_returns_disk(self, tmp_path): def test_no_ram_dir_returns_disk(self, tmp_path): """Pas de répertoire RAM (Windows) → disque même avec RAM suffisante.""" wf = self._wf() - with patch.object(EncodeWorkflow, "_ram_buffer_dir", return_value=None), \ - patch.object(EncodeWorkflow, "_available_ram_bytes", return_value=10 * 2**30), \ - patch.object(EncodeWorkflow, "_total_ram_bytes", return_value=10 * 2**30): + with patch.object(_ram_buffer_mod, "ram_buffer_dir", return_value=None), \ + patch.object(_ram_buffer_mod, "available_ram_bytes", return_value=10 * 2**30), \ + patch.object(_ram_buffer_mod, 
"total_ram_bytes", return_value=10 * 2**30): result = wf._shm_path(tmp_path, "test.hevc", 1_000_000) assert result == tmp_path / "test.hevc" @@ -542,18 +747,18 @@ def test_no_ram_dir_returns_disk(self, tmp_path): def test_zero_total_returns_disk(self, tmp_path): """total=0 → impossible d'évaluer le seuil → disque.""" wf = self._wf() - with patch.object(EncodeWorkflow, "_ram_buffer_dir", return_value=Path("/dev/shm")), \ - patch.object(EncodeWorkflow, "_available_ram_bytes", return_value=8 * 2**30), \ - patch.object(EncodeWorkflow, "_total_ram_bytes", return_value=0): + with patch.object(_ram_buffer_mod, "ram_buffer_dir", return_value=Path("/dev/shm")), \ + patch.object(_ram_buffer_mod, "available_ram_bytes", return_value=8 * 2**30), \ + patch.object(_ram_buffer_mod, "total_ram_bytes", return_value=0): result = wf._shm_path(tmp_path, "test.hevc", 1_000_000) assert result == tmp_path / "test.hevc" def test_zero_available_returns_disk(self, tmp_path): """available=0 → disque.""" wf = self._wf() - with patch.object(EncodeWorkflow, "_ram_buffer_dir", return_value=Path("/dev/shm")), \ - patch.object(EncodeWorkflow, "_available_ram_bytes", return_value=0), \ - patch.object(EncodeWorkflow, "_total_ram_bytes", return_value=10 * 2**30): + with patch.object(_ram_buffer_mod, "ram_buffer_dir", return_value=Path("/dev/shm")), \ + patch.object(_ram_buffer_mod, "available_ram_bytes", return_value=0), \ + patch.object(_ram_buffer_mod, "total_ram_bytes", return_value=10 * 2**30): result = wf._shm_path(tmp_path, "test.hevc", 1_000_000) assert result == tmp_path / "test.hevc" @@ -911,7 +1116,7 @@ def test_size_mode_returns_two_commands(self, tmp_path): def test_single_pass_contains_source_and_output(self, tmp_path): src = tmp_path / "src.mkv"; src.touch() out = tmp_path / "out.mkv" - cmd = self.wf.build_command(_make_config(src, out)) + cmd = _as_single_command(self.wf.build_command(_make_config(src, out))) cmd_str = " ".join(cmd) assert str(src) in cmd_str and str(out) in cmd_str @@ 
-938,8 +1143,9 @@ def test_audio_aac_has_bitrate(self, tmp_path): def test_audio_ac3_has_bitrate(self, tmp_path): src = tmp_path / "src.mkv"; src.touch() track = AudioTrackSettings(stream_index=1, codec="ac3", bitrate_kbps=448) - cmd = self.wf.build_command(_make_config(src, tmp_path / "out.mkv", - audio_tracks=[track])) + cmd = _as_single_command( + self.wf.build_command(_make_config(src, tmp_path / "out.mkv", audio_tracks=[track])) + ) assert "-c:a:0" in cmd and cmd[cmd.index("-c:a:0") + 1] == "ac3" assert "-b:a:0" in cmd and "448k" in cmd @@ -972,7 +1178,9 @@ def test_audio_eac3_7_1_forces_5_1_downmix(self, tmp_path): input_channels=8, input_channel_layout="7.1", ) - cmd = self.wf.build_command(_make_config(src, tmp_path / "out.mkv", audio_tracks=[track])) + cmd = _as_single_command( + self.wf.build_command(_make_config(src, tmp_path / "out.mkv", audio_tracks=[track])) + ) assert "-ac:a:0" in cmd and cmd[cmd.index("-ac:a:0") + 1] == "6" assert "-channel_layout:a:0" in cmd and cmd[cmd.index("-channel_layout:a:0") + 1] == "5.1" @@ -993,16 +1201,18 @@ def test_truehd_core_bsf_present(self, tmp_path): src = tmp_path / "src.mkv"; src.touch() track = AudioTrackSettings(stream_index=2, codec="copy", bitrate_kbps=384, extract_truehd_core=True) - cmd = self.wf.build_command(_make_config(src, tmp_path / "out.mkv", - audio_tracks=[track])) + cmd = _as_single_command( + self.wf.build_command(_make_config(src, tmp_path / "out.mkv", audio_tracks=[track])) + ) assert "truehd_core" in " ".join(cmd) def test_truehd_core_bsf_ignored_for_transcoded_audio(self, tmp_path): src = tmp_path / "src.mkv"; src.touch() track = AudioTrackSettings(stream_index=2, codec="eac3", bitrate_kbps=640, extract_truehd_core=True) - cmd = self.wf.build_command(_make_config(src, tmp_path / "out.mkv", - audio_tracks=[track])) + cmd = _as_single_command( + self.wf.build_command(_make_config(src, tmp_path / "out.mkv", audio_tracks=[track])) + ) assert "truehd_core" not in cmd assert "-bsf:a:0" not in cmd 
assert "-c:a:0" in cmd and cmd[cmd.index("-c:a:0") + 1] == "eac3" @@ -1014,7 +1224,7 @@ def test_audio_track_order_follows_config_across_sources(self, tmp_path): AudioTrackSettings(stream_index=5, codec="aac", bitrate_kbps=192, source_path=alt), AudioTrackSettings(stream_index=1, codec="copy", source_path=src), ] - cmd = self.wf.build_command(_make_config(src, tmp_path / "out.mkv", audio_tracks=tracks)) + cmd = _as_single_command(self.wf.build_command(_make_config(src, tmp_path / "out.mkv", audio_tracks=tracks))) map_values = [cmd[i + 1] for i, arg in enumerate(cmd[:-1]) if arg == "-map"] assert map_values[:3] == ["0:v:0", "1:5", "0:1"] @@ -1025,12 +1235,12 @@ def test_audio_track_order_follows_config_across_sources(self, tmp_path): def test_subtitle_track_order_follows_config_across_sources(self, tmp_path): src = tmp_path / "src.mkv"; src.touch() alt = tmp_path / "alt.mkv"; alt.touch() - cmd = self.wf.build_command(_make_config( + cmd = _as_single_command(self.wf.build_command(_make_config( src, tmp_path / "out.mkv", copy_subtitles=False, subtitle_tracks=[(alt, 7), (src, 4)], - )) + ))) map_values = [cmd[i + 1] for i, arg in enumerate(cmd[:-1]) if arg == "-map"] assert map_values[:3] == ["0:v:0", "1:7", "0:4"] @@ -1038,11 +1248,11 @@ def test_subtitle_track_order_follows_config_across_sources(self, tmp_path): def test_video_track_mapping_uses_selected_stream_index(self, tmp_path): src = tmp_path / "src.mkv"; src.touch() - cmd = self.wf.build_command(_make_config( + cmd = _as_single_command(self.wf.build_command(_make_config( src, tmp_path / "out.mkv", video=_make_video_settings(codec="copy", stream_index=4, source_path=src), - )) + ))) map_values = [cmd[i + 1] for i, arg in enumerate(cmd[:-1]) if arg == "-map"] assert map_values[0] == "0:4" @@ -1050,11 +1260,11 @@ def test_video_track_mapping_uses_selected_stream_index(self, tmp_path): def test_video_track_mapping_uses_selected_source(self, tmp_path): src = tmp_path / "src.mkv"; src.touch() alt = tmp_path / 
"alt.mkv"; alt.touch() - cmd = self.wf.build_command(_make_config( + cmd = _as_single_command(self.wf.build_command(_make_config( src, tmp_path / "out.mkv", video=_make_video_settings(codec="copy", stream_index=3, source_path=alt), - )) + ))) first_input = cmd.index("-i") second_input = cmd.index("-i", first_input + 1) @@ -1109,6 +1319,152 @@ def test_vaapi_two_pass_adds_device_on_both_passes(self, tmp_path): assert "-hwaccel" in pass_cmd and pass_cmd[pass_cmd.index("-hwaccel") + 1] == "vaapi" assert "-vf" not in pass_cmd + def test_h264_vaapi_force_8bit_disables_hw_surface_decode_and_adds_upload(self, tmp_path): + src = tmp_path / "src.mkv"; src.touch() + with patch.object(EncodeWorkflow, "_vaapi_device", return_value="/dev/dri/renderD128"): + cmd = self.wf.build_command_single( + _make_config( + src, + tmp_path / "out.mkv", + video=_make_video_settings(codec="h264_vaapi", force_8bit=True), + ) + ) + + assert "-vaapi_device" in cmd + assert "-hwaccel_output_format" not in cmd + assert "-vf" in cmd + assert cmd[cmd.index("-vf") + 1].endswith("format=nv12,hwupload") + assert "-pix_fmt" in cmd + assert cmd[cmd.index("-pix_fmt") + 1] == "nv12" + + def test_h264_nvenc_force_8bit_disables_hw_decode_and_sets_nv12(self, tmp_path): + src = tmp_path / "src.mkv"; src.touch() + cmd = self.wf.build_command_single( + _make_config( + src, + tmp_path / "out.mkv", + video=_make_video_settings(codec="h264_nvenc", force_8bit=True), + ) + ) + + assert "-hwaccel_output_format" not in cmd + assert "-pix_fmt" in cmd + assert cmd[cmd.index("-pix_fmt") + 1] == "nv12" + + def test_qsv_single_pass_adds_explicit_device_on_linux(self, tmp_path): + src = tmp_path / "src.mkv"; src.touch() + with patch.object(EncodeWorkflow, "_qsv_device", return_value="/dev/dri/renderD129"): + cmd = self.wf.build_command_single( + _make_config( + src, + tmp_path / "out.mkv", + video=_make_video_settings(codec="hevc_qsv"), + ) + ) + + assert "-qsv_device" in cmd + assert cmd[cmd.index("-qsv_device") + 1] == 
"/dev/dri/renderD129" + assert cmd.index("-qsv_device") < cmd.index("-i") + assert "-hwaccel" in cmd and cmd[cmd.index("-hwaccel") + 1] == "qsv" + assert "-hwaccel_output_format" in cmd + assert cmd[cmd.index("-hwaccel_output_format") + 1] == "qsv" + + def test_qsv_without_resolved_device_keeps_legacy_hwaccel_flags(self, tmp_path): + src = tmp_path / "src.mkv"; src.touch() + with patch.object(EncodeWorkflow, "_qsv_device", return_value=None): + cmd = self.wf.build_command_single( + _make_config( + src, + tmp_path / "out.mkv", + video=_make_video_settings(codec="h264_qsv"), + ) + ) + + assert "-qsv_device" not in cmd + assert "-hwaccel" in cmd and cmd[cmd.index("-hwaccel") + 1] == "qsv" + + def test_qsv_windows_single_pass_adds_explicit_adapter_index(self, tmp_path): + src = tmp_path / "src.mkv"; src.touch() + with patch("core.workflows.encode.workflow.sys.platform", "win32"), \ + patch.object(EncodeWorkflow, "_qsv_device", return_value="1"): + cmd = self.wf.build_command_single( + _make_config( + src, + tmp_path / "out.mkv", + video=_make_video_settings(codec="hevc_qsv"), + ) + ) + + assert "-qsv_device" in cmd + assert cmd[cmd.index("-qsv_device") + 1] == "1" + assert "-hwaccel" in cmd and cmd[cmd.index("-hwaccel") + 1] == "qsv" + + def test_amf_windows_single_pass_adds_named_d3d11_device(self, tmp_path): + src = tmp_path / "src.mkv"; src.touch() + with patch("core.workflows.encode.workflow.sys.platform", "win32"), \ + patch.object(EncodeWorkflow, "_amf_device", return_value="2"): + cmd = self.wf.build_command_single( + _make_config( + src, + tmp_path / "out.mkv", + video=_make_video_settings(codec="h264_amf"), + ) + ) + + assert "-init_hw_device" in cmd + assert cmd[cmd.index("-init_hw_device") + 1] == "d3d11va=mre_amf:2" + assert "-filter_hw_device" in cmd + assert cmd[cmd.index("-filter_hw_device") + 1] == "mre_amf" + assert "-hwaccel_device" in cmd + assert cmd[cmd.index("-hwaccel_device") + 1] == "mre_amf" + + def 
test_amf_windows_tonemap_adds_hwupload_to_selected_device(self, tmp_path): + src = tmp_path / "src.mkv"; src.touch() + with patch("core.workflows.encode.workflow.sys.platform", "win32"), \ + patch.object(EncodeWorkflow, "_amf_device", return_value="2"): + cmd = self.wf.build_command_single( + _make_config( + src, + tmp_path / "out.mkv", + video=_make_video_settings(codec="hevc_amf", tonemap_to_sdr=True), + ) + ) + + assert "-init_hw_device" in cmd + assert "-vf" in cmd + assert cmd[cmd.index("-vf") + 1].endswith("format=nv12,hwupload") + assert "-hwaccel_output_format" not in cmd + + def test_nvenc_windows_single_pass_adds_explicit_gpu_index(self, tmp_path): + src = tmp_path / "src.mkv"; src.touch() + with patch("core.workflows.encode.workflow.sys.platform", "win32"), \ + patch.object(EncodeWorkflow, "_nvenc_device", return_value="1"): + cmd = self.wf.build_command_single( + _make_config( + src, + tmp_path / "out.mkv", + video=_make_video_settings(codec="h264_nvenc"), + ) + ) + + assert "-hwaccel_device" in cmd + assert cmd[cmd.index("-hwaccel_device") + 1] == "1" + assert "-gpu" in cmd + assert cmd[cmd.index("-gpu") + 1] == "1" + + def test_libx264_force_8bit_sets_yuv420p(self, tmp_path): + src = tmp_path / "src.mkv"; src.touch() + cmd = self.wf.build_command_single( + _make_config( + src, + tmp_path / "out.mkv", + video=_make_video_settings(codec="libx264", force_8bit=True), + ) + ) + + assert "-pix_fmt" in cmd + assert cmd[cmd.index("-pix_fmt") + 1] == "yuv420p" + def test_non_vaapi_codec_does_not_receive_vaapi_args(self, tmp_path): src = tmp_path / "src.mkv"; src.touch() with patch.object(EncodeWorkflow, "_vaapi_device", return_value="/dev/dri/renderD128"): @@ -1258,7 +1614,7 @@ def _vs(self, codec: str, master: str = "G(8500,39850)B(6550,2300)R(35400,14600) def test_libx265_color_flags_only_no_standalone_master_display(self): """libx265 : couleur seulement — master_display/max_cll via -x265-params.""" - args = self.wf._hdr_meta_args(self._vs("libx265")) + args 
= _hdr_meta_args(self._vs("libx265")) assert "-color_primaries" in args assert "-master_display" not in args assert "-max_cll" not in args @@ -1291,7 +1647,7 @@ def test_libx265_x265_params_merges_extra_params(self, tmp_path): def test_hevc_nvenc_adds_master_display_and_max_cll(self): """hevc_nvenc : -master_display et -max_cll ajoutés comme options encoder.""" - args = self.wf._hdr_meta_args(self._vs("hevc_nvenc")) + args = _hdr_meta_args(self._vs("hevc_nvenc")) assert "-color_primaries" in args assert "-master_display" in args assert "-max_cll" in args @@ -1302,39 +1658,39 @@ def test_hevc_nvenc_no_master_display_when_empty(self): """hevc_nvenc : pas de -master_display si master_display vide.""" vs = _make_video_settings(codec="hevc_nvenc", inject_hdr_meta=True, master_display="", max_cll="") - args = self.wf._hdr_meta_args(vs) + args = _hdr_meta_args(vs) assert "-master_display" not in args assert "-max_cll" not in args assert "-color_primaries" in args def test_hevc_amf_color_flags_only(self): """hevc_amf : couleur seulement, pas de master_display/max_cll.""" - args = self.wf._hdr_meta_args(self._vs("hevc_amf")) + args = _hdr_meta_args(self._vs("hevc_amf")) assert "-color_primaries" in args assert "-master_display" not in args assert "-max_cll" not in args def test_hevc_qsv_color_flags_only(self): """hevc_qsv : couleur seulement.""" - args = self.wf._hdr_meta_args(self._vs("hevc_qsv")) + args = _hdr_meta_args(self._vs("hevc_qsv")) assert "-color_primaries" in args assert "-master_display" not in args def test_libsvtav1_color_flags_only(self): """libsvtav1 : couleur seulement, pas de master_display/max_cll.""" - args = self.wf._hdr_meta_args(self._vs("libsvtav1")) + args = _hdr_meta_args(self._vs("libsvtav1")) assert "-color_primaries" in args assert "-master_display" not in args def test_copy_no_color_flags(self): """copy : aucun flag de couleur — pas pertinent pour un stream copié.""" - args = self.wf._hdr_meta_args(self._vs("copy")) + args = 
_hdr_meta_args(self._vs("copy")) assert args == [] def test_h264_codecs_no_flags(self): """h264_* et libx264 : aucun flag HDR — H.264 est SDR.""" for codec in ("libx264", "h264_nvenc", "h264_amf", "h264_qsv"): - args = self.wf._hdr_meta_args(self._vs(codec)) + args = _hdr_meta_args(self._vs(codec)) assert args == [], f"{codec} devrait retourner [] mais retourne {args}" @@ -1550,8 +1906,8 @@ def test_copy_size_mode_forces_single_pass_with_map_metadata(self, tmp_path): """codec=copy en mode SIZE reste en single-pass (pas de 2-pass).""" src = tmp_path / "src.mkv"; src.touch() vs = _make_video_settings(codec="copy", quality_mode=QualityMode.SIZE) - cmd = self.wf.build_command( - _make_config(src, tmp_path / "out.mkv", video=vs, duration_s=3600.0) + cmd = _as_single_command( + self.wf.build_command(_make_config(src, tmp_path / "out.mkv", video=vs, duration_s=3600.0)) ) assert isinstance(cmd[0], str), "codec=copy doit retourner list[str] (single-pass)" assert "-pass" not in cmd @@ -1628,7 +1984,7 @@ def test_copy_codec_does_not_call_metadata_inject(self, tmp_path, copy_dv, copy_ original = wf._run_with_metadata_inject - def _spy(cfg): + def _spy(cfg, **_kwargs): inject_called[0] = True return original(cfg) @@ -1670,7 +2026,7 @@ def test_non_copy_with_copy_dv_calls_metadata_inject(self, tmp_path): wf = _make_workflow() inject_called = [False] - def _spy(cfg): + def _spy(cfg, **_kwargs): inject_called[0] = True return MagicMock() @@ -1694,7 +2050,7 @@ def test_non_copy_with_copy_dv_but_no_hdr_presence_uses_standard_runner(self, tm wf = _make_workflow() inject_called = [False] - def _spy(_cfg): + def _spy(_cfg, **_kwargs): inject_called[0] = True return MagicMock() @@ -1927,6 +2283,26 @@ def _fake_subprocess_run(cmd, *args, **kwargs): with patch("subprocess.run", side_effect=_fake_subprocess_run): assert wf._detect_source_dynamic_hdr_presence(src) == (True, True) + def test_detect_source_dynamic_hdr_presence_falls_back_to_ffprobe_frames(self, tmp_path): + src = tmp_path / 
"source.mkv" + src.write_bytes(b"\x00" * 1000) + wf = _make_workflow() + + with patch.object( + wf, + "_ffprobe_streams_payload", + return_value={"streams": [{"codec_type": "video", "side_data_list": []}]}, + ), patch.object( + wf, + "_mediainfo_hdr_flags", + return_value=None, + ), patch.object( + wf, + "_ffprobe_frame_dynamic_hdr_flags", + return_value=(True, True), + ): + assert wf._detect_source_dynamic_hdr_presence(src) == (True, True) + class TestIntegratedMetadataCommand: def test_tag_overrides_disable_metadata_copy(self, tmp_path): @@ -2445,7 +2821,7 @@ def _run_inject(self, tmp_path: Path, **config_overrides) -> list[list[str]]: file_title="Film DV", ) default_cfg.update(config_overrides) - config = _make_config(**default_cfg) + config = _make_config(**cast(Any, default_cfg)) wf = EncodeWorkflow( ffmpeg_bin="ffmpeg", dovi_tool_bin="dovi_tool", @@ -2628,7 +3004,8 @@ def _pass2(self, tmp_path: Path, extras: list[Path]) -> list[str]: _make_config(src, tmp_path / "out.mkv", video=vs, duration_s=3600.0, extra_attachments=extras) ) - return cmds[1] # pass2 + assert cmds and isinstance(cmds[0], list) + return cast(list[list[str]], cmds)[1] # pass2 # ── single pass — présence et valeurs ──────────────────────────────────── @@ -3076,7 +3453,7 @@ def _fake_run(cmd, signals=None, cwd=None, progress_cb=None): map_values = [cmd[i + 1] for i, tok in enumerate(cmd[:-1]) if tok == "-map"] assert "4:0" in map_values - def test_prepare_multisource_sync_prefers_live_on_posix(self, tmp_path, monkeypatch): + def test_prepare_multisource_sync_prefers_live_when_available(self, tmp_path, monkeypatch): src_main = tmp_path / "main.mkv" src_alt = tmp_path / "alt.mkv" out = tmp_path / "out.mkv" @@ -3114,8 +3491,9 @@ def prepare_from_mapped_tracks_mmap(self, **_kwargs): calls["file"] += 1 return [SimpleNamespace(key=(1, 1, "audio"), path=sync_audio, input_idx=2)] + # The syncer is fully stubbed here; patching os.name would leak into pathlib + # on Windows if the assertion fails, which 
breaks pytest error reporting. monkeypatch.setattr("core.workflows.encode.workflow.FfmpegTimelineSync", _FakeSyncer) - monkeypatch.setattr("core.workflows.encode.workflow.os.name", "posix", raising=False) remap, sync_inputs, live, strict = wf._prepare_multisource_sync( config=cfg, @@ -3171,7 +3549,6 @@ def prepare_from_mapped_tracks(self, **_kwargs): pytest.fail("file fallback should not be used when RAM mmap works") monkeypatch.setattr("core.workflows.encode.workflow.FfmpegTimelineSync", _FakeSyncer) - monkeypatch.setattr("core.workflows.encode.workflow.os.name", "posix", raising=False) monkeypatch.setattr(EncodeWorkflow, "_ram_buffer_dir", staticmethod(lambda: ram_dir)) remap, sync_inputs, live, strict = wf._prepare_multisource_sync( @@ -3332,6 +3709,466 @@ def test_runtime_two_pass_applies_offsets_after_sync_remap(self, tmp_path): map_values = [pass2[i + 1] for i, tok in enumerate(pass2[:-1]) if tok == "-map"] assert "3:0" in map_values + def test_run_multi_video_pipeline_applies_audio_and_subtitle_offsets(self, tmp_path): + src = tmp_path / "src.mkv" + out = tmp_path / "out.mkv" + src.touch() + + video_a = _make_video_settings(codec="copy", source_path=src, stream_index=0) + video_b = _make_video_settings(codec="copy", source_path=src, stream_index=1) + cfg = _make_config( + src, + out, + video=video_a, + video_tracks=[video_a, video_b], + audio_tracks=[AudioTrackSettings(stream_index=2, codec="copy", source_path=src)], + subtitle_tracks=[(src, 3)], + copy_subtitles=False, + track_time_offsets=[ + TrackTimeOffset(track_type="audio", source_path=src, stream_index=2, offset_ms=120), + TrackTimeOffset(track_type="subtitle", source_path=src, stream_index=3, offset_ms=-80), + ], + ) + + wf = _make_workflow() + captured: list[list[str]] = [] + + def _fake_run_cmd(cmd, **_kwargs): + captured.append(list(cmd)) + return "ok" + + with patch.object(wf._runner, "_run_cmd", side_effect=_fake_run_cmd), patch.object( + wf, + "_prepare_multisource_sync", + return_value=({}, 
[], None, False), + ): + wf._run_multi_video_pipeline(cfg, cleanup_paths=[], prep_signals=TaskSignals()) + + final_cmd = captured[-1] + assert "-itsoffset" in final_cmd + assert final_cmd[final_cmd.index("-itsoffset") + 1] == "0.120" + assert "-ss" in final_cmd + assert final_cmd[final_cmd.index("-ss") + 1] == "0.080" + map_values = [final_cmd[i + 1] for i, tok in enumerate(final_cmd[:-1]) if tok == "-map"] + assert "3:2" in map_values + assert "4:3" in map_values + + def test_run_multi_video_pipeline_keeps_video_offsets_stream_specific(self, tmp_path): + src = tmp_path / "src.mkv" + out = tmp_path / "out.mkv" + src.touch() + + video_a = _make_video_settings(codec="copy", source_path=src, stream_index=0) + video_b = _make_video_settings(codec="copy", source_path=src, stream_index=1) + cfg = _make_config( + src, + out, + video=video_a, + video_tracks=[video_a, video_b], + audio_tracks=[], + subtitle_tracks=[], + copy_subtitles=False, + track_time_offsets=[ + TrackTimeOffset(track_type="video", source_path=src, stream_index=1, offset_ms=1000), + ], + ) + + wf = _make_workflow() + captured: list[list[str]] = [] + + def _fake_run_cmd(cmd, **_kwargs): + captured.append(list(cmd)) + return "ok" + + with patch.object(wf._runner, "_run_cmd", side_effect=_fake_run_cmd), patch.object( + wf, + "_prepare_multisource_sync", + return_value=({}, [], None, False), + ): + wf._run_multi_video_pipeline(cfg, cleanup_paths=[], prep_signals=TaskSignals()) + + final_cmd = captured[-1] + assert final_cmd.count("-itsoffset") == 1 + assert final_cmd[final_cmd.index("-itsoffset") + 1] == "1.000" + map_values = [final_cmd[i + 1] for i, tok in enumerate(final_cmd[:-1]) if tok == "-map"] + assert "0:0" in map_values + assert "1:1" in map_values + + def test_run_with_preparation_multi_video_binds_post_actions(self, tmp_path): + src = tmp_path / "src.mkv" + out = tmp_path / "out.mkv" + src.touch() + work_dir = tmp_path / "work" + work_dir.mkdir() + + video_a = _make_video_settings(codec="copy", 
source_path=src, stream_index=0) + video_b = _make_video_settings(codec="copy", source_path=src, stream_index=1) + cfg = _make_config( + src, + out, + video=video_a, + video_tracks=[video_a, video_b], + audio_tracks=[], + copy_subtitles=False, + ) + + wf = _make_workflow() + prep_signals = TaskSignals() + inner_signals = TaskSignals() + logs: list[tuple[str, str]] = [] + wf.log_message.connect(lambda level, message: logs.append((str(level), str(message)))) + + with patch( + "core.workflows.encode.workflow.prepare_process_work_dir", + return_value=work_dir, + ), patch( + "core.workflows.encode.workflow.relocate_tmdb_covers_to_process_dir", + return_value=[], + ), patch.object( + wf, + "_prepare_attachment_config", + return_value=(cfg, None), + ), patch.object( + wf, + "_run_multi_video_pipeline", + return_value=inner_signals, + ) as run_multi, patch.object( + wf, + "_bind_temp_cleanup", + ) as bind_cleanup, patch.object( + wf, + "_bind_matroska_segment_muxing_patch", + ) as bind_mux, patch.object( + wf, + "_bind_nfo_write", + ) as bind_nfo: + result = wf._run_with_preparation(cfg, validate=False, prep_signals=prep_signals) + + assert result is inner_signals + run_multi.assert_called_once() + assert bind_cleanup.call_count == 2 + bind_cleanup.assert_any_call(prep_signals, [work_dir]) + bind_cleanup.assert_any_call(inner_signals, [work_dir]) + assert bind_mux.call_count == 2 + bind_mux.assert_any_call(prep_signals, out) + bind_mux.assert_any_call(inner_signals, out) + assert bind_nfo.call_count == 2 + bind_nfo.assert_any_call(prep_signals, out) + bind_nfo.assert_any_call(inner_signals, out) + assert any( + level == "INFO" + and message == "STEP 4 - Routage du workflow (pipeline multi-pistes vidéo)" + for level, message in logs + ) + + def test_run_with_preparation_prebinds_hooks_before_sync_multi_video_pipeline(self, tmp_path): + src = tmp_path / "src.mkv" + out = tmp_path / "out.mkv" + src.touch() + work_dir = tmp_path / "work" + work_dir.mkdir() + + video_a = 
_make_video_settings(codec="copy", source_path=src, stream_index=0) + video_b = _make_video_settings(codec="copy", source_path=src, stream_index=1) + cfg = _make_config( + src, + out, + video=video_a, + video_tracks=[video_a, video_b], + audio_tracks=[], + copy_subtitles=False, + ) + + wf = _make_workflow() + prep_signals = TaskSignals() + call_order: list[str] = [] + + def _fake_bind_cleanup(signals, _paths): + assert signals is prep_signals + call_order.append("bind_cleanup") + + def _fake_bind_mux(signals, _out): + assert signals is prep_signals + call_order.append("bind_mux") + + def _fake_bind_nfo(signals, _out): + assert signals is prep_signals + call_order.append("bind_nfo") + + def _fake_run_multi(_cfg, _cleanup_paths, *, prep_signals=None, plan=None): + assert prep_signals is prep_signals_ref + assert plan is not None + call_order.append("run_multi") + assert call_order[:3] == ["bind_cleanup", "bind_mux", "bind_nfo"] + prep_signals_ref.finished.emit("done") + return prep_signals_ref + + prep_signals_ref = prep_signals + + with patch( + "core.workflows.encode.workflow.prepare_process_work_dir", + return_value=work_dir, + ), patch( + "core.workflows.encode.workflow.relocate_tmdb_covers_to_process_dir", + return_value=[], + ), patch.object( + wf, + "_prepare_attachment_config", + return_value=(cfg, None), + ), patch.object( + wf, + "_run_multi_video_pipeline", + side_effect=_fake_run_multi, + ), patch.object( + wf, + "_bind_temp_cleanup", + side_effect=_fake_bind_cleanup, + ), patch.object( + wf, + "_bind_matroska_segment_muxing_patch", + side_effect=_fake_bind_mux, + ), patch.object( + wf, + "_bind_nfo_write", + side_effect=_fake_bind_nfo, + ): + result = wf._run_with_preparation(cfg, validate=False, prep_signals=prep_signals) + + assert result is prep_signals + assert call_order[:4] == ["bind_cleanup", "bind_mux", "bind_nfo", "run_multi"] + + def test_run_with_preparation_prebinds_hooks_before_sync_direct_output(self, tmp_path): + src = tmp_path / "src.mkv" 
+ out = tmp_path / "out.mkv" + src.touch() + work_dir = tmp_path / "work" + work_dir.mkdir() + + video = _make_video_settings(codec="libx265", source_path=src, stream_index=0) + cfg = _make_config( + src, + out, + video=video, + video_tracks=[video], + audio_tracks=[], + copy_subtitles=False, + ) + + wf = _make_workflow() + prep_signals = TaskSignals() + call_order: list[str] = [] + + def _fake_bind_cleanup(signals, _paths): + assert signals is prep_signals + call_order.append("bind_cleanup") + + def _fake_bind_mux(signals, _out): + assert signals is prep_signals + call_order.append("bind_mux") + + def _fake_bind_nfo(signals, _out): + assert signals is prep_signals + call_order.append("bind_nfo") + + def _fake_run_direct(_cfg, _cleanup_paths, *, prep_signals=None, plan=None): + assert prep_signals is prep_signals_ref + assert plan is not None + call_order.append("run_direct") + assert call_order[:3] == ["bind_cleanup", "bind_mux", "bind_nfo"] + prep_signals_ref.finished.emit("done") + return prep_signals_ref + + prep_signals_ref = prep_signals + + with patch( + "core.workflows.encode.workflow.prepare_process_work_dir", + return_value=work_dir, + ), patch( + "core.workflows.encode.workflow.relocate_tmdb_covers_to_process_dir", + return_value=[], + ), patch.object( + wf, + "_prepare_attachment_config", + return_value=(cfg, None), + ), patch.object( + wf, + "_run_direct_output", + side_effect=_fake_run_direct, + ), patch.object( + wf, + "_bind_temp_cleanup", + side_effect=_fake_bind_cleanup, + ), patch.object( + wf, + "_bind_matroska_segment_muxing_patch", + side_effect=_fake_bind_mux, + ), patch.object( + wf, + "_bind_nfo_write", + side_effect=_fake_bind_nfo, + ): + result = wf._run_with_preparation(cfg, validate=False, prep_signals=prep_signals) + + assert result is prep_signals + assert call_order[:4] == ["bind_cleanup", "bind_mux", "bind_nfo", "run_direct"] + + def test_run_multi_video_pipeline_serializes_same_resource(self, tmp_path): + src = tmp_path / "src.mkv" 
+ out = tmp_path / "out.mkv" + src.touch() + + video_a = _make_video_settings(codec="libx264", source_path=src, stream_index=0) + video_b = _make_video_settings(codec="libx265", source_path=src, stream_index=1) + cfg = _make_config( + src, + out, + video=video_a, + video_tracks=[video_a, video_b], + audio_tracks=[], + subtitle_tracks=[], + copy_subtitles=False, + ) + + wf = _make_workflow(max_parallel_video_encodes=2) + active = 0 + max_active = 0 + gate = threading.Lock() + + def _fake_run_cmd(_cmd, **kwargs): + nonlocal active, max_active + label = str(kwargs.get("label", "")) + if label.startswith("ffmpeg-video-"): + with gate: + active += 1 + max_active = max(max_active, active) + time.sleep(0.06) + with gate: + active -= 1 + return "ok" + + with patch.object(wf._runner, "_run_cmd", side_effect=_fake_run_cmd), patch.object( + wf, + "_prepare_multisource_sync", + return_value=({}, [], None, False), + ): + wf._run_multi_video_pipeline(cfg, cleanup_paths=[], prep_signals=TaskSignals()) + + assert max_active == 1 + + def test_run_multi_video_pipeline_parallelizes_disjoint_resources(self, tmp_path): + src = tmp_path / "src.mkv" + out = tmp_path / "out.mkv" + src.touch() + + video_a = _make_video_settings(codec="libx264", source_path=src, stream_index=0) + video_b = _make_video_settings(codec="h264_nvenc", source_path=src, stream_index=1) + cfg = _make_config( + src, + out, + video=video_a, + video_tracks=[video_a, video_b], + audio_tracks=[], + subtitle_tracks=[], + copy_subtitles=False, + ) + + wf = _make_workflow(max_parallel_video_encodes=2) + active = 0 + max_active = 0 + gate = threading.Lock() + + def _fake_run_cmd(_cmd, **kwargs): + nonlocal active, max_active + label = str(kwargs.get("label", "")) + if label.startswith("ffmpeg-video-"): + with gate: + active += 1 + max_active = max(max_active, active) + time.sleep(0.06) + with gate: + active -= 1 + return "ok" + + with patch.object(wf._runner, "_run_cmd", side_effect=_fake_run_cmd), patch.object( + wf, + 
"_prepare_multisource_sync", + return_value=({}, [], None, False), + ): + wf._run_multi_video_pipeline(cfg, cleanup_paths=[], prep_signals=TaskSignals()) + + assert max_active >= 2 + + def test_run_multi_video_pipeline_scales_threads_for_disjoint_resources(self, tmp_path): + src = tmp_path / "src.mkv" + out = tmp_path / "out.mkv" + src.touch() + + video_a = _make_video_settings(codec="libx264", source_path=src, stream_index=0) + video_b = _make_video_settings(codec="h264_nvenc", source_path=src, stream_index=1) + cfg = _make_config( + src, + out, + video=video_a, + video_tracks=[video_a, video_b], + audio_tracks=[], + subtitle_tracks=[], + copy_subtitles=False, + ) + + wf = _make_workflow(ffmpeg_threads=12, max_parallel_video_encodes=2) + commands_by_label: dict[str, list[str]] = {} + + def _fake_run_cmd(cmd, **kwargs): + label = str(kwargs.get("label", "")) + commands_by_label[label] = list(cmd) + return "ok" + + with patch.object(wf._runner, "_run_cmd", side_effect=_fake_run_cmd), patch.object( + wf, + "_prepare_multisource_sync", + return_value=({}, [], None, False), + ): + wf._run_multi_video_pipeline(cfg, cleanup_paths=[], prep_signals=TaskSignals()) + + assert commands_by_label["ffmpeg-video-1"][commands_by_label["ffmpeg-video-1"].index("-threads") + 1] == "6" + assert commands_by_label["ffmpeg-video-2"][commands_by_label["ffmpeg-video-2"].index("-threads") + 1] == "6" + assert commands_by_label["ffmpeg-multi-video"][commands_by_label["ffmpeg-multi-video"].index("-threads") + 1] == "12" + + def test_run_multi_video_pipeline_keeps_full_threads_for_serialized_resource(self, tmp_path): + src = tmp_path / "src.mkv" + out = tmp_path / "out.mkv" + src.touch() + + video_a = _make_video_settings(codec="libx264", source_path=src, stream_index=0) + video_b = _make_video_settings(codec="libx265", source_path=src, stream_index=1) + cfg = _make_config( + src, + out, + video=video_a, + video_tracks=[video_a, video_b], + audio_tracks=[], + subtitle_tracks=[], + 
copy_subtitles=False, + ) + + wf = _make_workflow(ffmpeg_threads=12, max_parallel_video_encodes=2) + commands_by_label: dict[str, list[str]] = {} + + def _fake_run_cmd(cmd, **kwargs): + label = str(kwargs.get("label", "")) + commands_by_label[label] = list(cmd) + return "ok" + + with patch.object(wf._runner, "_run_cmd", side_effect=_fake_run_cmd), patch.object( + wf, + "_prepare_multisource_sync", + return_value=({}, [], None, False), + ): + wf._run_multi_video_pipeline(cfg, cleanup_paths=[], prep_signals=TaskSignals()) + + assert commands_by_label["ffmpeg-video-1"][commands_by_label["ffmpeg-video-1"].index("-threads") + 1] == "12" + assert commands_by_label["ffmpeg-video-2"][commands_by_label["ffmpeg-video-2"].index("-threads") + 1] == "12" + def test_prepare_multisource_sync_disables_live_when_foreign_offset(self, tmp_path, monkeypatch): src_main = tmp_path / "main.mkv" src_alt = tmp_path / "alt.mkv" @@ -3362,7 +4199,7 @@ def prepare(self, **kwargs): return SimpleNamespace(prepared_inputs=[], live_session=None) monkeypatch.setattr("core.workflows.encode.workflow.TimelineSyncFallbackHelper", _FakeHelper) - monkeypatch.setattr(wf._postproc_helper, "_decide_strict_interleave_with_prescan", lambda _cfg: True) + monkeypatch.setattr(wf._postprocess_service, "decide_strict_interleave_with_prescan", lambda _cfg, *, log_cb: True) remap, sync_inputs, live, strict = wf._prepare_multisource_sync( config=cfg, @@ -3413,7 +4250,7 @@ def prepare(self, **kwargs): return SimpleNamespace(prepared_inputs=[], live_session=None) monkeypatch.setattr("core.workflows.encode.workflow.TimelineSyncFallbackHelper", _FakeHelper) - monkeypatch.setattr(wf._postproc_helper, "_decide_strict_interleave_with_prescan", lambda _cfg: False) + monkeypatch.setattr(wf._postprocess_service, "decide_strict_interleave_with_prescan", lambda _cfg, *, log_cb: False) remap, sync_inputs, live, strict = wf._prepare_multisource_sync( config=cfg, @@ -3465,9 +4302,9 @@ def prepare(self, **kwargs): 
monkeypatch.setattr("core.workflows.encode.workflow.TimelineSyncFallbackHelper", _FakeHelper) monkeypatch.setattr( - wf._postproc_helper, - "_decide_strict_interleave_with_prescan", - lambda _cfg: pytest.fail("subtitle prescan must be skipped when foreign offset already forces sync"), + wf._postprocess_service, + "decide_strict_interleave_with_prescan", + lambda _cfg, *, log_cb: pytest.fail("subtitle prescan must be skipped when foreign offset already forces sync"), ) remap, sync_inputs, live, strict = wf._prepare_multisource_sync( @@ -3573,9 +4410,9 @@ def test_merge_fr_FR_in_track_meta_edit(self, tmp_path): _merge_remux_extras avec une piste language='fr-FR' doit produire un TrackMetaEdit avec language='fr-FR' (conservé tel quel). """ + from core.workflows.encode.remux_bridge import merge_remux_into_encode_config from core.workflows.remux_models import RemuxConfig, SourceInput, TrackEntry from core.workflows.encode.models import EncodeConfig, VideoEncodeSettings - from ui.main_window import MainWindow src = tmp_path / "src.mkv" src.touch() @@ -3600,7 +4437,7 @@ def test_merge_fr_FR_in_track_meta_edit(self, tmp_path): audio_tracks=[], ) - result = MainWindow._merge_remux_extras(None, enc, rmx) + result = merge_remux_into_encode_config(enc, rmx) assert result.track_meta_edits, "Aucun TrackMetaEdit généré" edit = result.track_meta_edits[0] diff --git a/tests/test_inspector.py b/tests/test_inspector.py index 0dd43e6..d429642 100644 --- a/tests/test_inspector.py +++ b/tests/test_inspector.py @@ -37,6 +37,7 @@ import json from pathlib import Path +from typing import Any, cast from unittest.mock import MagicMock, patch import pytest @@ -230,7 +231,7 @@ def _make(self, **kwargs) -> VideoTrack: color_matrix="bt2020nc", ) defaults.update(kwargs) - return VideoTrack(**defaults) + return VideoTrack(**cast(Any, defaults)) def test_resolution(self): t = self._make(width=1920, height=1080) @@ -267,7 +268,7 @@ def _make(self, **kwargs) -> AudioTrack: language="eng", 
title="Atmos", ) defaults.update(kwargs) - return AudioTrack(**defaults) + return AudioTrack(**cast(Any, defaults)) def test_channels_label_from_layout(self): t = self._make(channel_layout="7.1") @@ -309,7 +310,7 @@ def _make(self, **kwargs) -> FileInfo: bit_rate=11943000, ) defaults.update(kwargs) - return FileInfo(**defaults) + return FileInfo(**cast(Any, defaults)) def test_size_human_go(self): info = self._make(size_bytes=10 * (1 << 30)) @@ -534,9 +535,9 @@ def _patch_ffprobe(self, raw: dict): """Patch _run_ffprobe pour retourner le dict donné.""" return patch.object(self.insp, "_run_ffprobe", return_value=raw) - def _patch_mi(self, dovi: bool = False, hdr10plus: bool = False): + def _patch_mi(self, dovi: bool = False, hdr10plus: bool = False, responded: bool = True): return patch.object( - self.insp, "_mediainfo_hdr_flags", return_value=(dovi, hdr10plus) + self.insp, "_mediainfo_hdr_flags", return_value=(dovi, hdr10plus, responded) ) def test_sdr_returns_none(self): @@ -608,13 +609,78 @@ def test_hdr10plus_via_mediainfo_fallback(self): with self._patch_ffprobe(raw), self._patch_mi(hdr10plus=True): assert self.insp.detect_hdr_type(self.path) == HDRType.HDR10PLUS - def test_hlg_detected_as_hdr10(self): + def test_hdr10plus_via_ffprobe_frame_fallback(self): + # mediainfo absent / silencieux : seul cas où le frame-level fallback + # se déclenche encore (économie de 3-6 s sur les fichiers HDR10/DV). 
+ raw = _make_ffprobe_output(video_streams=[_video_stream( + color_transfer="smpte2084", + side_data_list=[{"side_data_type": "DOVI configuration record"}], + )]) + with self._patch_ffprobe(raw), self._patch_mi(responded=False), patch.object( + self.insp, + "_ffprobe_frame_dynamic_hdr_flags", + return_value=(True, True), + ): + assert self.insp.detect_hdr_type(self.path) == HDRType.DOLBY_VISION_HDR10PLUS + + def test_frame_fallback_skipped_when_mediainfo_responded(self): + # Même cas que ci-dessus mais mediainfo a répondu (négatif pour HDR10+) + # → le frame-level NE doit PAS être appelé même si on garde un mock. + raw = _make_ffprobe_output(video_streams=[_video_stream( + color_transfer="smpte2084", + side_data_list=[{"side_data_type": "DOVI configuration record"}], + )]) + frame_mock = MagicMock(return_value=(True, True)) + with self._patch_ffprobe(raw), self._patch_mi(responded=True), patch.object( + self.insp, "_ffprobe_frame_dynamic_hdr_flags", frame_mock, + ): + assert self.insp.detect_hdr_type(self.path) == HDRType.DOLBY_VISION + frame_mock.assert_not_called() + + def test_hlg_detected_as_hlg(self): raw = _make_ffprobe_output(video_streams=[_video_stream( color_transfer="arib-std-b67", side_data_list=[], )]) with self._patch_ffprobe(raw), self._patch_mi(): - assert self.insp.detect_hdr_type(self.path) == HDRType.HDR10 + assert self.insp.detect_hdr_type(self.path) == HDRType.HLG + + def test_attached_pic_is_ignored_for_hdr_detection(self): + raw = _make_ffprobe_output(video_streams=[ + _video_stream( + index=0, + codec_name="mjpeg", + color_transfer="bt709", + side_data_list=[], + disposition={"attached_pic": 1}, + ), + _video_stream( + index=1, + color_transfer="smpte2084", + side_data_list=[ + {"side_data_type": "DOVI configuration record"}, + {"side_data_type": "HDR Dynamic Metadata SMPTE2094-40 (HDR10+)"}, + ], + ), + ]) + with self._patch_ffprobe(raw), self._patch_mi(): + assert self.insp.detect_hdr_type(self.path) == HDRType.DOLBY_VISION_HDR10PLUS + + 
def test_dynamic_hdr_can_be_detected_from_secondary_video_stream(self): + raw = _make_ffprobe_output(video_streams=[ + _video_stream( + index=0, + color_transfer="smpte2084", + side_data_list=[{"side_data_type": "Mastering display metadata"}], + ), + _video_stream( + index=1, + color_transfer="smpte2084", + side_data_list=[{"side_data_type": "HDR Dynamic Metadata SMPTE2094-40 (HDR10+)"}], + ), + ]) + with self._patch_ffprobe(raw), self._patch_mi(): + assert self.insp.detect_hdr_type(self.path) == HDRType.HDR10PLUS def test_no_video_stream_returns_none(self): raw = _make_ffprobe_output(video_streams=[]) @@ -671,6 +737,27 @@ def test_returns_dict_on_valid_json(self): out = self.insp._run_ffprobe(self.path) assert out == payload + def test_emits_verbose_lines_for_ffprobe_probe(self, fake_path): + verbose_lines: list[str] = [] + inspector = FileInspector( + ffprobe_bin="ffprobe", + mediainfo_bin="mediainfo", + verbose_output=verbose_lines.append, + ) + payload = {"streams": [], "format": {}, "chapters": []} + result = MagicMock() + result.returncode = 0 + result.stdout = json.dumps(payload) + result.stderr = "" + + with patch("subprocess.run", return_value=result): + out = inspector._run_ffprobe(fake_path) + + assert out == payload + assert any(line.startswith("$ ffprobe ") for line in verbose_lines) + assert any("ffprobe rc=0" in line for line in verbose_lines) + assert any("ffprobe JSON parsé" in line for line in verbose_lines) + # =========================================================================== # FileInspector.inspect — intégration légère @@ -740,6 +827,62 @@ def fake_run(cmd, **kwargs): assert info.frame_count is None # mediainfo absent → None assert isinstance(info, FileInfo) # pas d'exception levée + def test_inspect_emits_verbose_start_and_summary(self): + verbose_lines: list[str] = [] + inspector = FileInspector( + ffprobe_bin="ffprobe", + mediainfo_bin="mediainfo", + verbose_output=verbose_lines.append, + ) + info = FileInfo( + path=self.path, + 
format="matroska,webm", + duration_s=None, + size_bytes=None, + bit_rate=None, + video_tracks=[ + VideoTrack( + index=0, + codec="hevc", + codec_long="H.265", + width=1920, + height=1080, + frame_rate="24000/1001", + bit_depth=10, + color_space="yuv420p10le", + color_primaries="bt2020", + color_transfer="smpte2084", + color_matrix="bt2020nc", + ) + ], + audio_tracks=[ + AudioTrack( + index=1, + codec="eac3", + codec_long="E-AC-3", + channels=6, + channel_layout="5.1", + sample_rate=48000, + bit_rate=640000, + language="fra", + title="VF", + ) + ], + ) + + with ( + patch.object(inspector, "_run_ffprobe", return_value={}), + patch.object(inspector, "_parse_ffprobe", return_value=info), + patch.object(inspector, "get_frame_count", return_value=1200), + patch.object(inspector, "_extract_mkv_track_data_from_raw", return_value=(0, {})), + patch.object(inspector, "_detect_hdr_from_raw", return_value=HDRType.HDR10), + ): + out = inspector.inspect(self.path) + + assert out.frame_count == 1200 + assert any("Inspection démarrée" in line for line in verbose_lines) + assert any("Inspection terminée" in line and "HDR=HDR10" in line for line in verbose_lines) + def _audio_only_info(self, *, fmt: str, language: str, title: str = "") -> FileInfo: return FileInfo( path=self.path, @@ -772,7 +915,7 @@ def test_inspect_regionalizes_short_ietf_from_track_enrichment(self): patch.object(self.insp, "_run_ffprobe", return_value={}), patch.object(self.insp, "_parse_ffprobe", return_value=info), patch.object(self.insp, "get_frame_count", return_value=None), - patch.object(self.insp, "_get_mkv_track_data", return_value=(0, {1: "en"})), + patch.object(self.insp, "_extract_mkv_track_data_from_raw", return_value=(0, {1: "en"})), ): out = self.insp.inspect(self.path) @@ -825,7 +968,7 @@ def test_inspect_keeps_already_regional_tag(self): patch.object(self.insp, "_run_ffprobe", return_value={}), patch.object(self.insp, "_parse_ffprobe", return_value=info), patch.object(self.insp, "get_frame_count", 
return_value=None), - patch.object(self.insp, "_get_mkv_track_data", return_value=(0, {1: "en-GB"})), + patch.object(self.insp, "_extract_mkv_track_data_from_raw", return_value=(0, {1: "en-GB"})), ): out = self.insp.inspect(self.path) @@ -871,7 +1014,9 @@ def test_inspect_canonicalizes_case_for_known_ietf(self): class TestMkvTrackDataFromFfprobe: def test_prefers_language_ietf_and_counts_non_title_tags(self, inspector, fake_path): - payload = { + # La méthode opère désormais sur le ``raw`` ffprobe déjà parsé + # (zéro appel subprocess). Reproduit le contenu attendu directement. + raw = { "format": { "tags": { "TITLE": "Movie", @@ -884,17 +1029,13 @@ def test_prefers_language_ietf_and_counts_non_title_tags(self, inspector, fake_p {"index": 1, "tags": {"language-ietf": "fr-CA", "language": "fra"}}, ], } - run_result = MagicMock(returncode=0, stdout=json.dumps(payload), stderr="") - with patch("subprocess.run", return_value=run_result): - tag_count, lang_map = inspector._get_mkv_track_data(fake_path) + tag_count, lang_map = inspector._extract_mkv_track_data_from_raw(raw) assert tag_count == 2 assert lang_map == {0: "eng", 1: "fr-CA"} - def test_returns_empty_when_ffprobe_fails(self, inspector, fake_path): - run_result = MagicMock(returncode=1, stdout="", stderr="ffprobe error") - with patch("subprocess.run", return_value=run_result): - tag_count, lang_map = inspector._get_mkv_track_data(fake_path) + def test_returns_empty_when_raw_is_empty(self, inspector, fake_path): + tag_count, lang_map = inspector._extract_mkv_track_data_from_raw({}) assert tag_count == 0 assert lang_map == {} diff --git a/tests/test_launcher.py b/tests/test_launcher.py index 116abfc..9cd367a 100644 --- a/tests/test_launcher.py +++ b/tests/test_launcher.py @@ -1,5 +1,6 @@ from __future__ import annotations +import importlib import sys from types import SimpleNamespace from unittest.mock import MagicMock, patch @@ -8,6 +9,17 @@ class TestLauncherWindowsControlledFolderAccess: + def 
test_core_logging_import_stays_headless_without_i18n(self): + previous_logging = sys.modules.pop("core.logging", None) + try: + with patch.dict(sys.modules, {"core.i18n": None}): + module = importlib.import_module("core.logging") + assert module.get_logger("mediarecode.tmdb").name == "mediarecode.tmdb" + finally: + sys.modules.pop("core.logging", None) + if previous_logging is not None: + sys.modules["core.logging"] = previous_logging + def test_restart_current_app_uses_sys_executable_when_frozen(self): with patch.object(launcher.sys, "executable", r"C:\Apps\Mediarecode\mediarecode.exe"), \ patch.object(launcher.sys, "frozen", True, create=True), \ diff --git a/tests/test_main_startup_paths.py b/tests/test_main_startup_paths.py index 42f3a4d..923c63a 100644 --- a/tests/test_main_startup_paths.py +++ b/tests/test_main_startup_paths.py @@ -2,6 +2,7 @@ import types from pathlib import Path +from typing import Callable from unittest.mock import MagicMock, patch import main as main_mod @@ -60,13 +61,14 @@ def __getattr__(self, name): return getattr(fake_window, name) fake_ui_main_window = types.ModuleType("ui.main_window") - fake_ui_main_window.MainWindow = FakeMainWindow + setattr(fake_ui_main_window, "MainWindow", FakeMainWindow) - scheduled: dict[str, object] = {} + scheduled_delay: list[int] = [] + scheduled_callbacks: list[Callable[[], None]] = [] - def fake_single_shot(delay_ms, callback): - scheduled["delay_ms"] = delay_ms - scheduled["callback"] = callback + def fake_single_shot(delay_ms: int, callback: Callable[[], None]) -> None: + scheduled_delay.append(delay_ms) + scheduled_callbacks.append(callback) fake_config = types.SimpleNamespace( theme="dark", @@ -96,6 +98,7 @@ def fake_single_shot(delay_ms, callback): fake_app.setOrganizationName.assert_called_once_with("mediarecode") fake_app.setFont.assert_called_once() fake_window.show.assert_called_once_with() - assert scheduled["delay_ms"] == 0 - scheduled["callback"]() + assert scheduled_delay == [0] + assert 
len(scheduled_callbacks) == 1 + scheduled_callbacks[0]() fake_window.open_startup_paths.assert_called_once_with([startup_file]) diff --git a/tests/test_main_window_progress.py b/tests/test_main_window_progress.py new file mode 100644 index 0000000..313795e --- /dev/null +++ b/tests/test_main_window_progress.py @@ -0,0 +1,246 @@ +from __future__ import annotations + +import json +from types import MethodType, SimpleNamespace +from unittest.mock import MagicMock, patch + +from ui.main_window import ( + MainWindow, + _ENCODE_INTERNAL_PROGRESS_PREFIX, + _multi_encode_remaining_seconds, + _select_multi_encode_label, +) + + +class _FakeProgressBar: + def __init__(self) -> None: + self.value = 0 + + def setValue(self, value: int) -> None: + self.value = value + + +class _FakeLabel: + def __init__(self) -> None: + self.text = "" + + def setText(self, value: str) -> None: + self.text = value + + +class _FakeTimer: + def __init__(self) -> None: + self.active = False + + def isActive(self) -> bool: + return self.active + + def start(self) -> None: + self.active = True + + def stop(self) -> None: + self.active = False + + +def test_select_multi_encode_label_prefers_longest_remaining_and_reswitches() -> None: + now = 100.0 + states = { + "ffmpeg-video-1": { + "started_at": 0.0, + "duration_s": 600.0, + "elapsed_video": 300.0, + "done": False, + "last_update": 90.0, + }, + "ffmpeg-video-2": { + "started_at": 0.0, + "duration_s": 600.0, + "elapsed_video": 100.0, + "done": False, + "last_update": 95.0, + }, + } + + assert _multi_encode_remaining_seconds(states["ffmpeg-video-1"], now) == 100.0 + assert _multi_encode_remaining_seconds(states["ffmpeg-video-2"], now) == 500.0 + assert _select_multi_encode_label(states, "ffmpeg-video-1", now) == "ffmpeg-video-2" + + states["ffmpeg-video-2"]["done"] = True + assert _select_multi_encode_label(states, "ffmpeg-video-2", now) == "ffmpeg-video-1" + + +def test_handle_encode_internal_progress_updates_longest_remaining_bar_and_legend() -> 
None: + dummy = SimpleNamespace() + dummy._running = True + dummy._op_mode = "encode" + dummy._op_encode_multi_targets = { + 1: {"duration_s": 600.0, "total_frames": None, "source_name": "a.mkv"}, + 2: {"duration_s": 1200.0, "total_frames": None, "source_name": "b.mkv"}, + } + dummy._op_encode_multi_state = {} + dummy._op_encode_multi_active_label = None + dummy._op_encode_multi_reselect_timer = _FakeTimer() + dummy._prog_bar = _FakeProgressBar() + dummy._prog_lbl = _FakeLabel() + dummy.log_requested = SimpleNamespace(emit=MagicMock()) + dummy._stop_prep_progress = MagicMock() + + dummy._ensure_multi_encode_state = MethodType(MainWindow._ensure_multi_encode_state, dummy) + dummy._multi_encode_progress_parts = MethodType(MainWindow._multi_encode_progress_parts, dummy) + dummy._reevaluate_multi_encode_progress = MethodType(MainWindow._reevaluate_multi_encode_progress, dummy) + dummy._handle_encode_internal_progress = MethodType(MainWindow._handle_encode_internal_progress, dummy) + + line_track_1 = _ENCODE_INTERNAL_PROGRESS_PREFIX + json.dumps( + {"kind": "encode_ffmpeg", "label": "ffmpeg-video-1", "event": "line", "line": "out_time=00:05:00.000000"}, + ensure_ascii=False, + ) + line_track_2 = _ENCODE_INTERNAL_PROGRESS_PREFIX + json.dumps( + {"kind": "encode_ffmpeg", "label": "ffmpeg-video-2", "event": "line", "line": "out_time=00:02:00.000000"}, + ensure_ascii=False, + ) + done_track_2 = _ENCODE_INTERNAL_PROGRESS_PREFIX + json.dumps( + {"kind": "encode_ffmpeg", "label": "ffmpeg-video-2", "event": "done", "line": ""}, + ensure_ascii=False, + ) + + with patch("ui.main_window.time.monotonic", return_value=300.0): + assert dummy._handle_encode_internal_progress(line_track_1) is True + with patch("ui.main_window.time.monotonic", return_value=600.0): + assert dummy._handle_encode_internal_progress(line_track_2) is True + dummy._op_encode_multi_state["ffmpeg-video-2"]["started_at"] = 0.0 + with patch("ui.main_window.time.monotonic", return_value=600.0): + 
dummy._reevaluate_multi_encode_progress() + + assert dummy._op_encode_multi_active_label == "ffmpeg-video-2" + assert dummy._prog_bar.value == 10 + assert "Piste vidéo 2/2" in dummy._prog_lbl.text + assert "10%" in dummy._prog_lbl.text + + with patch("ui.main_window.time.monotonic", return_value=601.0): + assert dummy._handle_encode_internal_progress(done_track_2) is True + + assert dummy._op_encode_multi_active_label == "ffmpeg-video-1" + assert dummy._prog_bar.value == 50 + assert "Piste vidéo 1/2" in dummy._prog_lbl.text + + +def test_capture_verbose_progress_line_records_wrapped_tool_output(tmp_path) -> None: + dummy = SimpleNamespace() + dummy._config = SimpleNamespace( + enable_file_logging=True, + file_logging_level="verbose", + app_data_dir=tmp_path, + verbose_log_dir=tmp_path / "chosen_logs", + ) + dummy._op_mode = "encode" + dummy._log_panel = SimpleNamespace(log=MagicMock()) + dummy._verbose_log_file_path = None + dummy._verbose_log_session_stamp = None + dummy._verbose_log_file_index = 1 + dummy._verbose_log_file_error_reported = False + dummy._NOISE_RE = MainWindow._NOISE_RE + dummy._encode_panel = SimpleNamespace(get_total_frames=lambda: None) + + dummy._verbose_log_part_path = MethodType(MainWindow._verbose_log_part_path, dummy) + dummy._verbose_log_session_path = MethodType(MainWindow._verbose_log_session_path, dummy) + dummy._prepare_verbose_log_target = MethodType(MainWindow._prepare_verbose_log_target, dummy) + dummy._append_verbose_tool_output = MethodType(MainWindow._append_verbose_tool_output, dummy) + dummy._capture_verbose_progress_line = MethodType(MainWindow._capture_verbose_progress_line, dummy) + + wrapped = _ENCODE_INTERNAL_PROGRESS_PREFIX + json.dumps( + {"kind": "encode_ffmpeg", "label": "ffmpeg-video-1", "event": "line", "line": "out_time=00:00:05.000000"}, + ensure_ascii=False, + ) + + dummy._capture_verbose_progress_line(wrapped) + + log_files = sorted((tmp_path / "chosen_logs").glob("mediarecode-verbose-*.log")) + assert 
len(log_files) == 1 + content = log_files[0].read_text(encoding="utf-8") + assert "[TOOL] [ffmpeg-video-1] out_time=00:00:05.000000" in content + + +def test_capture_verbose_progress_line_records_remux_ffmpeg_progress(tmp_path) -> None: + dummy = SimpleNamespace() + dummy._config = SimpleNamespace( + enable_file_logging=True, + file_logging_level="verbose", + app_data_dir=tmp_path, + verbose_log_dir=tmp_path / "chosen_logs", + ) + dummy._op_mode = "remux" + dummy._log_panel = SimpleNamespace(log=MagicMock()) + dummy._verbose_log_file_path = None + dummy._verbose_log_session_stamp = None + dummy._verbose_log_file_index = 1 + dummy._verbose_log_file_error_reported = False + dummy._NOISE_RE = MainWindow._NOISE_RE + + dummy._verbose_log_part_path = MethodType(MainWindow._verbose_log_part_path, dummy) + dummy._verbose_log_session_path = MethodType(MainWindow._verbose_log_session_path, dummy) + dummy._prepare_verbose_log_target = MethodType(MainWindow._prepare_verbose_log_target, dummy) + dummy._append_verbose_tool_output = MethodType(MainWindow._append_verbose_tool_output, dummy) + dummy._capture_verbose_progress_line = MethodType(MainWindow._capture_verbose_progress_line, dummy) + + dummy._capture_verbose_progress_line("out_time=00:00:10.000000") + + log_files = sorted((tmp_path / "chosen_logs").glob("mediarecode-verbose-*.log")) + assert len(log_files) == 1 + content = log_files[0].read_text(encoding="utf-8") + assert "[TOOL] out_time=00:00:10.000000" in content + + +def test_on_tool_output_requested_records_inspector_verbose_lines(tmp_path) -> None: + dummy = SimpleNamespace() + dummy._config = SimpleNamespace( + enable_file_logging=True, + file_logging_level="verbose", + app_data_dir=tmp_path, + verbose_log_dir=tmp_path / "chosen_logs", + ) + dummy._log_panel = SimpleNamespace(log=MagicMock()) + dummy._verbose_log_file_path = None + dummy._verbose_log_session_stamp = None + dummy._verbose_log_file_index = 1 + dummy._verbose_log_file_error_reported = False + + 
dummy._verbose_log_part_path = MethodType(MainWindow._verbose_log_part_path, dummy) + dummy._verbose_log_session_path = MethodType(MainWindow._verbose_log_session_path, dummy) + dummy._prepare_verbose_log_target = MethodType(MainWindow._prepare_verbose_log_target, dummy) + dummy._append_verbose_tool_output = MethodType(MainWindow._append_verbose_tool_output, dummy) + dummy._on_tool_output_requested = MethodType(MainWindow._on_tool_output_requested, dummy) + + dummy._on_tool_output_requested("inspector", "Inspection démarrée : /tmp/movie.mkv") + + log_files = sorted((tmp_path / "chosen_logs").glob("mediarecode-verbose-*.log")) + assert len(log_files) == 1 + content = log_files[0].read_text(encoding="utf-8") + assert "[TOOL] [inspector] Inspection démarrée : /tmp/movie.mkv" in content + + +def test_standard_file_logging_skips_verbose_tool_lines(tmp_path) -> None: + dummy = SimpleNamespace() + dummy._config = SimpleNamespace( + enable_file_logging=True, + file_logging_level="standard", + app_data_dir=tmp_path, + verbose_log_dir=tmp_path / "chosen_logs", + ) + dummy._op_mode = "encode" + dummy._log_panel = SimpleNamespace(log=MagicMock()) + dummy._NOISE_RE = MainWindow._NOISE_RE + dummy._encode_panel = SimpleNamespace(get_total_frames=lambda: None) + + dummy._append_verbose_tool_output = MethodType(MainWindow._append_verbose_tool_output, dummy) + dummy._capture_verbose_progress_line = MethodType(MainWindow._capture_verbose_progress_line, dummy) + dummy._on_tool_output_requested = MethodType(MainWindow._on_tool_output_requested, dummy) + + wrapped = _ENCODE_INTERNAL_PROGRESS_PREFIX + json.dumps( + {"kind": "encode_ffmpeg", "label": "ffmpeg-video-1", "event": "line", "line": "out_time=00:00:05.000000"}, + ensure_ascii=False, + ) + dummy._capture_verbose_progress_line(wrapped) + dummy._on_tool_output_requested("inspector", "Inspection démarrée : /tmp/movie.mkv") + + log_files = sorted((tmp_path / "chosen_logs").glob("mediarecode-verbose-*.log")) + assert not log_files 
diff --git a/tests/test_main_window_startup_panel.py b/tests/test_main_window_startup_panel.py index 3ad3126..14903fe 100644 --- a/tests/test_main_window_startup_panel.py +++ b/tests/test_main_window_startup_panel.py @@ -2,12 +2,16 @@ tests/test_main_window_startup_panel.py — Mapping startup panel -> index stack. """ +import os +from types import MethodType from types import SimpleNamespace +from typing import Any, cast from unittest.mock import MagicMock, patch from pathlib import Path from PySide6.QtWidgets import QMessageBox +from core.logging import LogLevel, VerboseFileLogger from ui.main_window import LogPanel, MainWindow @@ -42,11 +46,12 @@ def test_scale_change_prompt_can_restart_application(qt_app) -> None: fake_window = SimpleNamespace( _config=SimpleNamespace(restart_application=MagicMock(return_value=True)), ) + window = cast(Any, fake_window) fake_app = SimpleNamespace(quit=MagicMock()) with patch("ui.main_window.QMessageBox.question", return_value=QMessageBox.StandardButton.Yes), \ patch("ui.main_window.QApplication.instance", return_value=fake_app): - MainWindow._prompt_restart_for_scale_change(fake_window, 125) + MainWindow._prompt_restart_for_scale_change(window, 125) fake_window._config.restart_application.assert_called_once_with() fake_app.quit.assert_called_once_with() @@ -56,10 +61,11 @@ def test_scale_change_prompt_shows_warning_when_restart_fails(qt_app) -> None: fake_window = SimpleNamespace( _config=SimpleNamespace(restart_application=MagicMock(return_value=False)), ) + window = cast(Any, fake_window) with patch("ui.main_window.QMessageBox.question", return_value=QMessageBox.StandardButton.Yes), \ patch("ui.main_window.QMessageBox.warning") as mock_warning: - MainWindow._prompt_restart_for_scale_change(fake_window, 150) + MainWindow._prompt_restart_for_scale_change(window, 150) fake_window._config.restart_application.assert_called_once_with() mock_warning.assert_called_once() @@ -74,11 +80,169 @@ def 
test_open_startup_paths_routes_files_to_container_page(tmp_path, qt_app) -> _sidebar=SimpleNamespace(select_page=MagicMock()), _remux_panel=SimpleNamespace(add_sources=MagicMock()), ) + window = cast(Any, fake_window) - MainWindow.open_startup_paths(fake_window, [media_path, Path(tmp_path / "missing.mp4")]) + MainWindow.open_startup_paths(window, [media_path, Path(tmp_path / "missing.mp4")]) fake_window._stack.setCurrentIndex.assert_called_once_with(3) fake_window._sidebar.select_page.assert_called_once_with(3) fake_window._remux_panel.add_sources.assert_called_once() routed_paths = fake_window._remux_panel.add_sources.call_args.args[0] assert routed_paths == [media_path] + + +def test_emit_log_entry_writes_verbose_file_when_enabled(tmp_path) -> None: + fake_window = SimpleNamespace( + _config=SimpleNamespace( + enable_file_logging=True, + file_logging_level="verbose", + app_data_dir=tmp_path, + verbose_log_dir=tmp_path / "chosen_logs", + ), + _log_panel=SimpleNamespace(log=MagicMock()), + _verbose_log_file_path=None, + _verbose_log_session_stamp=None, + _verbose_log_file_index=1, + _verbose_log_file_error_reported=False, + ) + fake_window._verbose_log_part_path = MethodType(MainWindow._verbose_log_part_path, fake_window) + fake_window._verbose_log_session_path = MethodType(MainWindow._verbose_log_session_path, fake_window) + fake_window._prepare_verbose_log_target = MethodType(MainWindow._prepare_verbose_log_target, fake_window) + fake_window._append_verbose_log_file = MethodType(MainWindow._append_verbose_log_file, fake_window) + fake_window._emit_log_entry = MethodType(MainWindow._emit_log_entry, fake_window) + + fake_window._emit_log_entry("Bonjour", LogLevel.INFO) + + fake_window._log_panel.log.assert_called_once_with("Bonjour", LogLevel.INFO) + log_files = sorted((tmp_path / "chosen_logs").glob("mediarecode-verbose-*.log")) + assert len(log_files) == 1 + assert "[INFO] Bonjour" in log_files[0].read_text(encoding="utf-8") + + +def 
test_emit_log_entry_skips_file_when_verbose_logging_disabled(tmp_path) -> None: + fake_window = SimpleNamespace( + _config=SimpleNamespace( + enable_file_logging=False, + file_logging_level="standard", + app_data_dir=tmp_path, + verbose_log_dir=tmp_path / "chosen_logs", + ), + _log_panel=SimpleNamespace(log=MagicMock()), + _verbose_log_file_path=None, + _verbose_log_session_stamp=None, + _verbose_log_file_index=1, + _verbose_log_file_error_reported=False, + ) + fake_window._verbose_log_part_path = MethodType(MainWindow._verbose_log_part_path, fake_window) + fake_window._verbose_log_session_path = MethodType(MainWindow._verbose_log_session_path, fake_window) + fake_window._prepare_verbose_log_target = MethodType(MainWindow._prepare_verbose_log_target, fake_window) + fake_window._append_verbose_log_file = MethodType(MainWindow._append_verbose_log_file, fake_window) + fake_window._emit_log_entry = MethodType(MainWindow._emit_log_entry, fake_window) + + fake_window._emit_log_entry("Bonjour", LogLevel.INFO) + + fake_window._log_panel.log.assert_called_once_with("Bonjour", LogLevel.INFO) + assert not (tmp_path / "chosen_logs").exists() + + +def test_verbose_log_rotation_rolls_and_caps_at_three_files(tmp_path) -> None: + fake_window = SimpleNamespace( + _config=SimpleNamespace( + enable_file_logging=True, + file_logging_level="verbose", + app_data_dir=tmp_path, + verbose_log_dir=tmp_path / "chosen_logs", + ), + _log_panel=SimpleNamespace(log=MagicMock()), + ) + fake_window._verbose_file_logger = VerboseFileLogger( + app_data_dir=tmp_path, + verbose_log_dir=tmp_path / "chosen_logs", + enabled=True, + max_bytes=40, + max_files=3, + ) + fake_window._verbose_file_logger._session_stamp = "20260423-181000" + fake_window._verbose_log_part_path = MethodType(MainWindow._verbose_log_part_path, fake_window) + fake_window._verbose_log_session_path = MethodType(MainWindow._verbose_log_session_path, fake_window) + fake_window._prepare_verbose_log_target = 
MethodType(MainWindow._prepare_verbose_log_target, fake_window) + fake_window._append_verbose_log_file = MethodType(MainWindow._append_verbose_log_file, fake_window) + + fake_window._append_verbose_log_file("A" * 20, LogLevel.INFO) + fake_window._append_verbose_log_file("B" * 20, LogLevel.INFO) + fake_window._append_verbose_log_file("C" * 20, LogLevel.INFO) + fake_window._append_verbose_log_file("D" * 20, LogLevel.INFO) + + log_files = sorted((tmp_path / "chosen_logs").glob("mediarecode-verbose-20260423-181000-*.log")) + assert [path.name for path in log_files] == [ + "mediarecode-verbose-20260423-181000-01.log", + "mediarecode-verbose-20260423-181000-02.log", + "mediarecode-verbose-20260423-181000-03.log", + ] + assert "D" * 20 in (tmp_path / "chosen_logs" / "mediarecode-verbose-20260423-181000-01.log").read_text(encoding="utf-8") + assert "B" * 20 in (tmp_path / "chosen_logs" / "mediarecode-verbose-20260423-181000-02.log").read_text(encoding="utf-8") + assert "C" * 20 in (tmp_path / "chosen_logs" / "mediarecode-verbose-20260423-181000-03.log").read_text(encoding="utf-8") + + +def test_verbose_log_rotation_resumes_last_existing_file_and_continues_roll(tmp_path) -> None: + logs_dir = tmp_path / "chosen_logs" + logs_dir.mkdir(parents=True, exist_ok=True) + old_path = logs_dir / "mediarecode-verbose-20260420-080000-03.log" + resumed_path = logs_dir / "mediarecode-verbose-20260423-181000-02.log" + old_path.write_text("legacy\n", encoding="utf-8") + resumed_path.write_text("X" * 95, encoding="utf-8") + os.utime(old_path, (100.0, 100.0)) + os.utime(resumed_path, (200.0, 200.0)) + + fake_window = SimpleNamespace( + _config=SimpleNamespace( + enable_file_logging=True, + file_logging_level="verbose", + app_data_dir=tmp_path, + verbose_log_dir=logs_dir, + ), + _log_panel=SimpleNamespace(log=MagicMock()), + ) + fake_window._verbose_file_logger = VerboseFileLogger( + app_data_dir=tmp_path, + verbose_log_dir=logs_dir, + enabled=True, + max_bytes=100, + max_files=3, + ) + 
fake_window._verbose_log_part_path = MethodType(MainWindow._verbose_log_part_path, fake_window) + fake_window._verbose_log_session_path = MethodType(MainWindow._verbose_log_session_path, fake_window) + fake_window._prepare_verbose_log_target = MethodType(MainWindow._prepare_verbose_log_target, fake_window) + fake_window._append_verbose_log_file = MethodType(MainWindow._append_verbose_log_file, fake_window) + + fake_window._append_verbose_log_file("Suite", LogLevel.INFO) + + rotated_path = logs_dir / "mediarecode-verbose-20260423-181000-03.log" + assert fake_window._verbose_file_logger.session_stamp == "20260423-181000" + assert fake_window._verbose_file_logger.file_index == 3 + assert rotated_path.exists() + assert "[INFO] Suite" in rotated_path.read_text(encoding="utf-8") + assert "[INFO] Suite" not in resumed_path.read_text(encoding="utf-8") + + +def test_verbose_log_resume_prefers_latest_filename_not_latest_mtime(tmp_path) -> None: + logs_dir = tmp_path / "chosen_logs" + logs_dir.mkdir(parents=True, exist_ok=True) + newer_name = logs_dir / "mediarecode-verbose-20260423-181000-02.log" + older_name = logs_dir / "mediarecode-verbose-20260422-181000-03.log" + newer_name.write_text("newer-name\n", encoding="utf-8") + older_name.write_text("older-name\n", encoding="utf-8") + os.utime(newer_name, (100.0, 100.0)) + os.utime(older_name, (200.0, 200.0)) + + logger = VerboseFileLogger( + app_data_dir=tmp_path, + verbose_log_dir=logs_dir, + enabled=True, + max_bytes=100, + max_files=3, + ) + + assert logger.session_path() == newer_name + assert logger.session_stamp == "20260423-181000" + assert logger.file_index == 2 diff --git a/tests/test_merge_dovi_panel.py b/tests/test_merge_dovi_panel.py index a35fc0c..b06635e 100644 --- a/tests/test_merge_dovi_panel.py +++ b/tests/test_merge_dovi_panel.py @@ -1,6 +1,7 @@ from __future__ import annotations from pathlib import Path +from typing import Any, cast class _DummySignal: @@ -43,7 +44,7 @@ def _factory(**kwargs): 
monkeypatch.setattr(panel_mod, "MergeDoviWorkflow", _factory) cfg = _DummyConfig(tmp_path) - panel_mod.MergeDoviPanel(cfg) + panel_mod.MergeDoviPanel(cast(Any, cfg)) assert captured["mediainfo_bin"] == "mi" assert captured["ffmpeg_bin"] == "ffm" diff --git a/tests/test_merge_remux_extras.py b/tests/test_merge_remux_extras.py index c59339d..dc96f68 100644 --- a/tests/test_merge_remux_extras.py +++ b/tests/test_merge_remux_extras.py @@ -23,6 +23,7 @@ from __future__ import annotations from pathlib import Path +from typing import Any, cast import pytest @@ -32,7 +33,9 @@ # Appel unbound : _merge_remux_extras n'utilise pas self from ui.main_window import MainWindow -_merge = MainWindow._merge_remux_extras + +def _merge(encode_cfg: EncodeConfig, remux_cfg: RemuxConfig) -> EncodeConfig: + return MainWindow._merge_remux_extras(cast(Any, object()), encode_cfg, remux_cfg) # --------------------------------------------------------------------------- @@ -51,11 +54,14 @@ def _encode_cfg( extra_attachments: list | None = None, keep_chapters: bool = True, audio_tracks: list | None = None, + video_tracks: list | None = None, ) -> EncodeConfig: + videos = video_tracks or [_video_settings()] return EncodeConfig( source=src, output=output, - video=_video_settings(), + video=videos[0], + video_tracks=videos, audio_tracks=audio_tracks or [], keep_chapters=keep_chapters, file_title=file_title, @@ -122,7 +128,7 @@ def test_title_preserved_when_subtitles_merged(self, tmp_path, qt_app): enc = _encode_cfg(src, out, file_title="Mon Film Test") rmx = _remux_cfg(src, out, tracks=[_track(0, "video"), sub_track]) - result = _merge(None, enc, rmx) + result = _merge(enc, rmx) assert result.file_title == "Mon Film Test", \ f"file_title perdu après fusion sous-titres : {result.file_title!r}" @@ -136,7 +142,7 @@ def test_title_preserved_when_keep_chapters_differs(self, tmp_path, qt_app): enc = _encode_cfg(src, out, file_title="Film Chapitres", keep_chapters=True) rmx = _remux_cfg(src, out, 
keep_chapters=False) - result = _merge(None, enc, rmx) + result = _merge(enc, rmx) assert result.file_title == "Film Chapitres", \ f"file_title perdu suite à changement keep_chapters : {result.file_title!r}" @@ -150,7 +156,7 @@ def test_title_preserved_with_tag_sources(self, tmp_path, qt_app): enc = _encode_cfg(src, out, file_title="Film Tags") rmx = _remux_cfg(src, out, copy_tags=True) - result = _merge(None, enc, rmx) + result = _merge(enc, rmx) assert result.file_title == "Film Tags", \ f"file_title perdu avec tag_sources : {result.file_title!r}" @@ -164,7 +170,7 @@ def test_empty_title_preserved(self, tmp_path, qt_app): enc = _encode_cfg(src, out, file_title="") rmx = _remux_cfg(src, out, copy_tags=True) - result = _merge(None, enc, rmx) + result = _merge(enc, rmx) assert result.file_title == "" @@ -189,7 +195,7 @@ def test_extra_attachments_preserved_when_chapters_differ(self, tmp_path, qt_app enc = _encode_cfg(src, out, extra_attachments=[cover], keep_chapters=True) rmx = _remux_cfg(src, out, keep_chapters=False) - result = _merge(None, enc, rmx) + result = _merge(enc, rmx) assert result.extra_attachments == [cover], \ f"extra_attachments perdus : {result.extra_attachments!r}" @@ -205,7 +211,7 @@ def test_extra_attachments_preserved_with_tag_sources(self, tmp_path, qt_app): enc = _encode_cfg(src, out, extra_attachments=[cover]) rmx = _remux_cfg(src, out, copy_tags=True) - result = _merge(None, enc, rmx) + result = _merge(enc, rmx) assert result.extra_attachments == [cover], \ f"extra_attachments perdus avec tag_sources : {result.extra_attachments!r}" @@ -236,7 +242,7 @@ def test_returns_same_object_when_nothing_to_merge(self, tmp_path, qt_app): enc = _encode_cfg(src, out, file_title="Film", keep_chapters=True) rmx = _remux_cfg(src, out, keep_chapters=True, tracks=[bare_track]) - result = _merge(None, enc, rmx) + result = _merge(enc, rmx) assert result is enc, "Devrait retourner l'objet original sans reconstruction" @@ -269,12 +275,49 @@ def 
test_cleared_video_language_emits_und_track_meta_edit(self, tmp_path, qt_app enc = _encode_cfg(src, out) rmx = _remux_cfg(src, out, tracks=[cleared_video]) - result = _merge(None, enc, rmx) + result = _merge(enc, rmx) assert [(edit.track_order, edit.language, edit.title) for edit in result.track_meta_edits] == [ (1, "und", None), ] + def test_multiple_video_tracks_are_ordered_before_audio_tracks(self, tmp_path, qt_app): + src = tmp_path / "src.mkv" + src.touch() + out = tmp_path / "out.mkv" + + video_a = TrackEntry( + mkv_tid=0, track_type="video", codec="H264", display_info="", + language="fra", title="Vidéo A", orig_language="", orig_title="", + ) + video_b = TrackEntry( + mkv_tid=1, track_type="video", codec="HEVC", display_info="", + language="eng", title="Vidéo B", orig_language="", orig_title="", + ) + audio = TrackEntry( + mkv_tid=2, track_type="audio", codec="EAC3", display_info="", + language="jpn", title="Audio", orig_language="", orig_title="", + ) + + enc = _encode_cfg( + src, + out, + video_tracks=[ + VideoEncodeSettings(stream_index=0, source_path=src, track_entry_id=video_a.entry_id), + VideoEncodeSettings(stream_index=1, source_path=src, track_entry_id=video_b.entry_id), + ], + audio_tracks=[AudioTrackSettings(stream_index=2, source_path=src)], + ) + rmx = _remux_cfg(src, out, tracks=[video_a, video_b, audio]) + + result = _merge(enc, rmx) + + assert [(edit.track_order, edit.title, edit.language) for edit in result.track_meta_edits] == [ + (1, "Vidéo A", "fra"), + (2, "Vidéo B", "eng"), + (3, "Audio", "jpn"), + ] + # --------------------------------------------------------------------------- # tag_overrides propagés depuis RemuxConfig @@ -314,7 +357,7 @@ def test_tag_overrides_propagated_to_encode_cfg(self, tmp_path, qt_app): enc = _encode_cfg(src, out, file_title="Film") rmx = self._remux_with_tag_overrides(src, out, tags) - result = _merge(None, enc, rmx) + result = _merge(enc, rmx) assert result.tag_overrides == tags, \ f"tag_overrides non 
propagé : {result.tag_overrides!r}" @@ -328,7 +371,7 @@ def test_tag_overrides_disables_tag_sources(self, tmp_path, qt_app): enc = _encode_cfg(src, out) rmx = self._remux_with_tag_overrides(src, out, {"EPISODE": "3"}) - result = _merge(None, enc, rmx) + result = _merge(enc, rmx) assert result.tag_sources == [], \ f"tag_sources devrait être vide quand tag_overrides présent : {result.tag_sources!r}" @@ -342,7 +385,7 @@ def test_empty_tag_overrides_still_propagated(self, tmp_path, qt_app): enc = _encode_cfg(src, out) rmx = self._remux_with_tag_overrides(src, out, {}) - result = _merge(None, enc, rmx) + result = _merge(enc, rmx) assert result.tag_overrides == {}, \ f"tag_overrides={{}} non propagé : {result.tag_overrides!r}" @@ -357,7 +400,7 @@ def test_no_tag_overrides_uses_tag_sources(self, tmp_path, qt_app): # RemuxConfig sans tag_overrides mais copy_tags=True rmx = _remux_cfg(src, out, copy_tags=True) - result = _merge(None, enc, rmx) + result = _merge(enc, rmx) assert result.tag_sources == [src], \ f"tag_sources attendu [{src}], obtenu : {result.tag_sources!r}" @@ -395,7 +438,7 @@ def test_audio_track_meta_edits_follow_reordered_encode_audio_tracks(self, tmp_p ) rmx = _remux_cfg(src, out, tracks=[video, audio_2, audio_1]) - result = _merge(None, enc, rmx) + result = _merge(enc, rmx) assert [(edit.track_order, edit.title, edit.language) for edit in result.track_meta_edits] == [ (2, "VO", "eng"), @@ -429,7 +472,7 @@ def test_subtitle_tracks_and_metadata_follow_remux_track_order(self, tmp_path, q keep_chapters=True, ) - result = _merge(None, enc, rmx) + result = _merge(enc, rmx) assert result.subtitle_tracks == [(src_b, 7), (src_a, 4)] assert [(edit.track_order, edit.title, edit.language) for edit in result.track_meta_edits] == [ @@ -472,7 +515,7 @@ def test_track_flags_are_propagated_to_track_meta_edits(self, tmp_path, qt_app): ) rmx = _remux_cfg(src, out, tracks=[_track(0, "video"), audio]) - result = _merge(None, enc, rmx) + result = _merge(enc, rmx) edit = next(e 
for e in result.track_meta_edits if e.track_order == 2) assert edit.flag_default is True @@ -517,7 +560,7 @@ def test_track_time_offsets_are_propagated_from_remux_tracks(self, tmp_path, qt_ ) rmx = _remux_cfg(src, out, tracks=[video, audio]) - result = _merge(None, enc, rmx) + result = _merge(enc, rmx) assert result is not enc assert all(isinstance(t, TrackTimeOffset) for t in result.track_time_offsets) @@ -548,5 +591,5 @@ def test_zero_offsets_are_not_propagated(self, tmp_path, qt_app): enc = _encode_cfg(src, out) rmx = _remux_cfg(src, out, tracks=[video]) - result = _merge(None, enc, rmx) + result = _merge(enc, rmx) assert result is enc diff --git a/tests/test_nfo_generation.py b/tests/test_nfo_generation.py index 0b98859..296bba4 100644 --- a/tests/test_nfo_generation.py +++ b/tests/test_nfo_generation.py @@ -15,6 +15,7 @@ import os import subprocess from pathlib import Path +from typing import Any, cast from unittest.mock import MagicMock, call, patch import pytest @@ -148,7 +149,7 @@ def test_checkbox_exists_and_defaults_checked(self, qt_app, tmp_path): cfg, _ = _make_config(tmp_path) panel = SettingsPanel(cfg) - cb = panel.widget_for("metadata", "generate_nfo") + cb = cast(Any, panel.widget_for("metadata", "generate_nfo")) assert cb.isChecked() is True def test_checkbox_reflects_false_from_config(self, qt_app, tmp_path): @@ -157,7 +158,7 @@ def test_checkbox_reflects_false_from_config(self, qt_app, tmp_path): cfg, _ = _make_config(tmp_path, "[metadata]\ngenerate_nfo = false\n") panel = SettingsPanel(cfg) - cb = panel.widget_for("metadata", "generate_nfo") + cb = cast(Any, panel.widget_for("metadata", "generate_nfo")) assert cb.isChecked() is False def test_save_writes_false_to_ini(self, qt_app, tmp_path): @@ -176,7 +177,7 @@ def test_save_writes_false_to_ini(self, qt_app, tmp_path): cfg = AppConfig() panel = SettingsPanel(cfg) - cb = panel.widget_for("metadata", "generate_nfo") + cb = cast(Any, panel.widget_for("metadata", "generate_nfo")) 
cb.setChecked(False) panel._on_save_clicked() @@ -199,7 +200,7 @@ def test_save_writes_true_to_ini(self, qt_app, tmp_path): cfg = AppConfig() panel = SettingsPanel(cfg) - cb = panel.widget_for("metadata", "generate_nfo") + cb = cast(Any, panel.widget_for("metadata", "generate_nfo")) cb.setChecked(True) panel._on_save_clicked() @@ -410,6 +411,7 @@ def test_generate_nfo_false_skips_bind(self, tmp_path): wf._bind_nfo_write(signals, output) signals.finished.emit("done") app = QCoreApplication.instance() + assert app is not None deadline = time.monotonic() + 1.0 while time.monotonic() < deadline: app.processEvents() @@ -433,6 +435,7 @@ def test_generate_nfo_true_binds_slot(self, tmp_path): wf._bind_nfo_write(signals, output) signals.finished.emit("done") app = QCoreApplication.instance() + assert app is not None deadline = time.monotonic() + 3.0 while not called and time.monotonic() < deadline: app.processEvents() @@ -457,6 +460,7 @@ def test_bound_slot_calls_write_mediainfo_nfo(self, tmp_path): wf._bind_nfo_write(signals, output) signals.finished.emit("done") app = QCoreApplication.instance() + assert app is not None deadline = time.monotonic() + 3.0 while not captured and time.monotonic() < deadline: app.processEvents() @@ -486,9 +490,10 @@ def test_constructor_mediainfo_bin_stored_in_bins(self): wf = EncodeWorkflow(mediainfo_bin="/custom/mi") assert wf._bins["mediainfo"] == "/custom/mi" - def test_postproc_helper_has_generate_nfo_false(self): - """Le RemuxWorkflow interne (_postproc_helper) a toujours generate_nfo=False.""" + def test_encode_uses_dedicated_postprocess_service(self): + """EncodeWorkflow n'embarque plus un RemuxWorkflow interne pour le post-traitement.""" + from core.workflows.common.remux_postprocess import RemuxPostprocessService from core.workflows.encode.workflow import EncodeWorkflow wf = EncodeWorkflow(generate_nfo=True) - assert wf._postproc_helper._generate_nfo is False + assert isinstance(wf._postprocess_service, RemuxPostprocessService) diff 
--git a/tests/test_package.py b/tests/test_package.py index 560bd5a..023d99a 100644 --- a/tests/test_package.py +++ b/tests/test_package.py @@ -421,6 +421,7 @@ def test_stage_msix_layout_embeds_file_associations_and_bundle(tmp_path): with patch.object(package_mod, "ROOT", tmp_path), \ patch.object(package_mod, "APP_NAME", "Mediarecode"), \ + patch.object(package_mod, "_MSIX_PACKAGE_NAME", "AOTRMediarecode"), \ patch.object(package_mod, "_msix_processor_architecture", return_value="x64"), \ patch.object(package_mod, "_load_msix_store_metadata", return_value=metadata): layout_dir = package_mod._stage_msix_layout(bundle_dir, "1.3.2", metadata=metadata) @@ -428,7 +429,7 @@ def test_stage_msix_layout_embeds_file_associations_and_bundle(tmp_path): manifest = (layout_dir / "AppxManifest.xml").read_text(encoding="utf-8") assert '' in manifest assert '.mkv' in manifest - assert (layout_dir / "VFS" / "ProgramFilesX64" / "Mediarecode" / "mediarecode.exe").exists() + assert (layout_dir / "VFS" / "ProgramFilesX64" / "AOTRMediarecode" / "mediarecode.exe").exists() def test_build_msixupload_wraps_msix_for_partner_center(tmp_path): @@ -448,7 +449,7 @@ def test_build_msix_package_invokes_makeappx_and_signing(tmp_path): bundle_dir.mkdir(parents=True) layout_dir = tmp_path / "layout" layout_dir.mkdir() - output_path = tmp_path / "Mediarecode-1.3.2.msix" + output_path = tmp_path / "AOTRMediarecode-1.3.2.msix" commands: list[list[str]] = [] def fake_run(cmd, **kwargs): @@ -458,6 +459,7 @@ def fake_run(cmd, **kwargs): with patch.object(package_mod, "OS", "Windows"), \ patch.object(package_mod, "ROOT", tmp_path), \ + patch.object(package_mod, "_MSIX_PACKAGE_NAME", "AOTRMediarecode"), \ patch.object(package_mod, "_stage_msix_layout", return_value=layout_dir), \ patch.object(package_mod, "_ensure_windows_sdk_tool", return_value="C:\\sdk\\makeappx.exe"), \ patch.object(package_mod, "_sign_msix_package") as mock_sign, \ diff --git a/tests/test_remux.py b/tests/test_remux.py index 
bdd375c..b0d3390 100644 --- a/tests/test_remux.py +++ b/tests/test_remux.py @@ -125,7 +125,9 @@ import sys import time from pathlib import Path -from unittest.mock import patch +from types import SimpleNamespace +from typing import Any, cast +from unittest.mock import MagicMock, patch import pytest from PySide6.QtCore import Qt @@ -142,6 +144,7 @@ from core.media_info_fetcher import MediaDetails from core.runner import TaskSignals from core.workflows.remux import RemuxWorkflow +from core.workflows.remux_mapping import resolved_global_tags from core.workflows.remux_models import ( RemuxConfig, RemuxError, SourceInput, TrackEntry, clone_track_entry, tracks_from_file_info, ) @@ -152,6 +155,7 @@ _TRACK_INFO_OFFSET_VALUE_ROLE, _normalize_tmdb_manual_title_suggestion, _pick_file_color, ) +from ui.panels.remux_panel.functions import inspection as inspection_functions from ui.panels.remux_panel.widgets.attachments import _AttachmentNameButton, _pretty_text_attachment_content from ui.panels.remux_panel.widgets.file_list import _ACCEPTED_EXT from ui.panels.track_edit_dialog import TrackEditDialog @@ -1250,6 +1254,122 @@ def test_update_audio_encoding_updates_codec_and_info_cells(self, table): assert track.codec == "AAC" assert track.display_info == "5.1 384 kbps" + def test_update_video_encoding_plan_updates_codec_cell_and_state(self, table): + track = _track(0, "video", file_id="fid", codec="HEVC") + table.append_tracks(_COLOR_A, [track]) + + changed = table.update_video_encoding_plans({ + track.entry_id: "libx265", + }) + + assert changed is True + assert table.item(0, _TrackTable.COL_CODEC).text() == "HEVC" + assert track.encode_plan_codec == "libx265" + assert track.encode_plan_modified is True + + def test_update_video_encoding_plan_is_per_track(self, table): + first = _track(0, "video", file_id="fid", codec="HEVC") + second = _track(1, "video", file_id="fid", codec="H264") + table.append_tracks(_COLOR_A, [first, second]) + + changed = 
table.update_video_encoding_plans({ + first.entry_id: "libx265", + second.entry_id: "copy", + }) + + assert changed is True + assert table.item(0, _TrackTable.COL_CODEC).text() == "HEVC" + assert table.item(1, _TrackTable.COL_CODEC).text() == "H264" + assert first.encode_plan_codec == "libx265" + assert second.encode_plan_codec == "copy" + + def test_update_video_encoding_plan_clear_missing_resets_codec_cell_and_state(self, table): + track = _track(0, "video", file_id="fid", codec="HEVC") + table.append_tracks(_COLOR_A, [track]) + table.update_video_encoding_plans({ + track.entry_id: "copy", + }) + + changed = table.update_video_encoding_plans({}, clear_missing=True) + + assert changed is True + assert table.item(0, _TrackTable.COL_CODEC).text() == "HEVC" + assert track.encode_plan_codec == "" + assert track.encode_plan_summary == "" + assert track.encode_plan_hdr_badges == () + assert track.encode_plan_modified is False + + def test_video_codec_cell_stays_normal_when_copy_untouched(self, table): + track = _track(0, "video", file_id="fid", codec="HEVC") + table.append_tracks(_COLOR_A, [track]) + + codec_item = table.item(0, _TrackTable.COL_CODEC) + assert codec_item is not None + assert codec_item.foreground().color().name().lower() != table._VIDEO_ENCODE_COLOR.name().lower() + assert codec_item.font().bold() is False + + def test_video_row_turns_blue_and_bold_when_encode_codec_is_set(self, table): + track = _track(0, "video", file_id="fid", codec="HEVC") + table.append_tracks(_COLOR_A, [track]) + + table.update_video_encoding_plans({track.entry_id: "libx265"}) + + for col in ( + _TrackTable.COL_TYPE, + _TrackTable.COL_LANG, + _TrackTable.COL_TITLE, + _TrackTable.COL_INFO, + ): + item = table.item(0, col) + assert item is not None + assert item.foreground().color().name().lower() == table._VIDEO_ENCODE_COLOR.name().lower() + assert item.font().bold() is True + codec_item = table.item(0, _TrackTable.COL_CODEC) + assert codec_item is not None + assert 
codec_item.foreground().color().name().lower() == table._VIDEO_ENCODE_CODEC_COLOR.name().lower() + assert codec_item.font().bold() is True + + def test_video_row_rolls_back_when_codec_returns_to_copy(self, table): + track = _track(0, "video", file_id="fid", codec="HEVC") + table.append_tracks(_COLOR_A, [track]) + table.update_video_encoding_plans({track.entry_id: "libx265"}) + + table.update_video_encoding_plans({track.entry_id: "copy"}) + + codec_item = table.item(0, _TrackTable.COL_CODEC) + assert codec_item is not None + assert codec_item.text() == "HEVC" + assert codec_item.foreground().color().name().lower() != table._VIDEO_ENCODE_COLOR.name().lower() + assert codec_item.font().bold() is False + + def test_remux_panel_update_video_track_encoding_empty_list_clears_state(self, qt_app, tmp_path): + panel = RemuxPanel(AppConfig()) + src = tmp_path / "source.mkv" + src.touch() + + video = _track(0, "video", file_id="fid", codec="HEVC") + video.encode_plan_codec = "libx265" + video.encode_plan_modified = True + + info = _file_info(path=src, videos=[_video(index=0)]) + source = SourceFile(id="fid", path=src, color=_COLOR_A, info=info, tracks=[video]) + panel._source_files = [source] + panel._source_colors = {"fid": _COLOR_A} + panel._source_names = {"fid": "source.mkv"} + panel._track_table.append_tracks(_COLOR_A, [video]) + + with patch.object(panel, "_rebuild_preview"), patch.object(panel, "_emit_video_tracks"): + panel.update_video_track_encoding([]) + + assert video.encode_plan_codec == "" + assert video.encode_plan_summary == "" + assert video.encode_plan_hdr_badges == () + assert video.encode_plan_modified is False + codec_item = panel._track_table.item(0, _TrackTable.COL_CODEC) + assert codec_item is not None + assert codec_item.text() == "HEVC" + panel.close() + # =========================================================================== # RemuxPanel — pistes NEW synchronisées avec EncodePanel @@ -1293,7 +1413,9 @@ def 
test_new_track_remains_available_when_source_track_is_unselected(self, qt_ap panel.audio_tracks_changed.connect(lambda tracks: emitted.append(tracks)) source_row = self._row_for_entry(panel, source) - panel._track_table.item(source_row, _TrackTable.COL_CHECK).setCheckState(Qt.CheckState.Unchecked) + source_item = panel._track_table.item(source_row, _TrackTable.COL_CHECK) + assert source_item is not None + source_item.setCheckState(Qt.CheckState.Unchecked) panel._emit_audio_tracks() assert emitted @@ -1378,13 +1500,67 @@ def test_table_checkbox_change_reemits_video_tracks(self, qt_app, tmp_path): panel.video_tracks_changed.connect(lambda tracks: emitted.append(tracks)) row = self._row_for_entry(panel, video_b) - panel._track_table.item(row, _TrackTable.COL_CHECK).setCheckState(Qt.CheckState.Unchecked) + row_item = panel._track_table.item(row, _TrackTable.COL_CHECK) + assert row_item is not None + row_item.setCheckState(Qt.CheckState.Unchecked) panel._on_table_changed() assert emitted assert [entry.entry_id for _info, entry, _color in emitted[-1]] == [video_a.entry_id] panel.close() + def test_reemitted_video_tracks_are_detached_objects(self, qt_app, tmp_path): + video = _track(0, "video", file_id="fid", codec="HEVC") + panel = self._panel_with_video_tracks(qt_app, tmp_path, [video]) + emitted: list = [] + panel.video_tracks_changed.connect(lambda tracks: emitted.append(tracks)) + + panel._emit_video_tracks() + + assert emitted + emitted_entry = emitted[-1][0][1] + assert emitted_entry is not video + emitted_entry.encode_plan_codec = "libx264" + assert video.encode_plan_codec == "" + panel.close() + + +def test_inspect_file_routes_verbose_inspector_output_to_panel_signal(tmp_path): + path = tmp_path / "movie.mkv" + path.touch() + info = FileInfo(path=path, format="matroska,webm", duration_s=None, size_bytes=None, bit_rate=None) + verbose_lines: list[tuple[str, str]] = [] + + panel = SimpleNamespace( + _config=SimpleNamespace(tool_ffprobe="ffprobe", 
tool_mediainfo="mediainfo"), + tool_output=SimpleNamespace(emit=lambda label, line: verbose_lines.append((str(label), str(line)))), + _inspection_done=SimpleNamespace(emit=MagicMock()), + _inspection_error=SimpleNamespace(emit=MagicMock()), + log_message=SimpleNamespace(emit=MagicMock()), + ) + + callback_holder: dict[str, object] = {} + inspector_instance = MagicMock() + + def fake_ctor(*args, **kwargs): + callback_holder["verbose_output"] = kwargs["verbose_output"] + return inspector_instance + + def fake_inspect(_path): + callback = callback_holder["verbose_output"] + assert callable(callback) + callback("Inspection démarrée : /tmp/movie.mkv") + return info + + inspector_instance.inspect.side_effect = fake_inspect + + with patch.object(inspection_functions, "FileInspector", side_effect=fake_ctor): + inspection_functions.inspect_file(cast(Any, panel), "fid-1", path) + + assert verbose_lines == [("inspector", "Inspection démarrée : /tmp/movie.mkv")] + panel._inspection_done.emit.assert_called_once_with("fid-1", info) + panel._inspection_error.emit.assert_not_called() + # =========================================================================== # _AttachmentItemWidget — case cochée par défaut (tags et attachements) @@ -1835,9 +2011,7 @@ def _cfg(self, output: Path, *, tags: dict[str, str] | None = None) -> RemuxConf def test_resolved_global_tags_keeps_only_user_tags(self, tmp_path): output = tmp_path / "out.mkv" cfg = self._cfg(output, tags={"GENRE": "Drama", "EMPTY": " "}) - wf = RemuxWorkflow(ffmpeg_bin="ffmpeg", writing_application="MediarecodeMux") - - tags = wf._resolved_global_tags(cfg) + tags = resolved_global_tags(cfg) assert tags["GENRE"] == "Drama" assert "EMPTY" not in tags diff --git a/tests/test_remux_modules.py b/tests/test_remux_modules.py new file mode 100644 index 0000000..0aa6e66 --- /dev/null +++ b/tests/test_remux_modules.py @@ -0,0 +1,171 @@ +from __future__ import annotations + +from pathlib import Path +from types import SimpleNamespace 
+from typing import Any, cast + +from core.inspector import AttachmentInfo +from core.runner import TaskSignals +from core.workflows.remux_attachments import attachment_names, build_attachment_mapping +from core.workflows.remux_command import build_remux_command, preview_remux_command +from core.workflows.remux_mapping import ( + MappedTrack, + metadata_context, + resolve_mapped_tracks, + requires_file_sync_fallback_for_offsets, +) +from core.workflows.remux_models import RemuxConfig, SourceInput, TrackEntry +from core.workflows.remux_sync import bind_temp_cleanup, decide_strict_interleave_with_prescan + + +def _track( + mkv_tid: int, + track_type: str, + *, + codec: str = "copy", + title: str = "", + language: str = "und", + entry_id: str | None = None, + time_shift_ms: int = 0, +) -> TrackEntry: + return TrackEntry( + mkv_tid=mkv_tid, + track_type=track_type, + codec=codec, + display_info=track_type, + language=language, + title=title, + entry_id=entry_id or f"{track_type}-{mkv_tid}", + time_shift_ms=time_shift_ms, + ) + + +class TestRemuxMappingModule: + def test_resolve_mapped_tracks_preserves_output_type_indices(self, tmp_path): + src = tmp_path / "in.mkv" + src.touch() + cfg = RemuxConfig( + sources=[SourceInput(path=src, file_index=0, tracks=[_track(0, "video"), _track(1, "audio"), _track(2, "subtitle")])], + output=tmp_path / "out.mkv", + track_order=[(0, 0), (0, 1), (0, 2)], + ) + mapped = resolve_mapped_tracks(cfg) + assert [(mt.track.track_type, mt.out_type_index) for mt in mapped] == [ + ("video", 0), + ("audio", 0), + ("subtitle", 0), + ] + + def test_metadata_context_uses_chapter_input_when_tags_are_overridden(self, tmp_path): + src = tmp_path / "in.mkv" + src.touch() + cfg = RemuxConfig( + sources=[SourceInput(path=src, file_index=0, tracks=[_track(0, "video")])], + output=tmp_path / "out.mkv", + track_order=[(0, 0)], + chapter_overrides=[SimpleNamespace(timecode_s=0.0, name="Intro")], + tag_overrides={"TMDB_ID": "42"}, + ) + meta = 
metadata_context(cfg, chapter_input_index=1) + assert meta.chapter_map == "1" + assert meta.metadata_map == "1" + assert meta.global_tags["TMDB_ID"] == "42" + + def test_requires_file_sync_fallback_for_offsets_detects_foreign_shift(self): + mapped = [ + MappedTrack(0, 0, Path("a.mkv"), 0, _track(0, "video"), 0), + MappedTrack(1, 1, Path("b.mkv"), 1, _track(1, "audio", time_shift_ms=90), 0), + ] + assert requires_file_sync_fallback_for_offsets(mapped) is True + + +class TestRemuxCommandModule: + def test_build_remux_command_applies_strict_interleave_and_metadata(self, tmp_path): + src_a = tmp_path / "a.mkv" + src_b = tmp_path / "b.mkv" + src_a.touch() + src_b.touch() + cfg = RemuxConfig( + sources=[ + SourceInput(path=src_a, file_index=0, tracks=[_track(0, "video"), _track(2, "subtitle", codec="subrip")]), + SourceInput(path=src_b, file_index=1, tracks=[_track(1, "audio", language="fr", title="VF")]), + ], + output=tmp_path / "out.mkv", + track_order=[(0, 0), (1, 1), (0, 2)], + keep_chapters=False, + tag_overrides={"GENRE": "Drama"}, + ) + cmd = build_remux_command( + cfg, + ffmpeg_bin="ffmpeg", + ffmpeg_progress_args=["-progress", "pipe:1", "-nostats"], + ffmpeg_thread_args=["-threads", "4"], + cli_path=str, + strict_interleave_override=True, + ) + assert "-max_interleave_delta" in cmd + assert "GENRE=Drama" in cmd + assert "-metadata:s:a:0" in cmd + assert "language=fr-FR" in cmd + + def test_preview_remux_command_formats_multiline_output(self, tmp_path): + src = tmp_path / "in.mkv" + src.touch() + cfg = RemuxConfig( + sources=[SourceInput(path=src, file_index=0, tracks=[_track(0, "video")])], + output=tmp_path / "out.mkv", + track_order=[(0, 0)], + ) + preview = preview_remux_command( + cfg, + build_command=lambda *_args, **_kwargs: ["ffmpeg", "-i", str(src), str(tmp_path / "out.mkv")], + ) + assert preview.startswith("ffmpeg") + assert "\\\n" in preview + + +class TestRemuxSyncModule: + def test_decide_strict_interleave_with_prescan_logs_foreign_offset(self): + 
cfg = SimpleNamespace() + mapped = [ + MappedTrack(0, 0, Path("a.mkv"), 0, _track(0, "video"), 0), + MappedTrack(1, 1, Path("b.mkv"), 1, _track(1, "audio", time_shift_ms=100), 0), + ] + logs: list[tuple[str, str]] = [] + result = decide_strict_interleave_with_prescan( + cast(Any, cfg), + resolve_mapped_tracks=lambda _cfg: mapped, + log_cb=lambda level, msg: logs.append((level, msg)), + ) + assert result is True + assert any("Décalage" in message for _, message in logs) + + def test_bind_temp_cleanup_removes_paths_once(self, tmp_path): + temp_dir = tmp_path / "work" + temp_dir.mkdir() + (temp_dir / "marker.txt").write_text("x", encoding="utf-8") + signals = TaskSignals() + bind_temp_cleanup(signals, [temp_dir]) + signals.finished.emit("ok") + assert not temp_dir.exists() + + +class TestRemuxAttachmentsModule: + def test_build_attachment_mapping_and_attachment_names(self, tmp_path): + src = tmp_path / "in.mkv" + src.touch() + attachment = AttachmentInfo( + index=5, + local_index=0, + filename="cover", + mimetype="image/jpeg", + is_attached_pic=False, + ) + cfg = RemuxConfig( + sources=[SourceInput(path=src, file_index=0, tracks=[_track(0, "video")], selected_attachments=[attachment])], + output=tmp_path / "out.mkv", + track_order=[(0, 0)], + ) + args = build_attachment_mapping(cfg) + assert args == ["-map", "0:5", "-c:t", "copy", "-map_metadata:s:t", "-1"] + assert attachment_names(attachment) == ("cover.jpg", "cover.jpg") diff --git a/tests/test_remux_timeline_sync.py b/tests/test_remux_timeline_sync.py index 88550ef..e7ba1e7 100644 --- a/tests/test_remux_timeline_sync.py +++ b/tests/test_remux_timeline_sync.py @@ -3,6 +3,7 @@ from dataclasses import dataclass import os from pathlib import Path +from typing import Any, cast import pytest @@ -11,6 +12,7 @@ from core.workflows.remux_timeline_sync import ( LiveSyncSession, FfmpegTimelineSync, + SyncPreparedInput, TimelineSyncFallbackHelper, ) @@ -75,8 +77,8 @@ def _fake_extract(*, source: Path, stream_index: int, 
destination: Path) -> None assert len(prepared) == 2 assert [p.input_idx for p in prepared] == [2, 3] - assert prepared[0].path.suffix == ".mka" - assert prepared[1].path.suffix == ".mks" + assert Path(prepared[0].path).suffix == ".mka" + assert Path(prepared[1].path).suffix == ".mks" assert len(calls) == 2 @@ -163,8 +165,8 @@ def kill(self): assert len(session.inputs) == 2 assert session.inputs[0].input_idx == 2 assert session.inputs[1].input_idx == 3 - assert session.inputs[0].path.suffix == ".mka" - assert session.inputs[1].path.suffix == ".mks" + assert Path(session.inputs[0].path).suffix == ".mka" + assert Path(session.inputs[1].path).suffix == ".mks" assert popen_cmds, "Aucune commande ffmpeg live capturée" assert "-y" in popen_cmds[0] assert "-start_at_zero" in popen_cmds[0] @@ -276,14 +278,14 @@ def _fake_extract_mmap(*, source: Path, stream_index: int, destination: Path) -> assert len(prepared) == 1 assert prepared[0].input_idx == 2 - assert prepared[0].path.suffix == ".mka" + assert Path(prepared[0].path).suffix == ".mka" assert len(calls) == 1 def test_fallback_helper_prefers_live_when_available(tmp_path): sync_path = tmp_path / "sync_live.mka" sync_path.touch() - expected = [type("P", (), {"key": (1, 1, "audio"), "path": sync_path, "input_idx": 2})()] + expected = [SyncPreparedInput(key=(1, 1, "audio"), path=sync_path, input_idx=2)] class _FakeSyncer: def start_live_demux_session(self, **_kwargs): @@ -296,7 +298,7 @@ def prepare_from_mapped_tracks(self, **_kwargs): pytest.fail("file fallback should not run when live is available") result = TimelineSyncFallbackHelper( - syncer=_FakeSyncer(), + syncer=cast(Any, _FakeSyncer()), work_dir=tmp_path, ram_dir=tmp_path / "ram", ).prepare( @@ -323,7 +325,7 @@ def start_live_demux_session(self, **_kwargs): def prepare_from_mapped_tracks_mmap(self, **kwargs): calls.append(Path(kwargs["tmp_dir"])) - return [type("P", (), {"key": (1, 1, "audio"), "path": ram_dir / "sync_ram.mka", "input_idx": 2})()] + return 
[SyncPreparedInput(key=(1, 1, "audio"), path=ram_dir / "sync_ram.mka", input_idx=2)] def prepare_from_mapped_tracks(self, **_kwargs): pytest.fail("file fallback should not run when RAM mmap works") @@ -334,7 +336,7 @@ def prepare_from_mapped_tracks(self, **_kwargs): sources = [_source(src, 1, [_track(1, "audio")])] result = TimelineSyncFallbackHelper( - syncer=_FakeSyncer(), + syncer=cast(Any, _FakeSyncer()), work_dir=work_dir, ram_dir=ram_dir, ).prepare( diff --git a/tests/test_remux_workflow.py b/tests/test_remux_workflow.py index d124b89..fea0b85 100644 --- a/tests/test_remux_workflow.py +++ b/tests/test_remux_workflow.py @@ -27,6 +27,7 @@ import subprocess import time from pathlib import Path +from typing import cast import pytest from PySide6.QtCore import QCoreApplication, Qt @@ -35,9 +36,21 @@ from core.runner import TaskSignals from core.version import APP_VERSION_LABEL from core.workflows.matroska_header_editor import MatroskaSegmentInfoHeaderEditor +from core.workflows.remux_mapping import ( + requires_file_sync_fallback_for_offsets, + resolve_mapped_tracks, +) from core.workflows.remux_models import RemuxConfig, RemuxError, SourceInput, TrackEntry from core.workflows.remux import RemuxWorkflow -from core.workflows.remux_timeline_sync import LiveSyncNotSupportedError, SyncPreparedInput +from core.workflows.remux_sync import ( + decide_strict_interleave_with_prescan, + prepare_timeline_sync_inputs, +) +from core.workflows.remux_timeline_sync import ( + LiveSyncNotSupportedError, + SyncPreparedInput, + TimelineSyncFallbackHelper, +) def _track( @@ -81,7 +94,10 @@ def _wait_task(signals, timeout: float = 20.0) -> dict[str, object]: } done = {"value": False} - signals.progress.connect(lambda msg: state["progress"].append(msg), Qt.ConnectionType.QueuedConnection) + signals.progress.connect( + lambda msg: cast(list[str], state["progress"]).append(msg), + Qt.ConnectionType.QueuedConnection, + ) signals.finished.connect(lambda res: (state.__setitem__("finished", 
res), done.__setitem__("value", True)), Qt.ConnectionType.QueuedConnection) signals.failed.connect(lambda msg, exc: (state.__setitem__("failed", (msg, exc)), done.__setitem__("value", True)), Qt.ConnectionType.QueuedConnection) signals.cancelled.connect(lambda: (state.__setitem__("cancelled", True), done.__setitem__("value", True)), Qt.ConnectionType.QueuedConnection) @@ -242,8 +258,8 @@ def test_requires_file_sync_fallback_for_offsets_detects_foreign_offset(self, tm keep_chapters=False, ) - mapped = wf._resolve_mapped_tracks(cfg) - assert wf._requires_file_sync_fallback_for_offsets(mapped) is True + mapped = resolve_mapped_tracks(cfg) + assert requires_file_sync_fallback_for_offsets(mapped) is True def test_requires_file_sync_fallback_for_offsets_ignores_zero_offsets(self, tmp_path): wf = RemuxWorkflow(ffmpeg_bin="ffmpeg", ffprobe_bin="ffprobe") @@ -262,8 +278,8 @@ def test_requires_file_sync_fallback_for_offsets_ignores_zero_offsets(self, tmp_ keep_chapters=False, ) - mapped = wf._resolve_mapped_tracks(cfg) - assert wf._requires_file_sync_fallback_for_offsets(mapped) is False + mapped = resolve_mapped_tracks(cfg) + assert requires_file_sync_fallback_for_offsets(mapped) is False def test_decide_strict_interleave_with_prescan_forces_sync_on_foreign_offset(self, tmp_path): wf = RemuxWorkflow(ffmpeg_bin="ffmpeg", ffprobe_bin="ffprobe") @@ -282,7 +298,11 @@ def test_decide_strict_interleave_with_prescan_forces_sync_on_foreign_offset(sel keep_chapters=False, ) - assert wf._decide_strict_interleave_with_prescan(cfg) is True + assert decide_strict_interleave_with_prescan( + cfg, + resolve_mapped_tracks=resolve_mapped_tracks, + log_cb=lambda *_args: None, + ) is True def test_build_command_does_not_force_muxing_application_metadata(self, tmp_path): wf = RemuxWorkflow( @@ -577,7 +597,7 @@ def test_prepare_sync_inputs_windows_prefers_mmap_fallback(self, tmp_path, monke track_order=[(0, 0), (1, 1), (0, 2)], keep_chapters=False, ) - mapped = wf._resolve_mapped_tracks(cfg) + 
mapped = resolve_mapped_tracks(cfg) class _FakeSyncer: def __init__(self, **_kwargs): @@ -592,14 +612,19 @@ def prepare_from_mapped_tracks_mmap(self, **_kwargs): def prepare_from_mapped_tracks(self, **_kwargs): pytest.fail("temp fallback should not be used when mmap works") - monkeypatch.setattr("core.workflows.remux.FfmpegTimelineSync", _FakeSyncer) - monkeypatch.setattr("core.workflows.remux.os.name", "nt", raising=False) + monkeypatch.setattr("core.workflows.remux_timeline_sync.os.name", "nt", raising=False) - remapped, extra_inputs, live = wf._prepare_timeline_sync_inputs( + remapped, extra_inputs, live = prepare_timeline_sync_inputs( cfg, mapped, tmp_path, TaskSignals(), + allow_live=True, + ffmpeg_bin="ffmpeg", + ffmpeg_thread_args=wf._ffmpeg_thread_args(), + log_cb=lambda *_args: None, + syncer_factory=_FakeSyncer, + fallback_helper_factory=TimelineSyncFallbackHelper, ) assert live is None assert [item.path for item in extra_inputs] == [tmp_path / "mmap.mka"] @@ -623,7 +648,7 @@ def test_prepare_sync_inputs_windows_falls_back_to_temp_when_mmap_fails(self, tm track_order=[(0, 0), (1, 1), (0, 2)], keep_chapters=False, ) - mapped = wf._resolve_mapped_tracks(cfg) + mapped = resolve_mapped_tracks(cfg) class _FakeSyncer: def __init__(self, **_kwargs): @@ -638,14 +663,19 @@ def prepare_from_mapped_tracks_mmap(self, **_kwargs): def prepare_from_mapped_tracks(self, **_kwargs): return [SyncPreparedInput(key=(1, 1, "audio"), path=tmp_path / "temp.mka", input_idx=2)] - monkeypatch.setattr("core.workflows.remux.FfmpegTimelineSync", _FakeSyncer) - monkeypatch.setattr("core.workflows.remux.os.name", "nt", raising=False) + monkeypatch.setattr("core.workflows.remux_timeline_sync.os.name", "nt", raising=False) - remapped, extra_inputs, live = wf._prepare_timeline_sync_inputs( + remapped, extra_inputs, live = prepare_timeline_sync_inputs( cfg, mapped, tmp_path, TaskSignals(), + allow_live=True, + ffmpeg_bin="ffmpeg", + ffmpeg_thread_args=wf._ffmpeg_thread_args(), + 
log_cb=lambda *_args: None, + syncer_factory=_FakeSyncer, + fallback_helper_factory=TimelineSyncFallbackHelper, ) assert live is None assert [item.path for item in extra_inputs] == [tmp_path / "temp.mka"] diff --git a/tests/test_settings_panel.py b/tests/test_settings_panel.py index 9223910..d0709f0 100644 --- a/tests/test_settings_panel.py +++ b/tests/test_settings_panel.py @@ -5,6 +5,7 @@ from __future__ import annotations from pathlib import Path +from typing import Any, cast from unittest.mock import MagicMock, patch from core.i18n import translate_text @@ -16,6 +17,10 @@ def _mock_qsettings(): return inst +def _field_widget(panel: object, section: str, key: str) -> Any: + return cast(Any, panel).widget_for(section, key) + + def test_settings_panel_language_combo_shows_full_language_name(tmp_path, qt_app): import core.config as cfg_mod from core.config import AppConfig @@ -31,7 +36,7 @@ def test_settings_panel_language_combo_shows_full_language_name(tmp_path, qt_app cfg = AppConfig() panel = SettingsPanel(cfg) - combo = panel.widget_for("ui", "language") + combo = _field_widget(panel, "ui", "language") assert combo.currentData() == "fra" assert "=" not in combo.currentText() assert "Français" in combo.currentText() or "French" in combo.currentText() @@ -52,7 +57,7 @@ def test_settings_panel_writes_selected_language_to_ini(tmp_path, qt_app): cfg = AppConfig() panel = SettingsPanel(cfg) - combo = panel.widget_for("ui", "language") + combo = _field_widget(panel, "ui", "language") index = combo.findData("fra") assert index >= 0 combo.setCurrentIndex(index) @@ -77,7 +82,7 @@ def test_settings_panel_writes_selected_startup_panel_to_ini(tmp_path, qt_app): cfg = AppConfig() panel = SettingsPanel(cfg) - combo = panel.widget_for("ui", "startup_panel") + combo = _field_widget(panel, "ui", "startup_panel") index = combo.findData("encoding") assert index >= 0 combo.setCurrentIndex(index) @@ -102,7 +107,7 @@ def 
test_settings_panel_writes_startup_menu_compact_to_ini(tmp_path, qt_app): cfg = AppConfig() panel = SettingsPanel(cfg) - checkbox = panel.widget_for("ui", "startup_menu_compact") + checkbox = _field_widget(panel, "ui", "startup_menu_compact") checkbox.setChecked(True) panel._on_save_clicked() @@ -125,7 +130,7 @@ def test_settings_panel_writes_startup_logs_expanded_to_ini(tmp_path, qt_app): cfg = AppConfig() panel = SettingsPanel(cfg) - checkbox = panel.widget_for("ui", "startup_logs_expanded") + checkbox = _field_widget(panel, "ui", "startup_logs_expanded") checkbox.setChecked(True) panel._on_save_clicked() @@ -133,6 +138,174 @@ def test_settings_panel_writes_startup_logs_expanded_to_ini(tmp_path, qt_app): assert cfg.startup_logs_expanded is True +def test_settings_panel_writes_enable_file_logging_to_ini(tmp_path, qt_app): + import core.config as cfg_mod + from core.config import AppConfig + from ui.panels.settings_panel import SettingsPanel + + ini_path = tmp_path / "config.ini" + ini_path.write_text("[ui]\nenable_file_logging = false\n", encoding="utf-8") + + with patch("core.config.QSettings") as mock_qs, \ + patch("core.config._app_data_dir", return_value=tmp_path), \ + patch.object(cfg_mod, "_INI_PATH", ini_path): + mock_qs.return_value = _mock_qsettings() + cfg = AppConfig() + + panel = SettingsPanel(cfg) + checkbox = _field_widget(panel, "ui", "enable_file_logging") + checkbox.setChecked(True) + panel._on_save_clicked() + + assert "enable_file_logging = true" in ini_path.read_text(encoding="utf-8") + assert cfg.enable_file_logging is True + + +def test_settings_panel_writes_file_logging_level_to_ini(tmp_path, qt_app): + import core.config as cfg_mod + from core.config import AppConfig + from ui.panels.settings_panel import SettingsPanel + + ini_path = tmp_path / "config.ini" + ini_path.write_text("[ui]\nfile_logging_level = standard\n", encoding="utf-8") + + with patch("core.config.QSettings") as mock_qs, \ + patch("core.config._app_data_dir", 
return_value=tmp_path), \ + patch.object(cfg_mod, "_INI_PATH", ini_path): + mock_qs.return_value = _mock_qsettings() + cfg = AppConfig() + + panel = SettingsPanel(cfg) + combo = _field_widget(panel, "ui", "file_logging_level") + index = combo.findData("verbose") + assert index >= 0 + combo.setCurrentIndex(index) + panel._on_save_clicked() + + assert "file_logging_level = verbose" in ini_path.read_text(encoding="utf-8") + assert cfg.file_logging_level == "verbose" + + +def test_settings_panel_disables_file_logging_level_when_file_logging_is_off(tmp_path, qt_app): + import core.config as cfg_mod + from core.config import AppConfig + from ui.panels.settings_panel import SettingsPanel + + ini_path = tmp_path / "config.ini" + ini_path.write_text("[ui]\nenable_file_logging = false\nfile_logging_level = verbose\n", encoding="utf-8") + + with patch("core.config.QSettings") as mock_qs, \ + patch("core.config._app_data_dir", return_value=tmp_path), \ + patch.object(cfg_mod, "_INI_PATH", ini_path): + mock_qs.return_value = _mock_qsettings() + cfg = AppConfig() + + panel = SettingsPanel(cfg) + checkbox = _field_widget(panel, "ui", "enable_file_logging") + combo = _field_widget(panel, "ui", "file_logging_level") + assert combo.isEnabled() is False + + checkbox.setChecked(True) + assert combo.isEnabled() is True + + +def test_settings_panel_reload_resyncs_file_logging_level_state(tmp_path, qt_app): + import core.config as cfg_mod + from core.config import AppConfig + from ui.panels.settings_panel import SettingsPanel + + ini_path = tmp_path / "config.ini" + ini_path.write_text("[ui]\nenable_file_logging = true\nfile_logging_level = verbose\n", encoding="utf-8") + + with patch("core.config.QSettings") as mock_qs, \ + patch("core.config._app_data_dir", return_value=tmp_path), \ + patch.object(cfg_mod, "_INI_PATH", ini_path): + mock_qs.return_value = _mock_qsettings() + cfg = AppConfig() + + panel = SettingsPanel(cfg) + checkbox = _field_widget(panel, "ui", "enable_file_logging") + 
combo = _field_widget(panel, "ui", "file_logging_level") + + assert checkbox.isChecked() is True + assert combo.isEnabled() is True + + ini_path.write_text("[ui]\nenable_file_logging = false\nfile_logging_level = verbose\n", encoding="utf-8") + panel._on_reload_clicked() + + assert checkbox.isChecked() is False + assert combo.isEnabled() is False + + +def test_settings_panel_prefills_verbose_log_dir_full_path(tmp_path, qt_app): + import core.config as cfg_mod + from core.config import AppConfig + from ui.panels.settings_panel import SettingsPanel + + ini_path = tmp_path / "config.ini" + ini_path.write_text("", encoding="utf-8") + + with patch("core.config.QSettings") as mock_qs, \ + patch("core.config._app_data_dir", return_value=tmp_path), \ + patch.object(cfg_mod, "_INI_PATH", ini_path): + mock_qs.return_value = _mock_qsettings() + cfg = AppConfig() + panel = SettingsPanel(cfg) + + edit = _field_widget(panel, "ui", "verbose_log_dir") + assert edit.text() == str(tmp_path / "logs") + + +def test_settings_panel_writes_verbose_log_dir_to_ini(tmp_path, qt_app): + import core.config as cfg_mod + from core.config import AppConfig + from ui.panels.settings_panel import SettingsPanel + + ini_path = tmp_path / "config.ini" + target = tmp_path / "custom_logs" + ini_path.write_text("[ui]\nverbose_log_dir = /tmp/old_logs\n", encoding="utf-8") + + with patch("core.config.QSettings") as mock_qs, \ + patch("core.config._app_data_dir", return_value=tmp_path), \ + patch.object(cfg_mod, "_INI_PATH", ini_path): + mock_qs.return_value = _mock_qsettings() + cfg = AppConfig() + + panel = SettingsPanel(cfg) + edit = _field_widget(panel, "ui", "verbose_log_dir") + edit.setText(str(target)) + panel._on_save_clicked() + + assert f"verbose_log_dir = {target}" in ini_path.read_text(encoding="utf-8") + assert cfg.verbose_log_dir == target + + +def test_settings_panel_rejects_empty_verbose_log_dir_when_logging_enabled(tmp_path, qt_app): + import core.config as cfg_mod + from core.config import 
AppConfig + from ui.panels.settings_panel import SettingsPanel + + ini_path = tmp_path / "config.ini" + ini_path.write_text("[ui]\nenable_file_logging = true\nverbose_log_dir = /tmp/logs\n", encoding="utf-8") + + with patch("core.config.QSettings") as mock_qs, \ + patch("core.config._app_data_dir", return_value=tmp_path), \ + patch.object(cfg_mod, "_INI_PATH", ini_path): + mock_qs.return_value = _mock_qsettings() + cfg = AppConfig() + + panel = SettingsPanel(cfg) + checkbox = _field_widget(panel, "ui", "enable_file_logging") + checkbox.setChecked(True) + edit = _field_widget(panel, "ui", "verbose_log_dir") + edit.setText("") + with patch("ui.panels.settings_panel.QMessageBox.warning") as warn: + panel._on_save_clicked() + + warn.assert_called_once() + assert "verbose_log_dir = /tmp/logs" in ini_path.read_text(encoding="utf-8") + + def test_settings_panel_writes_ui_scale_percent_to_ini(tmp_path, qt_app): import core.config as cfg_mod from core.config import AppConfig @@ -148,7 +321,7 @@ def test_settings_panel_writes_ui_scale_percent_to_ini(tmp_path, qt_app): cfg = AppConfig() panel = SettingsPanel(cfg) - spin = panel.widget_for("ui", "ui_scale_percent") + spin = _field_widget(panel, "ui", "ui_scale_percent") spin.setValue(125) panel._on_save_clicked() @@ -202,7 +375,7 @@ def test_settings_panel_writes_ffmpeg_threads_to_ini(tmp_path, qt_app): cfg = AppConfig() panel = SettingsPanel(cfg) - spin = panel.widget_for("ffmpeg", "threads") + spin = _field_widget(panel, "ffmpeg", "threads") spin.setValue(18) panel._on_save_clicked() @@ -210,6 +383,29 @@ def test_settings_panel_writes_ffmpeg_threads_to_ini(tmp_path, qt_app): assert cfg.ffmpeg_threads == 18 +def test_settings_panel_writes_max_parallel_video_encodes_to_ini(tmp_path, qt_app): + import core.config as cfg_mod + from core.config import AppConfig + from ui.panels.settings_panel import SettingsPanel + + ini_path = tmp_path / "config.ini" + ini_path.write_text("[encoding]\nmax_parallel_video_encodes = 1\n", 
encoding="utf-8") + + with patch("core.config.QSettings") as mock_qs, \ + patch("core.config._app_data_dir", return_value=tmp_path), \ + patch.object(cfg_mod, "_INI_PATH", ini_path): + mock_qs.return_value = _mock_qsettings() + cfg = AppConfig() + + panel = SettingsPanel(cfg) + spin = _field_widget(panel, "encoding", "max_parallel_video_encodes") + spin.setValue(3) + panel._on_save_clicked() + + assert "max_parallel_video_encodes = 3" in ini_path.read_text(encoding="utf-8") + assert cfg.max_parallel_video_encodes == 3 + + def test_settings_panel_rerun_setup_restarts_app_on_confirmation(tmp_path, qt_app): import core.config as cfg_mod from core.config import AppConfig @@ -233,6 +429,7 @@ def test_settings_panel_rerun_setup_restarts_app_on_confirmation(tmp_path, qt_ap mock_rerun.assert_called_once_with() mock_restart.assert_called_once_with() + assert panel._status_label is not None assert panel._status_label.text() == translate_text( "Setup relancé avec succès. Un redémarrage de l'application est recommandé." 
) @@ -258,4 +455,5 @@ def test_settings_panel_rerun_setup_shows_error_message(tmp_path, qt_app): panel._on_rerun_setup_clicked() mock_warning.assert_called_once() + assert panel._status_label is not None assert "boom" in panel._status_label.text() diff --git a/tests/test_setup_and_config.py b/tests/test_setup_and_config.py index 3dbaecf..090fba0 100644 --- a/tests/test_setup_and_config.py +++ b/tests/test_setup_and_config.py @@ -19,6 +19,14 @@ def _qt_app(qt_app): return qt_app +@pytest.fixture(autouse=True) +def _isolate_ini_path(tmp_path, monkeypatch): + """Évite toute pollution par un config.ini utilisateur réel.""" + import core.config as cfg_mod + + monkeypatch.setattr(cfg_mod, "_INI_PATH", tmp_path / "config.ini") + + def test_rerun_application_setup_windows_calls_setup_sequence(tmp_path): import core.config as cfg_mod @@ -51,6 +59,23 @@ def test_rerun_application_setup_windows_calls_setup_sequence(tmp_path): fake_setup.install_python_packages.assert_not_called() +def test_write_ini_settings_keeps_legacy_ui_key_when_ui_section_is_not_saved(tmp_path, monkeypatch): + import core.config as cfg_mod + + ini_path = tmp_path / "config.ini" + ini_path.write_text( + "[ui]\nverbose_file_logging = true\n\n[tools]\nffmpeg = ffmpeg\n", + encoding="utf-8", + ) + monkeypatch.setattr(cfg_mod, "_INI_PATH", ini_path) + + cfg_mod.write_ini_settings({"tools": {"ffmpeg": "ffmpeg-custom"}}) + + content = ini_path.read_text(encoding="utf-8") + assert "verbose_file_logging = true" in content + assert "ffmpeg = ffmpeg-custom" in content + + class TestAppConfigRamBuffer: """Tests des clés INI ram_buffer_enabled / ram_buffer_threshold_pct.""" @@ -105,6 +130,56 @@ def test_ini_sets_threshold(self, tmp_path): assert cfg.ram_buffer_threshold_pct == 25 + def test_default_max_parallel_video_encodes_is_one(self, tmp_path): + """Valeur par défaut: max_parallel_video_encodes=1 (séquentiel).""" + from core.config import AppConfig + + with patch("core.config.QSettings") as mock_qs: + inst = 
MagicMock() + inst.value.side_effect = lambda key, default=None: default + mock_qs.return_value = inst + with patch("core.config._app_data_dir", return_value=tmp_path), \ + patch.dict(os.environ, {}, clear=False): + cfg = AppConfig() + + assert cfg.max_parallel_video_encodes == 1 + + def test_ini_sets_max_parallel_video_encodes(self, tmp_path): + """config.ini max_parallel_video_encodes=3 est bien lu.""" + import core.config as cfg_mod + from core.config import AppConfig + + ini_path = tmp_path / "config.ini" + ini_path.write_text("[encoding]\nmax_parallel_video_encodes = 3\n", encoding="utf-8") + + with patch("core.config.QSettings") as mock_qs: + inst = MagicMock() + inst.value.side_effect = lambda key, default=None: default + mock_qs.return_value = inst + with patch("core.config._app_data_dir", return_value=tmp_path), \ + patch.object(cfg_mod, "_INI_PATH", ini_path): + cfg = AppConfig() + + assert cfg.max_parallel_video_encodes == 3 + + def test_ini_invalid_max_parallel_video_encodes_falls_back_to_one(self, tmp_path): + """Une valeur <=0 est normalisée à 1.""" + import core.config as cfg_mod + from core.config import AppConfig + + ini_path = tmp_path / "config.ini" + ini_path.write_text("[encoding]\nmax_parallel_video_encodes = 0\n", encoding="utf-8") + + with patch("core.config.QSettings") as mock_qs: + inst = MagicMock() + inst.value.side_effect = lambda key, default=None: default + mock_qs.return_value = inst + with patch("core.config._app_data_dir", return_value=tmp_path), \ + patch.object(cfg_mod, "_INI_PATH", ini_path): + cfg = AppConfig() + + assert cfg.max_parallel_video_encodes == 1 + def test_explicit_blank_ini_uses_default_instead_of_qsettings(self, tmp_path): """Une clé présente mais vide revient au défaut documenté, pas à QSettings.""" import core.config as cfg_mod @@ -227,6 +302,20 @@ def test_default_startup_logs_expanded_is_false(self, tmp_path): assert cfg.startup_logs_expanded is False + def test_default_enable_file_logging_is_false(self, 
tmp_path): + """Sans clé explicite, le logging fichier est désactivé.""" + from core.config import AppConfig + + with patch("core.config.QSettings") as mock_qs: + inst = MagicMock() + inst.value.side_effect = lambda key, default=None: default + mock_qs.return_value = inst + with patch("core.config._app_data_dir", return_value=tmp_path), \ + patch.dict(os.environ, {}, clear=False): + cfg = AppConfig() + + assert cfg.enable_file_logging is False + def test_default_ui_scale_percent_is_100(self, tmp_path): """Sans clé explicite, l'échelle UI vaut 100%.""" import core.config as cfg_mod @@ -271,6 +360,40 @@ def test_ui_scale_percent_is_clamped_from_ini(self, tmp_path, raw_value, expecte assert cfg.ui_scale_percent == expected + def test_ui_scale_percent_invalid_ini_value_falls_back_to_default(self, tmp_path): + """Une valeur INI non numérique revient à la valeur par défaut.""" + import core.config as cfg_mod + from core.config import AppConfig + + ini_path = tmp_path / "config.ini" + ini_path.write_text("[ui]\nui_scale_percent = not_a_number\n", encoding="utf-8") + + with patch("core.config.QSettings") as mock_qs: + inst = MagicMock() + inst.value.side_effect = lambda key, default=None: default + mock_qs.return_value = inst + with patch("core.config._app_data_dir", return_value=tmp_path), \ + patch.object(cfg_mod, "_INI_PATH", ini_path): + cfg = AppConfig() + + assert cfg.ui_scale_percent == 100 + + def test_ui_scale_percent_invalid_qsettings_value_falls_back_to_default(self, tmp_path): + """Une valeur QSettings non numérique revient à la valeur par défaut.""" + from core.config import AppConfig + + with patch("core.config.QSettings") as mock_qs: + inst = MagicMock() + inst.value.side_effect = ( + lambda key, default=None: "not_a_number" if key == "ui/ui_scale_percent" else default + ) + mock_qs.return_value = inst + with patch("core.config._app_data_dir", return_value=tmp_path), \ + patch.dict(os.environ, {}, clear=False): + cfg = AppConfig() + + assert 
cfg.ui_scale_percent == 100 + def test_ui_scale_percent_save_and_reload_roundtrip(self, tmp_path): """La valeur sauvegardée en config est bien rechargée.""" import core.config as cfg_mod @@ -311,6 +434,131 @@ def test_ini_startup_logs_expanded_true_enables_expanded_logs(self, tmp_path): assert cfg.startup_logs_expanded is True + def test_ini_enable_file_logging_true_enables_file_logging(self, tmp_path): + """La clé enable_file_logging=true active l'écriture des logs dans un fichier.""" + import core.config as cfg_mod + from core.config import AppConfig + + ini_path = tmp_path / "config.ini" + ini_path.write_text("[ui]\nenable_file_logging = true\n", encoding="utf-8") + + with patch("core.config.QSettings") as mock_qs: + inst = MagicMock() + inst.value.side_effect = lambda key, default=None: default + mock_qs.return_value = inst + with patch("core.config._app_data_dir", return_value=tmp_path), \ + patch.object(cfg_mod, "_INI_PATH", ini_path): + cfg = AppConfig() + + assert cfg.enable_file_logging is True + + def test_legacy_ini_verbose_file_logging_true_maps_to_enable_file_logging(self, tmp_path): + """Compat: verbose_file_logging=true (legacy) active encore le logging fichier.""" + import core.config as cfg_mod + from core.config import AppConfig + + ini_path = tmp_path / "config.ini" + ini_path.write_text("[ui]\nverbose_file_logging = true\n", encoding="utf-8") + + with patch("core.config.QSettings") as mock_qs: + inst = MagicMock() + inst.value.side_effect = lambda key, default=None: default + mock_qs.return_value = inst + with patch("core.config._app_data_dir", return_value=tmp_path), \ + patch.object(cfg_mod, "_INI_PATH", ini_path): + cfg = AppConfig() + + assert cfg.enable_file_logging is True + assert cfg.file_logging_level == "verbose" + + def test_legacy_ini_verbose_file_logging_survives_non_ui_partial_save(self, tmp_path): + """Une sauvegarde partielle hors UI ne doit pas casser la compat legacy.""" + import core.config as cfg_mod + from core.config import 
AppConfig, write_ini_settings + + ini_path = tmp_path / "config.ini" + ini_path.write_text( + "[ui]\nverbose_file_logging = true\n\n[tools]\nffmpeg = ffmpeg\n", + encoding="utf-8", + ) + + with patch("core.config.QSettings") as mock_qs: + inst = MagicMock() + inst.value.side_effect = lambda key, default=None: default + mock_qs.return_value = inst + with patch("core.config._app_data_dir", return_value=tmp_path), \ + patch.object(cfg_mod, "_INI_PATH", ini_path): + write_ini_settings({"tools": {"ffmpeg": "ffmpeg-custom"}}) + cfg = AppConfig() + + assert cfg.enable_file_logging is True + assert cfg.file_logging_level == "verbose" + + def test_default_file_logging_level_is_standard(self, tmp_path): + """Sans clé explicite, le niveau de logging fichier est standard.""" + from core.config import AppConfig + + with patch("core.config.QSettings") as mock_qs: + inst = MagicMock() + inst.value.side_effect = lambda key, default=None: default + mock_qs.return_value = inst + with patch("core.config._app_data_dir", return_value=tmp_path), \ + patch.dict(os.environ, {}, clear=False): + cfg = AppConfig() + + assert cfg.file_logging_level == "standard" + + def test_ini_file_logging_level_is_loaded(self, tmp_path): + """La clé file_logging_level est lue depuis config.ini.""" + import core.config as cfg_mod + from core.config import AppConfig + + ini_path = tmp_path / "config.ini" + ini_path.write_text("[ui]\nfile_logging_level = verbose\n", encoding="utf-8") + + with patch("core.config.QSettings") as mock_qs: + inst = MagicMock() + inst.value.side_effect = lambda key, default=None: default + mock_qs.return_value = inst + with patch("core.config._app_data_dir", return_value=tmp_path), \ + patch.object(cfg_mod, "_INI_PATH", ini_path): + cfg = AppConfig() + + assert cfg.file_logging_level == "verbose" + + def test_default_verbose_log_dir_uses_full_current_path(self, tmp_path): + """Le dossier verbose est prérempli avec le chemin complet par défaut.""" + from core.config import AppConfig 
+ + with patch("core.config.QSettings") as mock_qs: + inst = MagicMock() + inst.value.side_effect = lambda key, default=None: default + mock_qs.return_value = inst + with patch("core.config._app_data_dir", return_value=tmp_path), \ + patch.dict(os.environ, {}, clear=False): + cfg = AppConfig() + + assert cfg.verbose_log_dir == tmp_path / "logs" + + def test_ini_verbose_log_dir_is_loaded(self, tmp_path): + """La clé verbose_log_dir est lue depuis config.ini.""" + import core.config as cfg_mod + from core.config import AppConfig + + ini_path = tmp_path / "config.ini" + target = tmp_path / "chosen_logs" + ini_path.write_text(f"[ui]\nverbose_log_dir = {target}\n", encoding="utf-8") + + with patch("core.config.QSettings") as mock_qs: + inst = MagicMock() + inst.value.side_effect = lambda key, default=None: default + mock_qs.return_value = inst + with patch("core.config._app_data_dir", return_value=tmp_path), \ + patch.object(cfg_mod, "_INI_PATH", ini_path): + cfg = AppConfig() + + assert cfg.verbose_log_dir == target + def test_work_dir_leftovers_ignores_empty_tmdb_covers(self, tmp_path): """tmdb_covers vide (même avec sous-dossiers vides) ne déclenche pas l'alerte startup.""" import core.config as cfg_mod diff --git a/tests/test_ui_scale_widgets.py b/tests/test_ui_scale_widgets.py index 80b0a8c..cc582f8 100644 --- a/tests/test_ui_scale_widgets.py +++ b/tests/test_ui_scale_widgets.py @@ -64,7 +64,8 @@ def test_log_panel_builds_at_multiple_scales(qt_app, percent): @pytest.mark.parametrize("percent", [50, 100, 150, 200]) def test_dashboard_page_builds_at_multiple_scales(tmp_path, qt_app, percent): - from ui.main_window import DashboardPage, LogLevel + from core.logging import LogLevel + from ui.main_window import DashboardPage DesignSystem.set_ui_scale(percent) cfg = _build_config(tmp_path) diff --git a/tests/test_workflows_common.py b/tests/test_workflows_common.py new file mode 100644 index 0000000..7920bff --- /dev/null +++ b/tests/test_workflows_common.py @@ -0,0 
+1,131 @@ +from __future__ import annotations + +from pathlib import Path +from types import SimpleNamespace +from typing import Any, cast + +from core.workflows.common.attachments import ( + attachment_filename_from_meta, + extension_for_mime, + mime_for_path, + sanitize_filename, +) +from core.workflows.common.ffmpeg_runtime import ( + cli_path, + default_ffmpeg_thread_count, + ffmpeg_progress_args, + normalize_ffmpeg_thread_count, + normalize_max_parallel_video_encodes, +) +from core.workflows.common.metadata import ( + disposition_value, + normalize_track_language, + normalize_track_language_from_track, + resolve_global_tags, +) +from core.workflows.common.timeline_sync import ( + append_strict_interleave_mux_flags, + append_sync_inputs, + needs_strict_interleave, + sync_cleanup_paths, +) +from core.workflows.common.track_types import ( + TrackMetaEdit, + TrackMetaPatch, + TrackOffset, + TrackTimeOffset, + TrackType, +) + + +class TestFfmpegRuntime: + def test_cli_path_preserves_named_pipe_strings(self): + named_pipe = r"\\.\pipe\mre_sync" + assert cli_path(named_pipe) == named_pipe + + def test_cli_path_normalizes_path_objects(self, tmp_path): + assert cli_path(tmp_path / "file.mkv").endswith("/file.mkv") + + def test_default_ffmpeg_thread_count_uses_75_percent_rounded_up(self, monkeypatch): + monkeypatch.setattr("core.workflows.common.ffmpeg_runtime.os.cpu_count", lambda: 8) + assert default_ffmpeg_thread_count() == 6 + + def test_normalize_ffmpeg_thread_count_preserves_zero(self): + assert normalize_ffmpeg_thread_count(0) == 0 + + def test_normalize_max_parallel_video_encodes_defaults_to_one(self): + assert normalize_max_parallel_video_encodes(None) == 1 + assert normalize_max_parallel_video_encodes(0) == 1 + + def test_ffmpeg_progress_args_are_stable(self): + assert ffmpeg_progress_args() == ["-progress", "pipe:1", "-nostats"] + + +class TestCommonAttachments: + def test_mime_for_path_and_extension_for_mime(self): + assert mime_for_path(Path("cover.jpg")) 
== "image/jpeg" + assert extension_for_mime("font/woff2") == ".woff2" + + def test_sanitize_filename_strips_directories(self): + assert sanitize_filename("../cover.jpg", "fallback.bin") == "cover.jpg" + + def test_attachment_filename_from_meta_appends_suffix_when_missing(self): + meta: dict[str, object] = {"filename": "cover", "mimetype": "image/jpeg"} + assert attachment_filename_from_meta(meta, 3) == "cover.jpg" + + +class TestCommonMetadata: + def test_resolve_global_tags_filters_blank_values_and_keeps_title(self): + tags = resolve_global_tags({"GENRE": "Drama", "EMPTY": " "}, file_title="Film") + assert tags == {"GENRE": "Drama", "title": "Film"} + + def test_normalize_track_language_blank_defaults_to_und_when_requested(self): + assert normalize_track_language("", default_und=True) == "und" + + def test_normalize_track_language_from_track_uses_title_context(self): + track = SimpleNamespace(language="fr", title="VF") + assert normalize_track_language_from_track(track) == "fr-FR" + + def test_disposition_value_supports_partial_flags(self): + value = disposition_value( + flag_default=True, + flag_forced=False, + flag_hearing_impaired=None, + flag_visual_impaired=None, + flag_original=None, + flag_commentary=None, + allow_partial=True, + ) + assert value == "default" + + +class TestCommonTimelineSync: + def test_needs_strict_interleave_detects_foreign_audio_plus_subtitle(self): + mapped_tracks = [ + SimpleNamespace(source_file_index=0, track=SimpleNamespace(track_type="video")), + SimpleNamespace(source_file_index=1, track=SimpleNamespace(track_type="audio")), + SimpleNamespace(source_file_index=0, track=SimpleNamespace(track_type="subtitle")), + ] + assert needs_strict_interleave(cast(Any, mapped_tracks)) is True + + def test_append_strict_interleave_mux_flags(self): + cmd: list[str] = [] + append_strict_interleave_mux_flags(cmd) + assert cmd == ["-max_interleave_delta", "0", "-max_muxing_queue_size", "9999"] + + def 
test_append_sync_inputs_uses_formats_and_sync_cleanup_paths_returns_paths(self, tmp_path): + sync_path = tmp_path / "sync.mka" + sync_path.touch() + cmd: list[str] = [] + append_sync_inputs(cmd, [sync_path, "pipe:sync"], input_formats=["matroska", "nut"]) + assert cmd == ["-f", "matroska", "-i", str(sync_path), "-f", "nut", "-i", "pipe:sync"] + assert sync_cleanup_paths([sync_path, "pipe:sync"]) == [sync_path] + + +class TestCommonTrackTypes: + def test_track_type_values_and_compat_aliases(self, tmp_path): + offset = TrackOffset(track_type=TrackType.AUDIO.value, source_path=tmp_path / "a.mkv", stream_index=1, offset_ms=125) + patch = TrackMetaPatch(track_order=2, language="fr-FR", title="VF") + assert TrackType.SUBTITLE.value == "subtitle" + assert isinstance(offset, TrackTimeOffset) + assert isinstance(patch, TrackMetaEdit) diff --git a/ui/file_inspector_widget.py b/ui/file_inspector_widget.py index f801440..5f4913e 100644 --- a/ui/file_inspector_widget.py +++ b/ui/file_inspector_widget.py @@ -29,7 +29,7 @@ from typing import Any from PySide6.QtCore import ( - Qt, Signal, QAbstractTableModel, QModelIndex, QObject, + Qt, Signal, QAbstractTableModel, QModelIndex, QPersistentModelIndex, QObject, ) from PySide6.QtGui import QColor, QDragEnterEvent, QDropEvent, QFont from PySide6.QtWidgets import ( @@ -72,13 +72,23 @@ def __init__( # --- Interface QAbstractTableModel --- - def rowCount(self, parent: QModelIndex = QModelIndex()) -> int: + def rowCount( + self, + parent: QModelIndex | QPersistentModelIndex = QModelIndex(), + ) -> int: return len(self._rows) - def columnCount(self, parent: QModelIndex = QModelIndex()) -> int: + def columnCount( + self, + parent: QModelIndex | QPersistentModelIndex = QModelIndex(), + ) -> int: return len(self._headers) - def data(self, index: QModelIndex, role: int = Qt.ItemDataRole.DisplayRole) -> Any: + def data( + self, + index: QModelIndex | QPersistentModelIndex, + role: int = Qt.ItemDataRole.DisplayRole, + ) -> Any: if not 
index.isValid(): return None @@ -228,7 +238,7 @@ def _video_rows(tracks: list[VideoTrack]) -> list[list[str]]: f"{t.bit_depth} bit" if t.bit_depth else "—", t.color_space or "—", t.color_transfer or "—", - t.hdr_type.label(), + t.hdr_label, t.language or "—", t.title or "—", ]) @@ -466,12 +476,15 @@ def update_from(self, info: FileInfo) -> None: self._cells["Taille"].setText(info.size_human) self._cells["Durée"].setText(info.duration_human) self._cells["Format"].setText(info.format.split(",")[0].upper()) - self._cells["HDR"].setText(info.hdr_type.label()) + primary = info.primary_video + hdr_display = primary.hdr_label if primary is not None else info.hdr_type.label() + self._cells["HDR"].setText(hdr_display) self._cells["Frames"].setText(str(info.frame_count) if info.frame_count else "—") # Coloriser le badge HDR hdr_color = { HDRType.NONE: _C.TEXT_DIM, + HDRType.HLG: "#7ed957", HDRType.HDR10: _C.INFO, HDRType.HDR10PLUS: "#4fc3f7", HDRType.DOLBY_VISION: "#ce93d8", diff --git a/ui/main_window.py b/ui/main_window.py index 5348511..8448c20 100644 --- a/ui/main_window.py +++ b/ui/main_window.py @@ -35,12 +35,12 @@ from __future__ import annotations +import json import re import subprocess import time from concurrent.futures import ThreadPoolExecutor from datetime import datetime -from enum import Enum from pathlib import Path from typing import Callable, TYPE_CHECKING, cast @@ -59,6 +59,7 @@ from core.config import AppConfig from core.i18n import apply_translations, set_current_language, translate_text +from core.logging import LogLevel, VerboseFileLogger, parse_log_level from core.runner import TaskSignals from core.subprocess_utils import subprocess_text_kwargs from core.version import APP_VERSION_LABEL, WRITING_APPLICATION_TAG @@ -80,12 +81,6 @@ # Niveaux de log # --------------------------------------------------------------------------- -class LogLevel(str, Enum): - INFO = "INFO" - OK = "OK" - WARN = "WARN" - ERROR = "ERROR" - def _level_color(level: LogLevel) -> 
str: if level == LogLevel.OK: @@ -130,6 +125,8 @@ def _level_color(level: LogLevel) -> str: "progress=", ) _STEP_PROGRESS_RE = re.compile(r"^STEP\s+\d+\s*-\s*(.+)$") +_ENCODE_INTERNAL_PROGRESS_PREFIX = "__MRE_PROGRESS__ " +_MULTI_ENCODE_LABEL_RE = re.compile(r"^ffmpeg-video-(\d+)(?:-pass(\d+))?$") def _is_encode_stage_message(line: str) -> bool: @@ -139,6 +136,142 @@ def _is_encode_stage_message(line: str) -> bool: def _is_encode_progress_noise(line: str) -> bool: return any(line.startswith(prefix) for prefix in _ENCODE_PROGRESS_NOISE_PREFIXES) + +def _parse_encode_internal_progress(line: str) -> dict[str, object] | None: + if not line.startswith(_ENCODE_INTERNAL_PROGRESS_PREFIX): + return None + payload = line[len(_ENCODE_INTERNAL_PROGRESS_PREFIX):].strip() + if not payload: + return None + try: + obj = json.loads(payload) + except json.JSONDecodeError: + return None + return obj if isinstance(obj, dict) else None + + +def _parse_multi_encode_label(label: str) -> tuple[int, int | None, int] | None: + match = _MULTI_ENCODE_LABEL_RE.match(str(label).strip()) + if match is None: + return None + order = int(match.group(1)) + pass_index_raw = match.group(2) + if pass_index_raw is None: + return order, None, 1 + pass_index = int(pass_index_raw) + return order, pass_index, 2 + + +def _state_float(state: dict[str, object], key: str, default: float = 0.0) -> float: + value = state.get(key, default) + if isinstance(value, (int, float)): + return float(value) + if isinstance(value, str): + try: + return float(value.strip()) + except ValueError: + return default + return default + + +def _config_file_logging_enabled(config: object) -> bool: + if hasattr(config, "enable_file_logging"): + return bool(getattr(config, "enable_file_logging", False)) + return bool(getattr(config, "verbose_file_logging", False)) + + +def _config_file_logging_level(config: object) -> str: + if hasattr(config, "file_logging_level"): + raw = str(getattr(config, "file_logging_level", 
"")).strip().lower() + if raw in {"standard", "verbose"}: + return raw + return "standard" + return "verbose" if bool(getattr(config, "verbose_file_logging", False)) else "standard" + + +def _config_file_logging_is_verbose(config: object) -> bool: + return _config_file_logging_enabled(config) and _config_file_logging_level(config) == "verbose" + + +def _ensure_verbose_file_logger(window: object) -> VerboseFileLogger: + logger = getattr(window, "_verbose_file_logger", None) + if isinstance(logger, VerboseFileLogger): + logger.configure( + app_data_dir=Path(getattr(window, "_config").app_data_dir), + verbose_log_dir=getattr(getattr(window, "_config"), "verbose_log_dir", None), + enabled=_config_file_logging_enabled(getattr(window, "_config")), + ) + return logger + + def _on_write_error() -> None: + log_panel = getattr(window, "_log_panel", None) + if log_panel is not None: + log_panel.log( + "Impossible d'écrire le fichier de logging.", + LogLevel.WARN, + ) + + logger = VerboseFileLogger( + app_data_dir=Path(getattr(window, "_config").app_data_dir), + verbose_log_dir=getattr(getattr(window, "_config"), "verbose_log_dir", None), + enabled=_config_file_logging_enabled(getattr(window, "_config")), + on_write_error=_on_write_error, + ) + setattr(window, "_verbose_file_logger", logger) + return logger + + +def _multi_encode_remaining_seconds(state: dict[str, object], now: float) -> float | None: + started_at = _state_float(state, "started_at", 0.0) + elapsed_wall = max(0.0, now - started_at) + if elapsed_wall <= 0: + return None + + duration_s = state.get("duration_s") + elapsed_video = state.get("elapsed_video") + if isinstance(duration_s, (int, float)) and duration_s > 0 and isinstance(elapsed_video, (int, float)) and elapsed_video > 0: + speed = float(elapsed_video) / elapsed_wall + if speed > 0: + return max(0.0, float(duration_s) - float(elapsed_video)) / speed + + total_frames = state.get("total_frames") + frame = state.get("frame") + if isinstance(total_frames, 
int) and total_frames > 0 and isinstance(frame, int) and frame > 0 and total_frames > frame: + frame_speed = frame / elapsed_wall + if frame_speed > 0: + return (total_frames - frame) / frame_speed + return None + + +def _select_multi_encode_label( + states: dict[str, dict[str, object]], + active_label: str | None, + now: float, +) -> str | None: + candidates = [ + (label, state) + for label, state in states.items() + if not bool(state.get("done", False)) + ] + if not candidates: + return None + + scored: list[tuple[int, float, float, int, str]] = [] + for label, state in candidates: + remaining_s = _multi_encode_remaining_seconds(state, now) + known_remaining = 1 if remaining_s is not None else 0 + scored.append( + ( + known_remaining, + float(remaining_s if remaining_s is not None else -1.0), + _state_float(state, "last_update", 0.0), + 1 if label == active_label else 0, + label, + ) + ) + scored.sort(reverse=True) + return scored[0][4] + # --------------------------------------------------------------------------- # LogPanel # --------------------------------------------------------------------------- @@ -1064,8 +1197,24 @@ def __init__(self, config: AppConfig) -> None: self._signals: TaskSignals | None = None self._op_start: float = 0.0 self._op_mode: str = "" # "remux" ou "encode" + self._op_encode_config: EncodeConfig | None = None self._op_encode_fps: float | None = None self._op_encode_frame: int | None = None + self._op_encode_multi_targets: dict[int, dict[str, object]] = {} + self._op_encode_multi_state: dict[str, dict[str, object]] = {} + self._op_encode_multi_active_label: str | None = None + self._op_encode_multi_reselect_timer = QTimer(self) + self._op_encode_multi_reselect_timer.setInterval(60_000) + self._op_encode_multi_reselect_timer.timeout.connect(self._reevaluate_multi_encode_progress) + self._verbose_file_logger = VerboseFileLogger( + app_data_dir=Path(self._config.app_data_dir), + verbose_log_dir=getattr(self._config, "verbose_log_dir", 
None), + enabled=_config_file_logging_enabled(self._config), + on_write_error=lambda: self._log_panel.log( + "Impossible d'écrire le fichier de logging.", + LogLevel.WARN, + ), + ) self._prep_progress_active = False self._prep_progress_value = 0 self._prep_progress_direction = 1 @@ -1401,9 +1550,13 @@ def _connect_signals(self) -> None: self._remux_panel.log_message.connect( self.log_requested, Qt.ConnectionType.QueuedConnection ) + self._remux_panel.tool_output.connect( + self._on_tool_output_requested, Qt.ConnectionType.QueuedConnection + ) # RemuxPanel → EncodePanel : pistes partagées + chemin de sortie commun self._remux_panel.video_tracks_changed.connect(self._encode_panel.set_video_tracks) self._remux_panel.audio_tracks_changed.connect(self._encode_panel.set_audio_tracks) + self._encode_panel.video_tracks_encoding_changed.connect(self._remux_panel.update_video_track_encoding) self._encode_panel.audio_track_meta_changed.connect(self._remux_panel.update_audio_track_meta) self._encode_panel.audio_track_encoding_changed.connect(self._remux_panel.update_audio_track_encoding) self._encode_panel.audio_track_add_requested.connect(self._remux_panel.add_audio_track_variant) @@ -1459,6 +1612,7 @@ def _on_run(self) -> None: self.log_requested.emit("ERROR", e) return self._op_mode = "encode" + self._op_encode_config = encode_cfg self.log_requested.emit("INFO", f"Encodage → {encode_cfg.output.name}") try: signals = self._encode_panel.run_operation(encode_cfg) @@ -1467,6 +1621,7 @@ def _on_run(self) -> None: return elif remux_cfg is not None: + self._op_encode_config = None errors = self._remux_panel.validate_config(remux_cfg) if errors: for e in errors: @@ -1492,6 +1647,9 @@ def _on_run(self) -> None: self._cancel_btn.setVisible(True) self._op_encode_fps = None self._op_encode_frame = None + self._reset_multi_encode_progress_tracking() + if self._op_mode == "encode" and self._op_encode_config is not None: + self._op_encode_multi_targets = 
self._encode_panel.get_video_progress_targets(self._op_encode_config) self._prep_progress_active = False if self._prep_progress_timer.isActive(): self._prep_progress_timer.stop() @@ -1520,213 +1678,193 @@ def _merge_remux_extras( encode_cfg: "EncodeConfig", remux_cfg: "RemuxConfig", ) -> "EncodeConfig": - """ - Enrichit l'encode config avec les informations du remux panel : - - sous-titres multi-sources (subtitle_tracks) - - attachements MKV (attachment_sources) - - balises MKV (tag_sources) - - chapitres (keep_chapters) - - éditions langue/titre de pistes (track_meta_edits) - - Retourne une EncodeConfig enrichie. Si le remux panel n'apporte rien, - retourne encode_cfg inchangé (sauf keep_chapters toujours synchronisé). - """ - from core.workflows.encode.models import EncodeConfig, TrackMetaEdit, TrackTimeOffset - - sub_tracks: list[tuple[Path, int]] = [] - attachment_streams: list[tuple[Path, int]] = [] # (source, ffprobe_stream_index) - tag_sources: list[Path] = [] - - source_by_index = {src.file_index: src for src in remux_cfg.sources} - remux_track_map: dict[tuple[Path, int], TrackEntry] = {} - remux_track_map_by_id: dict[str, TrackEntry] = {} - - for src in remux_cfg.sources: - for track in src.tracks: - remux_track_map[(src.path, track.mkv_tid)] = track - remux_track_map_by_id[track.entry_id] = track - for att in src.selected_attachments: - attachment_streams.append((src.path, att.index)) - if src.copy_tags: - tag_sources.append(src.path) - - ordered_tracks: list[tuple[Path, TrackEntry]] = [] - for item in remux_cfg.track_order: - file_index = int(item[0]) - mkv_tid = int(item[1]) - entry_id = str(item[2]).strip() if len(item) > 2 else "" - src = source_by_index.get(file_index) - if src is None: - continue - track = remux_track_map_by_id.get(entry_id) if entry_id else remux_track_map.get((src.path, mkv_tid)) - if track is None: - continue - ordered_tracks.append((src.path, track)) - - sub_tracks = [ - (src_path, track.mkv_tid) - for src_path, track in 
ordered_tracks - if track.track_type == "subtitle" - ] - - # tag_overrides depuis RemuxConfig (balises éditées dans l'UI) - # Prioritaire sur tag_sources : si présent, on ignore tag_sources pour l'encode. - tag_overrides = remux_cfg.tag_overrides - - # --- Métadonnées de pistes (langue + titre + dispositions) via FFmpeg --- - # ffmpeg peut perdre des infos de piste (langue, titre, flags) selon le - # type d'opération (ex: réencodage audio). On réécrit explicitement. - # - # Ordre des pistes dans le fichier de sortie ffmpeg : - # @1 = vidéo | @2…@N+1 = audio | @N+2… = sous-titres - track_meta_edits: list[TrackMetaEdit] = [] - track_time_offsets: list[TrackTimeOffset] = [] - - def _make_edit(track_order: int, t: TrackEntry) -> "TrackMetaEdit | None": - """Retourne un TrackMetaEdit si la piste a des infos à appliquer.""" - lang = (t.language or "").strip() - orig_lang = (t.orig_language or "").strip() - title = (t.title or "").strip() - # Une langue vidée explicitement doit rester une instruction explicite - # dans le workflow encode (FFmpeg), via "und". 
- if not lang and orig_lang and lang != orig_lang: - lang = "und" - has_flag_state = any(( - t.flag_default, - t.flag_forced, - t.flag_hearing_impaired, - t.flag_visual_impaired, - t.flag_original, - t.flag_commentary, - )) - has_flag_change = any(( - t.flag_default != t.orig_flag_default, - t.flag_forced != t.orig_flag_forced, - t.flag_hearing_impaired != t.orig_flag_hearing_impaired, - t.flag_visual_impaired != t.orig_flag_visual_impaired, - t.flag_original != t.orig_flag_original, - t.flag_commentary != t.orig_flag_commentary, - )) - if not lang and not title and not has_flag_state and not has_flag_change: - return None - return TrackMetaEdit( - track_order = track_order, - language = lang, - title = title if title else None, - flag_default = t.flag_default, - flag_forced = t.flag_forced, - flag_hearing_impaired = t.flag_hearing_impaired, - flag_visual_impaired = t.flag_visual_impaired, - flag_original = t.flag_original, - flag_commentary = t.flag_commentary, + from core.workflows.encode.remux_bridge import merge_remux_into_encode_config + + return merge_remux_into_encode_config(encode_cfg, remux_cfg) + + def _reset_multi_encode_progress_tracking(self) -> None: + if self._op_encode_multi_reselect_timer.isActive(): + self._op_encode_multi_reselect_timer.stop() + self._op_encode_multi_targets = {} + self._op_encode_multi_state = {} + self._op_encode_multi_active_label = None + + def _ensure_multi_encode_state(self, label: str) -> dict[str, object]: + state = self._op_encode_multi_state.get(label) + if state is not None: + return state + + parsed = _parse_multi_encode_label(label) + order = 0 + pass_index: int | None = None + pass_count = 1 + if parsed is not None: + order, pass_index, pass_count = parsed + target = self._op_encode_multi_targets.get(order, {}) + state = { + "label": label, + "order": order, + "pass_index": pass_index, + "pass_count": pass_count, + "duration_s": target.get("duration_s"), + "total_frames": target.get("total_frames"), + "source_name": 
target.get("source_name"), + "started_at": time.monotonic(), + "last_update": 0.0, + "fps": None, + "frame": None, + "elapsed_video": None, + "done": False, + } + self._op_encode_multi_state[label] = state + return state + + def _multi_encode_progress_parts( + self, + state: dict[str, object], + *, + now: float, + ) -> tuple[list[str], int | None]: + order_raw = state.get("order") + if isinstance(order_raw, int): + order = order_raw + elif isinstance(order_raw, float): + order = int(order_raw) + elif isinstance(order_raw, str): + try: + order = int(order_raw.strip()) + except ValueError: + order = 0 + else: + order = 0 + total_tracks = max(self._op_encode_multi_targets) if self._op_encode_multi_targets else 0 + track_label = ( + translate_text("Piste vidéo {current}/{total}", current=order, total=total_tracks) + if order > 0 and total_tracks > 1 + else translate_text("Piste vidéo") + ) + parts = [track_label] + + pass_index = state.get("pass_index") + pass_count_raw = state.get("pass_count") + if isinstance(pass_count_raw, int): + pass_count = pass_count_raw + elif isinstance(pass_count_raw, float): + pass_count = int(pass_count_raw) + elif isinstance(pass_count_raw, str): + try: + pass_count = int(pass_count_raw.strip()) + except ValueError: + pass_count = 1 + else: + pass_count = 1 + if isinstance(pass_index, int) and pass_count > 1: + parts.append( + translate_text("passe {current}/{total}", current=pass_index, total=pass_count) ) - def _append_track_offset(track_type: str, src_path: Path, stream_index: int, track: TrackEntry | None) -> None: - if track is None: - return - offset_ms = int(getattr(track, "time_shift_ms", 0) or 0) - if offset_ms == 0: - return - track_time_offsets.append(TrackTimeOffset( - track_type=track_type, - source_path=src_path, - stream_index=int(stream_index), - offset_ms=offset_ms, - )) - - def _find_track(src_path: Path, stream_index: int, track_type: str) -> TrackEntry | None: - t = remux_track_map.get((src_path, stream_index)) - if t 
is not None: - return t - # Fichier source unique : cherche uniquement par stream_index + type - for entry in remux_track_map.values(): - if entry.mkv_tid == stream_index and entry.track_type == track_type: - return entry - return None - - # @1 — piste vidéo (toujours depuis encode_cfg.source) - video_entry = _find_track(encode_cfg.source, 0, "video") - if video_entry is None: - # La vidéo peut être sur n'importe quel stream_index ; garde le premier ordre remux. - for _src_path, entry in ordered_tracks: - if entry.track_type == "video": - video_entry = entry - break - if video_entry is None: - for entry in remux_track_map.values(): - if entry.track_type == "video": - video_entry = entry - break - if video_entry is not None: - edit = _make_edit(1, video_entry) - if edit: - track_meta_edits.append(edit) - _append_track_offset("video", encode_cfg.source, 0, video_entry) - - # @2+ — pistes audio - audio_offset = 2 - for audio_order, ats in enumerate(encode_cfg.audio_tracks): - src_path = ats.source_path or encode_cfg.source - if ats.track_entry_id: - t = remux_track_map_by_id.get(ats.track_entry_id) - else: - t = _find_track(src_path, ats.stream_index, "audio") - if t is None: - continue - edit = _make_edit(audio_offset + audio_order, t) - if edit: - track_meta_edits.append(edit) - _append_track_offset("audio", src_path, ats.stream_index, t) - - # @N+2+ — pistes sous-titres - sub_offset = audio_offset + len(encode_cfg.audio_tracks) - used_sub_tracks = sub_tracks or encode_cfg.subtitle_tracks - for sub_order, (sub_path, sub_sid) in enumerate(used_sub_tracks): - t = remux_track_map.get((sub_path, sub_sid)) - if t is None: - continue - edit = _make_edit(sub_offset + sub_order, t) - if edit: - track_meta_edits.append(edit) - _append_track_offset("subtitle", sub_path, sub_sid, t) - - chapter_overrides = remux_cfg.chapter_overrides - - # Rien à fusionner et keep_chapters / chapter_overrides identiques → pas de reconstruction - if (not sub_tracks and not attachment_streams 
and not tag_sources - and tag_overrides is None - and not track_meta_edits - and not track_time_offsets - and encode_cfg.keep_chapters == remux_cfg.keep_chapters - and remux_cfg.chapter_overrides is None): - return encode_cfg - - return EncodeConfig( - source=encode_cfg.source, - output=encode_cfg.output, - video=encode_cfg.video, - audio_tracks=encode_cfg.audio_tracks, - copy_subtitles=encode_cfg.copy_subtitles if not sub_tracks else False, - subtitle_tracks=sub_tracks or encode_cfg.subtitle_tracks, - keep_chapters=remux_cfg.keep_chapters, - chapter_overrides=chapter_overrides, - attachment_streams=attachment_streams, - tag_sources=[] if tag_overrides is not None else tag_sources, - tag_overrides=tag_overrides, - track_meta_edits=track_meta_edits, - track_time_offsets=track_time_offsets, - duration_s=encode_cfg.duration_s, - copy_dv=encode_cfg.copy_dv, - copy_hdr10plus=encode_cfg.copy_hdr10plus, - dovi_profile=encode_cfg.dovi_profile, - work_dir=encode_cfg.work_dir, - file_title=encode_cfg.file_title, - extra_attachments=encode_cfg.extra_attachments, - tmdb_cover=encode_cfg.tmdb_cover, + fps_value = state.get("fps") + pct: int | None = None + duration_s = state.get("duration_s") + elapsed_video = state.get("elapsed_video") + if isinstance(duration_s, (int, float)) and duration_s > 0 and isinstance(elapsed_video, (int, float)): + pct = min(99, int(float(elapsed_video) / float(duration_s) * 100)) + else: + total_frames = state.get("total_frames") + frame = state.get("frame") + if isinstance(total_frames, int) and total_frames > 0 and isinstance(frame, int): + pct = min(99, int(frame / total_frames * 100)) + + if pct is not None: + parts.append(f"{pct}%") + if isinstance(fps_value, (int, float)) and fps_value > 0: + parts.append(f"{float(fps_value):.1f} fps") + + remaining_s = _multi_encode_remaining_seconds(state, now) + if remaining_s is not None: + parts.append(f"ETA {_fmt_eta(remaining_s)}") + return parts, pct + + def _reevaluate_multi_encode_progress(self) -> 
None: + if not self._running or self._op_mode != "encode" or not self._op_encode_multi_state: + return + now = time.monotonic() + active_label = _select_multi_encode_label( + self._op_encode_multi_state, + self._op_encode_multi_active_label, + now, ) + if active_label is None: + self._op_encode_multi_active_label = None + return + + self._op_encode_multi_active_label = active_label + state = self._op_encode_multi_state.get(active_label) + if state is None: + return + + parts, pct = self._multi_encode_progress_parts(state, now=now) + if pct is not None: + self._stop_prep_progress() + self._prog_bar.setValue(pct) + self._prog_lbl.setText(" · ".join(p for p in parts if p)) + if len(self._op_encode_multi_state) > 1 and not self._op_encode_multi_reselect_timer.isActive(): + self._op_encode_multi_reselect_timer.start() + + def _handle_encode_internal_progress(self, line: str) -> bool: + payload = _parse_encode_internal_progress(line) + if payload is None: + return False + if str(payload.get("kind") or "") != "encode_ffmpeg": + return False + + label = str(payload.get("label") or "").strip() + if not label.startswith("ffmpeg-video-"): + return True + + event = str(payload.get("event") or "").strip().lower() + raw_line = str(payload.get("line") or "") + state = self._ensure_multi_encode_state(label) + + if event == "done": + state["done"] = True + state["last_update"] = time.monotonic() + self._reevaluate_multi_encode_progress() + return True + + if raw_line.startswith("$ "): + self.log_requested.emit("INFO", raw_line) + return True + + fps_m = _FPS_RE.search(raw_line) + if fps_m: + try: + state["fps"] = float(fps_m.group(1)) + except ValueError: + state["fps"] = None + frame_m = _FRAME_RE.search(raw_line) + if frame_m: + try: + state["frame"] = int(frame_m.group(1)) + except ValueError: + state["frame"] = None + elapsed_video = ffmpeg_progress_seconds(raw_line) + if elapsed_video is not None: + state["elapsed_video"] = elapsed_video + state["last_update"] = 
time.monotonic() + self._reevaluate_multi_encode_progress() + + if _is_encode_progress_noise(raw_line): + return True + if _is_encode_stage_message(raw_line): + self._prog_lbl.setText(raw_line) + return True + self.log_requested.emit("INFO", raw_line) + return True def _on_op_progress(self, line: str) -> None: """Gère la progression selon le mode (remux ou encode).""" + self._capture_verbose_progress_line(line) if self._op_mode == "remux": if line.startswith("$ "): self._stop_prep_progress() @@ -1753,6 +1891,8 @@ def _on_op_progress(self, line: str) -> None: return self.log_requested.emit("INFO", line) else: + if self._handle_encode_internal_progress(line): + return if self._NOISE_RE.search(line): return if line.startswith("$ "): @@ -1845,6 +1985,8 @@ def _on_cancel_op(self) -> None: def _on_op_cancelled(self) -> None: self._running = False self._signals = None + self._op_encode_config = None + self._reset_multi_encode_progress_tracking() self._stop_prep_progress() self._run_btn.setEnabled(True) self._cancel_btn.setVisible(False) @@ -1857,6 +1999,8 @@ def _on_op_cancelled(self) -> None: def _on_op_finished(self, success: bool, error: str = "") -> None: self._running = False self._signals = None + self._op_encode_config = None + self._reset_multi_encode_progress_tracking() self._stop_prep_progress() self._run_btn.setEnabled(True) self._cancel_btn.setVisible(False) @@ -1877,9 +2021,12 @@ def _on_op_finished(self, success: bool, error: str = "") -> None: def _on_settings_saved(self) -> None: previous_theme = DesignSystem.current_theme() previous_scale = DesignSystem.current_ui_scale() + previous_file_logging_enabled = _config_file_logging_enabled(self._config) + previous_file_logging_level = _config_file_logging_level(self._config) self._config.reload() new_theme = DesignSystem.set_theme(self._config.theme) new_scale = DesignSystem.set_ui_scale(self._config.ui_scale_percent) + _ensure_verbose_file_logger(self) app = cast(QApplication | None, QApplication.instance()) 
if app is not None: current_font = app.font() @@ -1905,6 +2052,27 @@ def _on_settings_saved(self) -> None: ), ) self._prompt_restart_for_scale_change(new_scale) + new_file_logging_enabled = _config_file_logging_enabled(self._config) + new_file_logging_level = _config_file_logging_level(self._config) + if new_file_logging_enabled and not previous_file_logging_enabled: + self.log_requested.emit( + "INFO", + translate_text( + "Logging fichier activé ({level}) : {path}", + level=translate_text("Verbose" if new_file_logging_level == "verbose" else "Standard"), + path=str(self._verbose_log_session_path()), + ), + ) + elif previous_file_logging_enabled and not new_file_logging_enabled: + self.log_requested.emit("INFO", "Logging fichier désactivé.") + elif new_file_logging_enabled and previous_file_logging_level != new_file_logging_level: + self.log_requested.emit( + "INFO", + translate_text( + "Niveau de logging fichier : {level}", + level=translate_text("Verbose" if new_file_logging_level == "verbose" else "Standard"), + ), + ) self.log_requested.emit("OK", "Configuration appliquée depuis config.ini.") def _prompt_restart_for_scale_change(self, percent: int) -> None: @@ -1950,24 +2118,90 @@ def _on_log_collapsed(self, collapsed: bool) -> None: # Logging # ------------------------------------------------------------------ + def _verbose_log_part_path(self, index: int) -> Path: + return _ensure_verbose_file_logger(self).part_path(index) + + def _verbose_log_session_path(self) -> Path: + return _ensure_verbose_file_logger(self).session_path() + + def _prepare_verbose_log_target(self, incoming_bytes: int) -> Path: + return _ensure_verbose_file_logger(self).prepare_target(incoming_bytes) + + def _append_verbose_log_file(self, message: str, level: LogLevel) -> None: + _ensure_verbose_file_logger(self).append_application_message(message, level) + + def _append_verbose_tool_output( + self, + line: str, + *, + label: str | None = None, + ) -> None: + 
_ensure_verbose_file_logger(self).append_tool_output(line, label=label) + + def _capture_verbose_progress_line(self, line: str) -> None: + if not _config_file_logging_is_verbose(self._config): + return + + payload = _parse_encode_internal_progress(line) + if payload is not None and str(payload.get("kind") or "") == "encode_ffmpeg": + label = str(payload.get("label") or "").strip() or None + event = str(payload.get("event") or "").strip().lower() + raw_line = str(payload.get("line") or "") + if event == "done": + self._append_verbose_tool_output("done", label=label) + return + if raw_line and not raw_line.startswith("$ "): + if _is_encode_progress_noise(raw_line) or _is_encode_stage_message(raw_line): + self._append_verbose_tool_output(raw_line, label=label) + return + + if line.startswith("$ "): + return + + if self._op_mode == "remux": + if ffmpeg_progress_seconds(line) is not None or _is_encode_progress_noise(line): + self._append_verbose_tool_output(line) + return + + if self._NOISE_RE.search(line): + self._append_verbose_tool_output(line) + return + if ffmpeg_progress_seconds(line) is not None: + self._append_verbose_tool_output(line) + return + if _FRAME_RE.search(line): + total_frames = self._encode_panel.get_total_frames() + if total_frames and total_frames > 0: + self._append_verbose_tool_output(line) + return + if _is_encode_progress_noise(line): + self._append_verbose_tool_output(line) + + def _emit_log_entry(self, message: str, level: LogLevel) -> None: + self._append_verbose_log_file(message, level) + self._log_panel.log(message, level) + def _log_from_page(self, message: str, level: LogLevel) -> None: """Callback passé aux pages pour poster des logs.""" - self._log_panel.log(message, level) + self._emit_log_entry(message, level) def _on_log_requested(self, level: str, message: str) -> None: """Slot connecté au signal public log_requested(str, str).""" self._sync_prep_progress_from_log(message) - try: - lv = LogLevel(level.upper()) - except ValueError: 
- lv = LogLevel.INFO - self._log_panel.log(message, lv) + lv = parse_log_level(level) + self._emit_log_entry(message, lv) + + def _on_tool_output_requested(self, label: str, line: str) -> None: + if not _config_file_logging_is_verbose(self._config): + return + tool_label = str(label).strip() or None + self._append_verbose_tool_output(line, label=tool_label) # Raccourcis directs - def log_info(self, msg: str) -> None: self._log_panel.info(msg) - def log_ok(self, msg: str) -> None: self._log_panel.ok(msg) - def log_warn(self, msg: str) -> None: self._log_panel.warn(msg) - def log_error(self, msg: str) -> None: self._log_panel.error(msg) + def log_info(self, msg: str) -> None: self._emit_log_entry(msg, LogLevel.INFO) + def log_ok(self, msg: str) -> None: self._emit_log_entry(msg, LogLevel.OK) + def log_warn(self, msg: str) -> None: self._emit_log_entry(msg, LogLevel.WARN) + def log_error(self, msg: str) -> None: self._emit_log_entry(msg, LogLevel.ERROR) # ------------------------------------------------------------------ # Géométrie @@ -1978,7 +2212,7 @@ def _restore_geometry(self) -> None: self.restoreGeometry(self._config.window_geometry) def closeEvent(self, event) -> None: # type: ignore[override] - self._config.save_geometry(self.saveGeometry().data()) + self._config.save_geometry(bytes(self.saveGeometry().data())) self._config.save() super().closeEvent(event) @@ -1987,18 +2221,27 @@ def closeEvent(self, event) -> None: # type: ignore[override] # ------------------------------------------------------------------ def _post_init_log(self) -> None: - self._log_panel.info("Mediarecode démarré.") + self.log_info("Mediarecode démarré.") availability = self._config.all_tools_available() missing = [n for n, ok in availability.items() if not ok] if missing: - self._log_panel.warn( + self.log_warn( f"Outils manquants dans PATH : {', '.join(missing)}" ) else: - self._log_panel.ok("Tous les outils externes sont disponibles.") - self._log_panel.info( + self.log_ok("Tous les 
outils externes sont disponibles.") + if _config_file_logging_enabled(self._config): + logging_level = _config_file_logging_level(self._config) + self.log_info( + translate_text( + "Logging fichier activé ({level}) : {path}", + level=translate_text("Verbose" if logging_level == "verbose" else "Standard"), + path=str(self._verbose_log_session_path()), + ) + ) + self.log_info( f"Dossier de travail : {self._config.work_dir}" ) - self._log_panel.info( + self.log_info( f"Dossier de sortie : {self._config.output_dir}" ) diff --git a/ui/panels/encode_panel/panel.py b/ui/panels/encode_panel/panel.py index c8d26c1..5b211f6 100644 --- a/ui/panels/encode_panel/panel.py +++ b/ui/panels/encode_panel/panel.py @@ -27,14 +27,20 @@ from core.config import AppConfig from core.inspector import FileInfo, HDRType, VideoTrack from core.i18n import apply_translations, translate_text -from core.lang_tags import Rfc5646LanguageTags from core.workflows.remux_models import TrackEntry from core.runner import TaskSignals from core.workflows.encode import ( AUDIO_CODECS, HARDWARE_VIDEO_CODECS, SOFTWARE_VIDEO_CODECS, TONEMAP_ALGORITHMS, AudioTrackSettings, EncodeConfig, EncodePreset, EncodeWorkflow, HardwareEncoderDetector, - ProfileManager, QualityMode, TrackMetaEdit, VideoEncodeSettings, presets_for_codec, + ProfileManager, QualityMode, VideoEncodeSettings, VideoTrackEncodePlan, presets_for_codec, +) +from core.workflows.encode.catalog import ( + VIDEO_ENCODER_BADGES, + VIDEO_HDR_BADGE_ORDER, + encoder_badge, + is_h264_video_codec, + supports_dynamic_hdr, ) from ui.panels.encode_panel.theme import ( _C, _card, _checkbox_style, _combo_style, @@ -59,7 +65,10 @@ class EncodePanel(QWidget): audio_track_encoding_changed = Signal(object, str, int) # (entry_id, codec, bitrate_kbps) audio_track_add_requested = Signal(object, str, str, int) # (template TrackEntry, entry_id, codec, bitrate_kbps) audio_track_remove_requested = Signal(object) # (entry_id) + video_tracks_encoding_changed = Signal(object) 
_hw_detected = Signal(object, object, object) # (hw: set[str], sw: set[str], hw_ffmpeg: str) + _VIDEO_ENCODER_BADGES = VIDEO_ENCODER_BADGES + _VIDEO_HDR_BADGE_ORDER = VIDEO_HDR_BADGE_ORDER def __init__( self, @@ -78,6 +87,7 @@ def __init__( ram_buffer_enabled=config.ram_buffer_enabled, ram_buffer_threshold_pct=config.ram_buffer_threshold_pct, ffmpeg_threads=config.ffmpeg_threads, + max_parallel_video_encodes=config.max_parallel_video_encodes, parent=self, writing_application=writing_application, generate_nfo=config.generate_nfo, @@ -87,8 +97,10 @@ def __init__( self._file_info: FileInfo | None = None self._video_tracks: list[tuple[FileInfo, TrackEntry, str]] = [] self._video_settings_by_entry_id: dict[str, dict[str, object]] = {} + self._video_force_8bit_by_entry_id: dict[str, bool] = {} self._current_video_entry_id: str | None = None self._loading_video_settings = False + self._video_apply_all = False self._audio_tracks_data: list[tuple] = [] # list[tuple[AudioTrack, str, Path, TrackEntry]] self._duration_s: float | None = None self._hw_encoders: set[str] = set() @@ -250,28 +262,6 @@ def _build_video_source_card(self) -> QWidget: self._video_list.setVisible(False) self._video_placeholder.setVisible(True) - - # --- Langue de la piste vidéo de sortie --- - lang_row = QHBoxLayout() - lang_row.setContentsMargins(12, 8, 12, 8) - lang_row.setSpacing(8) - lang_lbl = QLabel("Langue") - lang_lbl.setStyleSheet( - f"color:{_C.TEXT_SEC};font-size:11px;background:transparent;min-width:52px;" - ) - self._video_lang_edit = QLineEdit() - self._video_lang_edit.setPlaceholderText("ex : fr, en, und…") - self._video_lang_edit.setFixedWidth(110) - self._video_lang_edit.setStyleSheet(_input_style()) - self._video_lang_edit.setToolTip( - "Balise langue RFC 5646 appliquée à la piste vidéo du fichier encodé" - ) - self._video_lang_edit.textChanged.connect(self._on_video_lang_changed) - lang_row.addWidget(lang_lbl) - lang_row.addWidget(self._video_lang_edit) - lang_row.addStretch() - 
cl.addLayout(lang_row) - return card # ------------------------------------------------------------------ @@ -292,10 +282,9 @@ def set_video_tracks(self, tracks: list[tuple]) -> None: self._file_info = None self.ready_changed.emit(False) self._video_list.blockSignals(False) - self._video_lang_edit.blockSignals(True) - self._video_lang_edit.clear() - self._video_lang_edit.blockSignals(False) self._current_video_entry_id = None + self._video_force_8bit_by_entry_id.clear() + self.video_tracks_encoding_changed.emit([]) self._rebuild_preview() return @@ -303,16 +292,12 @@ def set_video_tracks(self, tracks: list[tuple]) -> None: self._video_list.setVisible(True) for file_info, track, color in tracks: - hdr = self._hdr_type_for_entry(file_info, track).label() - hdr_part = ( - f" {hdr}" - if hdr not in ("SDR", "?") and hdr not in track.display_info - else "" - ) - text = f"█ {file_info.path.name} {track.codec.upper()} {track.display_info}{hdr_part}" + state = self._video_settings_by_entry_id.get(self._video_entry_id(track)) + text = self._video_source_row_text(file_info, track, state=state) item = QListWidgetItem(text) item.setData(Qt.ItemDataRole.UserRole, (file_info, track)) item.setForeground(QBrush(QColor(color))) + self._apply_video_source_item_style(item, state) self._video_list.addItem(item) active_ids = {self._video_entry_id(track) for _info, track, _color in tracks} @@ -321,6 +306,11 @@ def set_video_tracks(self, tracks: list[tuple]) -> None: for entry_id, settings in self._video_settings_by_entry_id.items() if entry_id in active_ids } + self._video_force_8bit_by_entry_id = { + entry_id: forced + for entry_id, forced in self._video_force_8bit_by_entry_id.items() + if entry_id in active_ids + } target_row = 0 if selected_entry_id is not None: for row, (_info, track, _color) in enumerate(tracks): @@ -332,6 +322,7 @@ def set_video_tracks(self, tracks: list[tuple]) -> None: self._video_list.blockSignals(False) self._adjust_video_list_height() 
self._on_video_row_changed(target_row) + self._ensure_video_states_for_active_tracks() def _adjust_video_list_height(self) -> None: """Ajuste la hauteur de la liste vidéo pour afficher exactement n lignes.""" @@ -347,9 +338,6 @@ def _on_video_row_changed(self, row: int) -> None: self._save_current_video_state() file_info, track, _color = self._video_tracks[row] self._current_video_entry_id = self._video_entry_id(track) - self._video_lang_edit.blockSignals(True) - self._video_lang_edit.setText(track.language or "") - self._video_lang_edit.blockSignals(False) self._apply_file_info(file_info, track) def _apply_file_info(self, info: FileInfo, track: TrackEntry | None = None) -> None: @@ -379,12 +367,17 @@ def _apply_file_info(self, info: FileInfo, track: TrackEntry | None = None) -> N else: self._update_passthrough_controls(auto_check=True) self._save_current_video_state() - hdr = selected_video.hdr_type if selected_video is not None else info.hdr_type + if self._video_apply_all: + self._propagate_current_video_state_to_all(force_current=False) + hdr_lbl = ( + selected_video.hdr_label if selected_video is not None + else (info.primary_video.hdr_label if info.primary_video else info.hdr_type.label()) + ) self.log_message.emit( "OK", f"{info.path.name} — " f"{len(info.video_tracks)}V {len(info.audio_tracks)}A " - f"{len(info.subtitle_tracks)}S {hdr.label()}", + f"{len(info.subtitle_tracks)}S {hdr_lbl}", ) self._rebuild_preview() @@ -431,6 +424,11 @@ def _build_video_card(self) -> QWidget: self._populate_codec_combo() self._codec_combo.currentIndexChanged.connect(self._on_codec_changed) r1.addWidget(self._codec_combo) + self._apply_all_video_cb = QCheckBox("Appliquer à toutes les pistes") + self._apply_all_video_cb.setChecked(False) + self._apply_all_video_cb.setStyleSheet(_checkbox_style()) + self._apply_all_video_cb.toggled.connect(self._on_apply_all_video_toggled) + r1.addWidget(self._apply_all_video_cb) r1.addStretch() cl.addLayout(r1) @@ -699,8 +697,14 @@ def 
_build_profiles_card(self) -> QWidget: def _prefill_hdr_meta(self, raw: dict) -> None: """Extrait master_display et max_cll depuis le side_data_list ffprobe.""" - self._master_display.clear() - self._max_cll.clear() + master_display, max_cll = self._extract_hdr_meta_fields(raw) + self._master_display.setText(master_display) + self._max_cll.setText(max_cll) + + @staticmethod + def _extract_hdr_meta_fields(raw: dict) -> tuple[str, str]: + master_display = "" + max_cll = "" def _rat(v) -> float: """Parse un rationnel ffprobe '35400/50000' ou un float direct.""" @@ -727,16 +731,17 @@ def _rat(v) -> float: f"R({c(rx)},{c(ry)})" f"WP({c(wx)},{c(wy)})" f"L({l(lmax)},{l(lmin)})") - self._master_display.setText(md) + master_display = md except Exception: pass elif sd.get("side_data_type") == "Content light level metadata": try: - maxcll = int(sd.get("max_content", 0)) - maxfall = int(sd.get("max_average", 0)) - self._max_cll.setText(f"{maxcll},{maxfall}") + max_content = int(sd.get("max_content", 0)) + max_average = int(sd.get("max_average", 0)) + max_cll = f"{max_content},{max_average}" except Exception: pass + return master_display, max_cll # ------------------------------------------------------------------ # Détection encodeurs matériels @@ -813,6 +818,14 @@ def _on_codec_changed(self, _idx: int = 0) -> None: self._update_passthrough_controls() self._rebuild_preview() + def _on_apply_all_video_toggled(self, checked: bool) -> None: + self._video_apply_all = bool(checked) + if checked: + self._propagate_current_video_state_to_all() + else: + self._save_current_video_state() + self._rebuild_preview() + def _on_dv_toggle(self, _state: int) -> None: self._dovi_profile_widget.setVisible(self._copy_dv_cb.isChecked()) self._rebuild_preview() @@ -845,9 +858,9 @@ def _update_passthrough_controls(self, *, auto_check: bool = False) -> None: has_static_hdr = bool(self._master_display.text().strip()) self._inject_hdr_cb.setChecked(has_static_hdr) - if not dv_ok: + if not dv_ok and 
not self._video_apply_all: self._copy_dv_cb.setChecked(False) - if not hdr10plus_ok: + if not hdr10plus_ok and not self._video_apply_all: self._copy_hdr10plus_cb.setChecked(False) def _on_mode_changed(self, _idx: int = 0) -> None: @@ -870,16 +883,6 @@ def _on_tonemap_toggle(self, _state: int) -> None: self._inject_hdr_cb.setChecked(False) self._rebuild_preview() - def _on_video_lang_changed(self, text: str) -> None: - """Corrige silencieusement la casse du code langue vidéo, puis rafraîchit l'aperçu.""" - canonical = Rfc5646LanguageTags.normalize(text.strip()) - if canonical is not None and canonical != text.strip(): - self._video_lang_edit.blockSignals(True) - self._video_lang_edit.setText(canonical) - self._video_lang_edit.blockSignals(False) - self._save_current_video_state() - self._rebuild_preview() - # ------------------------------------------------------------------ # Aperçu commande # ------------------------------------------------------------------ @@ -1002,6 +1005,64 @@ def get_total_frames(self) -> "int | None": return frames return None + def get_video_progress_targets(self, config: "EncodeConfig") -> dict[int, dict[str, object]]: + """ + Retourne les métriques de progression par piste vidéo routée. + + Clé : index 1-based de la piste dans `config.video_tracks`. + Valeurs : `duration_s`, `total_frames`, `source_name`. 
+ """ + targets: dict[int, dict[str, object]] = {} + video_tracks = self._routing_video_tracks(config) + if not video_tracks: + return targets + + entry_map: dict[str, tuple[FileInfo, TrackEntry]] = {} + source_map: dict[tuple[Path, int], tuple[FileInfo, TrackEntry]] = {} + for info, track, _color in self._video_tracks: + entry_map[self._video_entry_id(track)] = (info, track) + source_map[(Path(info.path), int(track.mkv_tid))] = (info, track) + + for index, video in enumerate(video_tracks, start=1): + info: FileInfo | None = None + track: TrackEntry | None = None + entry_id = str(getattr(video, "track_entry_id", "") or "").strip() + source_path = Path(getattr(video, "source_path", None) or config.source) + stream_index = int(getattr(video, "stream_index", 0) or 0) + + if entry_id: + resolved = entry_map.get(entry_id) + if resolved is not None: + info, track = resolved + if info is None: + resolved = source_map.get((source_path, stream_index)) + if resolved is not None: + info, track = resolved + if info is None and self._file_info is not None and Path(self._file_info.path) == source_path: + info = self._file_info + + duration_s: float | None = None + total_frames: int | None = None + if info is not None: + resolved_track = self._video_track_for_entry(info, track) + duration_s = ( + getattr(resolved_track, "duration_s", None) + or getattr(info, "duration_s", None) + or config.duration_s + ) + frames_obj = getattr(info, "frame_count", None) + if isinstance(frames_obj, int) and frames_obj > 0: + total_frames = frames_obj + else: + duration_s = config.duration_s + + targets[index] = { + "duration_s": duration_s if isinstance(duration_s, (int, float)) and duration_s > 0 else None, + "total_frames": total_frames, + "source_name": source_path.name, + } + return targets + def run_operation(self, config: "EncodeConfig") -> "TaskSignals": """Lance l'encodage et retourne les signaux de progression.""" # MainWindow valide juste avant l'appel ; éviter un second passage I/O @@ 
-1014,12 +1075,20 @@ def validate_config(self, config: "EncodeConfig") -> list[str]: def is_pure_copy(self, config: "EncodeConfig") -> bool: """True si tout est en copie et qu'aucune transformation vidéo n'est demandée.""" - v = config.video + video_tracks = self._routing_video_tracks(config) + if not video_tracks: + return False return ( - v.codec == "copy" - and all(a.codec == "copy" for a in config.audio_tracks) - and not v.inject_hdr_meta - and not v.tonemap_to_sdr + all( + str(getattr(video, "codec", "") or "").strip().lower() == "copy" + and not bool(getattr(video, "inject_hdr_meta", False)) + and not bool(getattr(video, "tonemap_to_sdr", False)) + for video in video_tracks + ) + and all( + str(getattr(audio, "codec", "") or "").strip().lower() == "copy" + for audio in getattr(config, "audio_tracks", []) or [] + ) ) # ------------------------------------------------------------------ @@ -1033,6 +1102,174 @@ def _video_entry_id(self, track: TrackEntry) -> str: return track.source_entry_id return f"{track.file_id}:{track.track_type}:{track.mkv_tid}" + @classmethod + def _is_h264_codec(cls, codec: str) -> bool: + return is_h264_video_codec(codec) + + @classmethod + def _is_dynamic_hdr_codec(cls, codec: str) -> bool: + return supports_dynamic_hdr(codec) + + @staticmethod + def _source_has_dv(source_hdr: HDRType) -> bool: + return source_hdr in (HDRType.DOLBY_VISION, HDRType.DOLBY_VISION_HDR10PLUS) + + @staticmethod + def _source_has_hdr10plus(source_hdr: HDRType) -> bool: + return source_hdr in (HDRType.HDR10PLUS, HDRType.DOLBY_VISION_HDR10PLUS) + + def _effective_dynamic_hdr_flags( + self, + state: dict[str, object], + *, + source_hdr: HDRType, + target_codec: str | None = None, + ) -> tuple[bool, bool]: + codec = ( + str(target_codec or "").strip().lower() + if target_codec is not None + else self._video_state_target_codec(state) + ) + supports_dynamic_hdr = self._is_dynamic_hdr_codec(codec) + copy_dv = bool(state.get("copy_dv")) and supports_dynamic_hdr and 
self._source_has_dv(source_hdr) + copy_hdr10plus = bool(state.get("copy_hdr10plus")) and supports_dynamic_hdr and self._source_has_hdr10plus(source_hdr) + return copy_dv, copy_hdr10plus + + def _normalized_video_state_for_track( + self, + *, + info: FileInfo, + track: TrackEntry, + state: dict[str, object], + ) -> dict[str, object]: + normalized = self._copy_video_state(state) + source_hdr = self._hdr_type_for_entry(info, track) + target_codec = self._video_state_target_codec(normalized) + copy_dv, copy_hdr10plus = self._effective_dynamic_hdr_flags( + normalized, + source_hdr=source_hdr, + target_codec=target_codec, + ) + normalized["copy_dv"] = copy_dv + normalized["copy_hdr10plus"] = copy_hdr10plus + return normalized + + def _default_video_state_for_track( + self, + *, + info: FileInfo, + track: TrackEntry, + ) -> dict[str, object]: + source_hdr = self._hdr_type_for_entry(info, track) + source_video = self._video_track_for_entry(info, track) + raw = source_video.raw if source_video is not None else {} + master_display, max_cll = self._extract_hdr_meta_fields(raw) + state: dict[str, object] = { + "codec": "copy", + "quality_mode": QualityMode.CRF, + "preset": "slow", + "crf": 18, + "bitrate_kbps": "5000", + "target_size_mb": "4000", + "extra_params": "", + "inject_hdr_meta": False, + "master_display": master_display, + "max_cll": max_cll, + "copy_dv": self._source_has_dv(source_hdr), + "copy_hdr10plus": self._source_has_hdr10plus(source_hdr), + "dovi_profile": "0", + "tonemap_to_sdr": False, + "tonemap_algorithm": "hable", + } + return self._normalized_video_state_for_track( + info=info, + track=track, + state=state, + ) + + def _should_propagate_global_state(self, state: dict[str, object] | None) -> bool: + if not self._video_apply_all or state is None: + return False + return self._video_state_target_codec(state) != "copy" + + def _video_source_bit_depth(self, info: FileInfo, track: TrackEntry) -> int: + video_track = self._video_track_for_entry(info, track) + 
if video_track is None: + return 8 + try: + bit_depth = int(getattr(video_track, "bit_depth", 8) or 8) + except (TypeError, ValueError): + bit_depth = 8 + return bit_depth if bit_depth > 0 else 8 + + def _video_force_8bit_for_codec( + self, + info: FileInfo, + track: TrackEntry, + codec: str, + ) -> bool: + return self._is_h264_codec(codec) and self._video_source_bit_depth(info, track) > 8 + + def _video_source_row_text( + self, + info: FileInfo, + track: TrackEntry, + *, + state: dict[str, object] | None, + ) -> str: + source_codec = (track.orig_codec or track.codec).upper() + text = f"█ {info.path.name} {source_codec} {track.display_info}" + if state is None: + return text + + plan = self._video_plan_from_state( + entry_id=self._video_entry_id(track), + state=state, + source_video=self._video_track_for_entry(info, track), + ) + badges: list[str] = [] + target_codec = str(plan.target_codec or "copy").strip().lower() + if target_codec != "copy": + badges.append(encoder_badge(target_codec)) + if self._video_force_8bit_for_codec(info, track, target_codec): + badges.append("8-bit") + badges.extend(self._sorted_video_hdr_badges(plan.hdr_badges)) + if badges: + text += " " + " ".join(f"[{badge}]" for badge in badges) + return text + + def _sorted_video_hdr_badges(self, badges: tuple[str, ...]) -> list[str]: + order = {badge: idx for idx, badge in enumerate(self._VIDEO_HDR_BADGE_ORDER)} + unique = list(dict.fromkeys(str(badge or "").strip().upper() for badge in badges if str(badge or "").strip())) + return sorted(unique, key=lambda badge: (order.get(badge, len(order)), badge)) + + def _refresh_video_source_rows(self) -> None: + if not hasattr(self, "_video_list"): + return + for row, (file_info, track, _color) in enumerate(self._video_tracks): + item = self._video_list.item(row) + if item is None: + continue + entry_id = self._video_entry_id(track) + state = self._video_settings_by_entry_id.get(entry_id) + item.setText(self._video_source_row_text(file_info, track, 
state=state)) + self._apply_video_source_item_style(item, state) + + @staticmethod + def _video_state_target_codec(state: dict[str, object] | None) -> str: + if state is None: + return "copy" + return str(state.get("codec") or "copy").strip().lower() + + def _apply_video_source_item_style( + self, + item: QListWidgetItem, + state: dict[str, object] | None, + ) -> None: + font = item.font() + font.setBold(self._video_state_target_codec(state) != "copy") + item.setFont(font) + def _video_track_for_entry( self, info: FileInfo, @@ -1067,12 +1304,55 @@ def _set_combo_data(self, combo: QComboBox, value: object) -> None: combo.setCurrentIndex(idx) return - def _save_current_video_state(self) -> None: - if self._loading_video_settings or self._current_video_entry_id is None: + def _active_video_entry_ids(self) -> list[str]: + return [self._video_entry_id(track) for _info, track, _color in self._video_tracks] + + @staticmethod + def _routing_video_tracks(config: object) -> list[object]: + tracks = list(getattr(config, "video_tracks", []) or []) + if tracks: + return tracks + primary = getattr(config, "video", None) + return [primary] if primary is not None else [] + + def _ensure_video_states_for_active_tracks(self) -> None: + """Garantit un état UI pour chaque piste vidéo active du remux.""" + missing_tracks = [ + (info, track) + for info, track, _color in self._video_tracks + if self._video_entry_id(track) not in self._video_settings_by_entry_id + ] + if not missing_tracks: return - if not hasattr(self, "_codec_combo") or not hasattr(self, "_copy_dv_cb"): + + template_state = ( + self._video_settings_by_entry_id.get(self._current_video_entry_id) + if self._current_video_entry_id is not None + else None + ) + if template_state is None and self._video_settings_by_entry_id: + template_state = next(iter(self._video_settings_by_entry_id.values())) + if template_state is None and hasattr(self, "_codec_combo") and hasattr(self, "_copy_dv_cb"): + template_state = 
self._current_video_state() + if template_state is None: return - self._video_settings_by_entry_id[self._current_video_entry_id] = { + + propagate_global = self._should_propagate_global_state(template_state) + for info, track in missing_tracks: + entry_id = self._video_entry_id(track) + if propagate_global: + state = self._normalized_video_state_for_track( + info=info, + track=track, + state=template_state, + ) + else: + state = self._default_video_state_for_track(info=info, track=track) + self._video_settings_by_entry_id[entry_id] = state + self._emit_video_encoding_plans() + + def _current_video_state(self) -> dict[str, object]: + return { "codec": self._combo_data(self._codec_combo), "quality_mode": self._combo_data(self._mode_combo), "preset": self._combo_data(self._preset_combo), @@ -1088,16 +1368,193 @@ def _save_current_video_state(self) -> None: "dovi_profile": self._combo_data(self._dovi_profile_combo), "tonemap_to_sdr": self._tonemap_cb.isChecked(), "tonemap_algorithm": self._combo_data(self._tonemap_algo), - "language": self._video_lang_edit.text(), } + def _copy_video_state(self, state: dict[str, object]) -> dict[str, object]: + return dict(state) + + def _propagate_current_video_state_to_all(self, *, force_current: bool = True) -> None: + if not self._video_tracks: + return + if force_current: + self._save_current_video_state() + source_id = self._current_video_entry_id or self._active_video_entry_ids()[0] + state = self._video_settings_by_entry_id.get(source_id) + if state is None: + state = self._current_video_state() + for entry_id in self._active_video_entry_ids(): + self._video_settings_by_entry_id[entry_id] = self._copy_video_state(state) + self._emit_video_encoding_plans() + + @staticmethod + def _quality_value_summary(mode: QualityMode, state: dict[str, object]) -> str: + if mode == QualityMode.CRF: + return str(state.get("crf") or 18) + if mode == QualityMode.BITRATE: + return f"{state.get('bitrate_kbps') or '5000'} kbps" + return 
f"{state.get('target_size_mb') or '4000'} Mo" + + @staticmethod + def _summary_mode_label(mode: QualityMode) -> str: + return { + QualityMode.CRF: "CRF", + QualityMode.BITRATE: "Débit", + QualityMode.SIZE: "Taille", + }.get(mode, "CRF") + + def _video_hdr_badges_from_state( + self, + state: dict[str, object], + *, + source_video: VideoTrack | None, + ) -> tuple[str, ...]: + source_hdr = source_video.hdr_type if source_video is not None else HDRType.NONE + target_codec = self._video_state_target_codec(state) + copy_dv, copy_hdr10plus = self._effective_dynamic_hdr_flags( + state, + source_hdr=source_hdr, + target_codec=target_codec, + ) + if bool(state.get("tonemap_to_sdr")): + return ("SDR",) + + badges: list[str] = [] + if bool(state.get("inject_hdr_meta")): + badges.append("HDR") + if copy_dv: + badges.append("DV") + if copy_hdr10plus: + badges.append("10+") + + if badges: + return tuple(badges) + + if target_codec != "copy": + return () + + if source_hdr == HDRType.HDR10: + return ("HDR",) + if source_hdr == HDRType.HDR10PLUS: + return ("10+",) + if source_hdr == HDRType.HLG: + return ("HLG",) + if source_hdr == HDRType.DOLBY_VISION: + compat_label = source_video.dovi_compat_label if source_video is not None else None + if compat_label == "HDR10": + return ("DV", "HDR") + if compat_label == "HLG": + return ("DV", "HLG") + # P8.0 (compat_id=0) ou P8.2 (SDR) → pas de badge fallback HDR + return ("DV",) + if source_hdr == HDRType.DOLBY_VISION_HDR10PLUS: + return ("DV", "10+") + return () + + def _video_plan_from_state( + self, + *, + entry_id: str, + state: dict[str, object], + source_video: VideoTrack | None, + ) -> VideoTrackEncodePlan: + source_hdr = source_video.hdr_type if source_video is not None else HDRType.NONE + codec = str(state.get("codec") or "copy") + target_codec = str(codec or "copy").strip().lower() + copy_dv, copy_hdr10plus = self._effective_dynamic_hdr_flags( + state, + source_hdr=source_hdr, + target_codec=target_codec, + ) + mode = 
state.get("quality_mode") or QualityMode.CRF + if not isinstance(mode, QualityMode): + mode = QualityMode(str(mode)) + if codec == "copy": + summary = "Copy" + else: + summary = " - ".join([ + codec, + str(state.get("preset") or "slow"), + self._summary_mode_label(mode), + f"({self._quality_value_summary(mode, state)})", + ]) + + is_modified = bool( + codec != "copy" + or bool(state.get("inject_hdr_meta")) + or bool(state.get("tonemap_to_sdr")) + or bool(state.get("extra_params")) + or (codec != "copy" and copy_dv) + or (codec != "copy" and copy_hdr10plus) + ) + return VideoTrackEncodePlan( + track_entry_id=entry_id, + codec_summary=summary, + target_codec=codec, + hdr_badges=self._video_hdr_badges_from_state(state, source_video=source_video), + is_modified=is_modified, + ) + + def _emit_video_encoding_plans(self) -> None: + plans: list[VideoTrackEncodePlan] = [] + next_force_8bit: dict[str, bool] = {} + for index, (info, track, _color) in enumerate(self._video_tracks, start=1): + entry_id = self._video_entry_id(track) + state = self._video_settings_by_entry_id.get(entry_id) + if state is None: + continue + target_codec = self._video_state_target_codec(state) + force_8bit = self._video_force_8bit_for_codec(info, track, target_codec) + previous_force_8bit = self._video_force_8bit_by_entry_id.get(entry_id) + if force_8bit != previous_force_8bit: + if force_8bit: + self.log_message.emit( + "WARN", + translate_text( + "Précheck piste vidéo #{index} : source {depth}-bit + codec {codec} → bascule auto en 8-bit.", + index=index, + depth=self._video_source_bit_depth(info, track), + codec=target_codec.upper(), + ), + ) + elif previous_force_8bit: + self.log_message.emit( + "INFO", + translate_text( + "Précheck piste vidéo #{index} : retour au mode source (8-bit auto désactivé).", + index=index, + ), + ) + next_force_8bit[entry_id] = force_8bit + plans.append( + self._video_plan_from_state( + entry_id=entry_id, + state=state, + source_video=self._video_track_for_entry(info, 
track), + ) + ) + self._video_force_8bit_by_entry_id = next_force_8bit + self._refresh_video_source_rows() + self.video_tracks_encoding_changed.emit(plans) + + def _save_current_video_state(self) -> None: + if self._loading_video_settings or self._current_video_entry_id is None: + return + if not hasattr(self, "_codec_combo") or not hasattr(self, "_copy_dv_cb"): + return + state = self._current_video_state() + self._video_settings_by_entry_id[self._current_video_entry_id] = state + if self._video_apply_all: + for entry_id in self._active_video_entry_ids(): + self._video_settings_by_entry_id[entry_id] = self._copy_video_state(state) + self._emit_video_encoding_plans() + def _apply_video_state(self, state: dict[str, object]) -> None: self._loading_video_settings = True try: self._set_combo_data(self._codec_combo, state.get("codec")) self._set_combo_data(self._mode_combo, state.get("quality_mode")) self._set_combo_data(self._preset_combo, state.get("preset")) - self._crf_spin.setValue(int(state.get("crf") or self._crf_spin.value())) + self._crf_spin.setValue(self._state_int(state, "crf", self._crf_spin.value())) self._bitrate_edit.setText(str(state.get("bitrate_kbps") or "5000")) self._size_edit.setText(str(state.get("target_size_mb") or "4000")) self._extra_params.setText(str(state.get("extra_params") or "")) @@ -1109,9 +1566,6 @@ def _apply_video_state(self, state: dict[str, object]) -> None: self._set_combo_data(self._dovi_profile_combo, state.get("dovi_profile")) self._tonemap_cb.setChecked(bool(state.get("tonemap_to_sdr"))) self._set_combo_data(self._tonemap_algo, state.get("tonemap_algorithm")) - self._video_lang_edit.blockSignals(True) - self._video_lang_edit.setText(str(state.get("language") or "")) - self._video_lang_edit.blockSignals(False) finally: self._loading_video_settings = False @@ -1124,12 +1578,16 @@ def _current_video_settings(self) -> VideoEncodeSettings: video_source = self._file_info.path if self._file_info is not None else None stream_index = 0 
track_entry_id = self._current_video_entry_id + selected_file_info: FileInfo | None = None + selected_track: TrackEntry | None = None row = self._video_list.currentRow() if hasattr(self, "_video_list") else -1 if 0 <= row < len(self._video_tracks): file_info, track, _color = self._video_tracks[row] video_source = file_info.path stream_index = int(track.mkv_tid) track_entry_id = self._video_entry_id(track) + selected_file_info = file_info + selected_track = track codec = self._codec_combo.currentData() or "libx265" mode = self._mode_combo.currentData() or QualityMode.CRF preset = self._preset_combo.currentData() or "slow" @@ -1141,6 +1599,21 @@ def _current_video_settings(self) -> VideoEncodeSettings: size = int(self._size_edit.text()) except ValueError: size = 4000 + force_8bit = bool( + selected_file_info is not None + and selected_track is not None + and self._video_force_8bit_for_codec(selected_file_info, selected_track, str(codec)) + ) + source_hdr = ( + self._hdr_type_for_entry(selected_file_info, selected_track) + if selected_file_info is not None and selected_track is not None + else HDRType.NONE + ) + copy_dv, copy_hdr10plus = self._effective_dynamic_hdr_flags( + self._current_video_state(), + source_hdr=source_hdr, + target_codec=str(codec), + ) return VideoEncodeSettings( stream_index=stream_index, source_path=video_source, @@ -1152,13 +1625,115 @@ def _current_video_settings(self) -> VideoEncodeSettings: target_size_mb=size, preset=preset, extra_params=self._extra_params.text().strip(), + force_8bit=force_8bit, inject_hdr_meta=self._inject_hdr_cb.isChecked(), master_display=self._master_display.text().strip(), max_cll=self._max_cll.text().strip(), + copy_dv=copy_dv, + copy_hdr10plus=copy_hdr10plus, + dovi_profile=self._dovi_profile_combo.currentData() or "0", tonemap_to_sdr=self._tonemap_cb.isChecked(), tonemap_algorithm=self._tonemap_algo.currentData() or "hable", ) + def _video_settings_from_state( + self, + *, + file_info: FileInfo, + track: 
TrackEntry, + state: dict[str, object], + ) -> VideoEncodeSettings: + mode = state.get("quality_mode") or QualityMode.CRF + if not isinstance(mode, QualityMode): + mode = QualityMode(str(mode)) + bitrate = self._state_int(state, "bitrate_kbps", 5000) + size = self._state_int(state, "target_size_mb", 4000) + codec = str(state.get("codec") or "libx265") + source_hdr = self._hdr_type_for_entry(file_info, track) + copy_dv, copy_hdr10plus = self._effective_dynamic_hdr_flags( + state, + source_hdr=source_hdr, + target_codec=codec, + ) + return VideoEncodeSettings( + stream_index=int(track.mkv_tid), + source_path=file_info.path, + track_entry_id=self._video_entry_id(track), + codec=codec, + quality_mode=mode, + crf=self._state_int(state, "crf", 18), + bitrate_kbps=bitrate, + target_size_mb=size, + preset=str(state.get("preset") or "slow"), + extra_params=str(state.get("extra_params") or "").strip(), + force_8bit=self._video_force_8bit_for_codec(file_info, track, codec), + inject_hdr_meta=bool(state.get("inject_hdr_meta")), + master_display=str(state.get("master_display") or "").strip(), + max_cll=str(state.get("max_cll") or "").strip(), + copy_dv=copy_dv, + copy_hdr10plus=copy_hdr10plus, + dovi_profile=str(state.get("dovi_profile") or "0"), + tonemap_to_sdr=bool(state.get("tonemap_to_sdr")), + tonemap_algorithm=str(state.get("tonemap_algorithm") or "hable"), + ) + + @staticmethod + def _state_int(state: dict[str, object], key: str, default: int) -> int: + value = state.get(key, default) + if isinstance(value, bool): + return int(value) + if isinstance(value, int): + return value + if isinstance(value, float): + return int(value) + if isinstance(value, str): + try: + return int(value.strip()) + except ValueError: + return default + return default + + def _current_video_settings_list(self) -> list[VideoEncodeSettings]: + self._save_current_video_state() + settings: list[VideoEncodeSettings] = [] + template_state = ( + 
self._video_settings_by_entry_id.get(self._current_video_entry_id) + if self._current_video_entry_id is not None + else None + ) + if template_state is None and self._video_settings_by_entry_id: + template_state = next(iter(self._video_settings_by_entry_id.values())) + if template_state is None and hasattr(self, "_codec_combo") and hasattr(self, "_copy_dv_cb"): + template_state = self._current_video_state() + + for file_info, track, _color in self._video_tracks: + entry_id = self._video_entry_id(track) + state = self._video_settings_by_entry_id.get(entry_id) + if state is None: + if entry_id == self._current_video_entry_id: + settings.append(self._current_video_settings()) + continue + if self._should_propagate_global_state(template_state): + state = self._normalized_video_state_for_track( + info=file_info, + track=track, + state=template_state or {}, + ) + else: + state = self._default_video_state_for_track( + info=file_info, + track=track, + ) + self._video_settings_by_entry_id[entry_id] = self._copy_video_state(state) + settings.append( + self._video_settings_from_state( + file_info=file_info, + track=track, + state=state, + ) + ) + return settings + def set_output_provider(self, provider: Callable[[], "Path | None"]) -> None: """ Fournit un callable qui retourne le chemin de sortie courant (depuis RemuxPanel). 
@@ -1207,23 +1782,25 @@ def _current_config(self) -> EncodeConfig | None: output = self._output_provider() if output is None: return None - video_lang = self._video_lang_edit.text().strip() - track_meta_edits = [TrackMetaEdit(track_order=1, language=video_lang)] if video_lang else [] + video_tracks = self._current_video_settings_list() + if not video_tracks: + return None + primary_source = video_tracks[0].source_path or self._file_info.path return EncodeConfig( - source=self._file_info.path, + source=primary_source, output=output, - video=self._current_video_settings(), + video=video_tracks[0], + video_tracks=video_tracks, audio_tracks=self._audio_table.current_audio_settings(), copy_subtitles=True, duration_s=self._duration_s, - copy_dv=self._copy_dv_cb.isChecked(), - copy_hdr10plus=self._copy_hdr10plus_cb.isChecked(), - dovi_profile=self._dovi_profile_combo.currentData() or "0", + copy_dv=video_tracks[0].copy_dv, + copy_hdr10plus=video_tracks[0].copy_hdr10plus, + dovi_profile=video_tracks[0].dovi_profile, work_dir=self._config.work_dir, file_title=self._file_title_provider(), extra_attachments=self._extra_attachments_provider(), tmdb_cover=self._tmdb_cover_provider(), - track_meta_edits=track_meta_edits, tag_overrides=self._tag_overrides_provider(), chapter_overrides=self._chapters_provider(), ) @@ -1276,6 +1853,7 @@ def _is_original_audio_source(track_tuple: tuple) -> bool: def refresh_runtime_settings(self) -> None: self._audio_table.refresh_runtime_settings() self._workflow.set_ffmpeg_threads(self._config.ffmpeg_threads) + self._workflow.set_max_parallel_video_encodes(self._config.max_parallel_video_encodes) self._workflow.set_mediainfo_bin(self._config.tool_mediainfo) self._workflow.set_generate_nfo(self._config.generate_nfo) self._rebuild_preview() diff --git a/ui/panels/encode_panel/widgets.py b/ui/panels/encode_panel/widgets.py index a579215..be29210 100644 --- a/ui/panels/encode_panel/widgets.py +++ b/ui/panels/encode_panel/widgets.py @@ -343,8 +343,9 @@ 
def set_file_info(self, info: FileInfo) -> None: parts = [info.size_human, info.duration_human, info.format] if info.primary_video: parts.append(info.primary_video.resolution) - if info.hdr_type.label() != "SDR": - parts.append(info.hdr_type.label()) + hdr_lbl = info.primary_video.hdr_label + if hdr_lbl != "SDR": + parts.append(hdr_lbl) self._info_lbl.setText(" ".join(p for p in parts if p != "?")) self.setStyleSheet(f"QFrame{{background:{_C.BG_CARD};" f"border:1px solid {_C.BORDER_LT};border-radius:{_scale(8)}px;}}") diff --git a/ui/panels/merge_dovi_panel.py b/ui/panels/merge_dovi_panel.py index 0798962..56ce2ce 100644 --- a/ui/panels/merge_dovi_panel.py +++ b/ui/panels/merge_dovi_panel.py @@ -20,6 +20,7 @@ import subprocess from concurrent.futures import ThreadPoolExecutor from pathlib import Path +from typing import Any, Protocol from PySide6.QtCore import Qt, Signal, QTimer from PySide6.QtGui import QDesktopServices @@ -40,6 +41,10 @@ from ui.design_system import colors as _C, font_px as _font_px, scale as _scale +class _StringSignal(Protocol): + def emit(self, *args: Any) -> None: ... 
+ + # ============================================================================= # Helpers de style # ============================================================================= @@ -189,7 +194,7 @@ def _make_file_row( label: str, dialog_title: str, attr: str, - signal: Signal, + signal: _StringSignal, ) -> QWidget: card = _card() card.setAcceptDrops(True) @@ -385,6 +390,8 @@ class _ConfigSection(QWidget): def __init__(self, config: AppConfig, parent: QWidget | None = None) -> None: super().__init__(parent) self._config = config + self._work_input: QLineEdit + self._output_input: QLineEdit self._build_ui() def _build_ui(self) -> None: diff --git a/ui/panels/remux_panel/functions/config_builder.py b/ui/panels/remux_panel/functions/config_builder.py index 8659d16..9b96fa4 100644 --- a/ui/panels/remux_panel/functions/config_builder.py +++ b/ui/panels/remux_panel/functions/config_builder.py @@ -49,7 +49,7 @@ def current_config(panel: "RemuxPanel") -> RemuxConfig | None: if not sources: return None - track_order = [ + track_order: list[tuple[int, int] | tuple[int, int, str]] = [ (id_to_index[t.file_id], t.mkv_tid, t.entry_id) for t in all_tracks if t.enabled and t.file_id in id_to_index diff --git a/ui/panels/remux_panel/functions/inspection.py b/ui/panels/remux_panel/functions/inspection.py index 6ed508d..6906b80 100644 --- a/ui/panels/remux_panel/functions/inspection.py +++ b/ui/panels/remux_panel/functions/inspection.py @@ -53,6 +53,7 @@ def inspect_file(panel: "RemuxPanel", file_id: str, path: Path) -> None: inspector = FileInspector( ffprobe_bin=panel._config.tool_ffprobe, mediainfo_bin=panel._config.tool_mediainfo, + verbose_output=lambda line: panel.tool_output.emit("inspector", line), ) info = inspector.inspect(path) panel._inspection_done.emit(file_id, info) diff --git a/ui/panels/remux_panel/functions/signals.py b/ui/panels/remux_panel/functions/signals.py index 5e8f2ac..53c0976 100644 --- a/ui/panels/remux_panel/functions/signals.py +++ 
b/ui/panels/remux_panel/functions/signals.py @@ -34,7 +34,9 @@ def emit_video_tracks(panel: "RemuxPanel") -> None: if file_info is None: continue color = panel._source_colors.get(entry.file_id, _C.BORDER) - video_tuples.append((file_info, entry, color)) + # EncodePanel reçoit une copie dédiée pour éviter tout couplage objet + # entre pistes ou avec l'état interne du tableau remux. + video_tuples.append((file_info, dc_replace(entry), color)) panel.video_tracks_changed.emit(video_tuples) diff --git a/ui/panels/remux_panel/panel.py b/ui/panels/remux_panel/panel.py index 449b713..8db3b31 100644 --- a/ui/panels/remux_panel/panel.py +++ b/ui/panels/remux_panel/panel.py @@ -61,12 +61,14 @@ class RemuxPanel(QWidget): Signaux : log_message(level: str, message: str) + tool_output(label: str, line: str) video_tracks_changed(list) — pistes vidéo activées (FileInfo, TrackEntry, couleur) audio_tracks_changed(list) — pistes audio activées (AudioTrack, couleur, Path source) ready_changed(bool) — True quand au moins un fichier est inspecté """ log_message = Signal(str, str) + tool_output = Signal(str, str) _inspection_done = Signal(str, object) _inspection_error = Signal(str, str) @@ -640,6 +642,41 @@ def update_audio_track_encoding(self, entry_id, codec: str, bitrate_kbps: int) - self._emit_audio_tracks() return + def update_video_track_encoding(self, plans) -> None: + plan_map: dict[str, str] = {} + for plan in plans or []: + entry_id = str(getattr(plan, "track_entry_id", "") or "").strip() + if not entry_id: + continue + target_codec = str(getattr(plan, "target_codec", "") or "").strip().lower() + if not target_codec: + summary = str(getattr(plan, "codec_summary", "") or "").strip() + if summary.lower() == "copy": + target_codec = "copy" + else: + target_codec = summary.split(" - ", 1)[0].strip().lower() + plan_map[entry_id] = target_codec + + table_changed = self._track_table.update_video_encoding_plans(plan_map, clear_missing=True) + model_changed = False + for source in 
self._source_files: + for entry in source.tracks: + if entry.track_type != "video": + continue + target_codec = plan_map.get(entry.entry_id, "") + modified = bool(target_codec and target_codec != "copy") + if entry.encode_plan_codec == target_codec and entry.encode_plan_modified == modified: + continue + entry.encode_plan_codec = target_codec + entry.encode_plan_summary = "" + entry.encode_plan_hdr_badges = () + entry.encode_plan_modified = modified + model_changed = True + + if table_changed or model_changed: + self._rebuild_preview() + self._emit_video_tracks() + def current_output_path(self) -> Path | None: text = self._output_edit.text().strip() return Path(text) if text else None diff --git a/ui/panels/remux_panel/widgets/file_list.py b/ui/panels/remux_panel/widgets/file_list.py index e9a4ee3..a5dd399 100644 --- a/ui/panels/remux_panel/widgets/file_list.py +++ b/ui/panels/remux_panel/widgets/file_list.py @@ -105,8 +105,9 @@ def set_info(self, info: FileInfo) -> None: parts = [info.size_human, info.duration_human, info.format] if info.primary_video: parts.append(info.primary_video.resolution) - if info.primary_video.hdr_type.label() != "SDR": - parts.append(info.primary_video.hdr_type.label()) + hdr_lbl = info.primary_video.hdr_label + if hdr_lbl != "SDR": + parts.append(hdr_lbl) self._info_lbl.setText(" · ".join(p for p in parts if p and p != "?")) self._info_lbl.setStyleSheet(f""" color: {_C.TEXT_SEC}; diff --git a/ui/panels/remux_panel/widgets/track_table.py b/ui/panels/remux_panel/widgets/track_table.py index 0d241fd..77f8155 100644 --- a/ui/panels/remux_panel/widgets/track_table.py +++ b/ui/panels/remux_panel/widgets/track_table.py @@ -29,7 +29,6 @@ from ui.panels.remux_panel.theme import _C, _pencil_icon from ui.panels.track_edit_dialog import TrackEditDialog - class _TrackInfoDelegate(QStyledItemDelegate): @staticmethod def _offset_color(offset_value: str) -> QColor: @@ -94,7 +93,6 @@ def paint(self, painter: QPainter, option: QStyleOptionViewItem, 
index) -> None: painter.drawText(x, baseline, suffix) painter.restore() - class _TrackTable(QTableWidget): order_changed = Signal() extract_requested = Signal(object) # TrackEntry @@ -103,6 +101,25 @@ class _TrackTable(QTableWidget): _MAX_VISIBLE_ROWS = 15 _ROW_H_DEFAULT = 28 _NEW_TRACK_COLOR = QColor(_C.ERROR) + _VIDEO_ENCODE_COLOR = QColor(_C.ACCENT) + _VIDEO_ENCODE_CODEC_COLOR = QColor(_C.ERROR) + _VIDEO_CODEC_SHORT_LABELS: dict[str, str] = { + "libx265": "HEVC", + "hevc_nvenc": "HEVC", + "hevc_amf": "HEVC", + "hevc_qsv": "HEVC", + "hevc_vaapi": "HEVC", + "libx264": "H264", + "h264_nvenc": "H264", + "h264_amf": "H264", + "h264_qsv": "H264", + "h264_vaapi": "H264", + "libsvtav1": "AV1", + "av1_nvenc": "AV1", + "av1_amf": "AV1", + "av1_qsv": "AV1", + "av1_vaapi": "AV1", + } COL_SOURCE = 0 COL_CHECK = 1 @@ -359,6 +376,7 @@ def _fill_row(self, row: int, entry: TrackEntry, source_color: str) -> None: if entry.is_new: self._apply_new_track_style(row) + self._apply_video_encode_style(row, entry) edit_btn = QPushButton() from PySide6.QtCore import QSize @@ -396,6 +414,48 @@ def _apply_new_track_style(self, row: int) -> None: font.setBold(True) item.setFont(font) + def _apply_video_encode_style(self, row: int, entry: TrackEntry) -> None: + codec_item = self.item(row, self.COL_CODEC) + if entry.track_type != "video": + if codec_item is not None: + codec_item.setText(entry.codec) + return + + override_codec = str(entry.encode_plan_codec or "").strip().lower() + has_encode_override = bool(override_codec and override_codec != "copy") + display_codec = entry.orig_codec or entry.codec + if has_encode_override: + display_codec = self._VIDEO_CODEC_SHORT_LABELS.get( + override_codec, + override_codec.upper(), + ) + if codec_item is not None: + codec_item.setText(display_codec) + + highlight = has_encode_override + color = self._VIDEO_ENCODE_COLOR if highlight else None + columns = (self.COL_TYPE, self.COL_CODEC, self.COL_LANG, self.COL_TITLE, self.COL_INFO) + for col in columns: 
+ item = self.item(row, col) + if item is None: + continue + font = item.font() + font.setBold(highlight) + item.setFont(font) + if color is None: + if col == self.COL_TYPE: + item.setForeground(QColor(_C.TRACK_VIDEO)) + elif col == self.COL_INFO: + item.setForeground(QColor(_C.TEXT_SEC)) + else: + item.setForeground(QColor(_C.TEXT_PRI)) + else: + # Ligne encodée mise en avant en bleu, mais codec explicite en rouge. + if col == self.COL_CODEC: + item.setForeground(self._VIDEO_ENCODE_CODEC_COLOR) + else: + item.setForeground(color) + def current_tracks(self) -> list[TrackEntry]: tracks: list[TrackEntry] = [] for row in range(self.rowCount()): @@ -507,6 +567,38 @@ def update_audio_encoding( self.blockSignals(False) return False + def update_video_encoding_plans( + self, + plans: dict[str, str], + *, + clear_missing: bool = False, + ) -> bool: + changed = False + self.blockSignals(True) + try: + for row in range(self.rowCount()): + item0 = self.item(row, self.COL_CHECK) + if item0 is None: + continue + entry = item0.data(Qt.ItemDataRole.UserRole) + if not isinstance(entry, TrackEntry) or entry.track_type != "video": + continue + target_codec = str(plans.get(entry.entry_id, "") or "").strip().lower() + if not target_codec and not clear_missing: + continue + modified = bool(target_codec and target_codec != "copy") + if entry.encode_plan_codec == target_codec and entry.encode_plan_modified == modified: + continue + entry.encode_plan_codec = target_codec + entry.encode_plan_summary = "" + entry.encode_plan_hdr_badges = () + entry.encode_plan_modified = modified + self._apply_video_encode_style(row, entry) + changed = True + finally: + self.blockSignals(False) + return changed + def _on_item_changed(self, item: QTableWidgetItem) -> None: if item.column() != self.COL_LANG: return diff --git a/ui/panels/settings_panel.py b/ui/panels/settings_panel.py index 19d43a1..259e504 100644 --- a/ui/panels/settings_panel.py +++ b/ui/panels/settings_panel.py @@ -86,6 +86,7 @@ def 
__init__(self, config: AppConfig, parent: QWidget | None = None) -> None: self._status_label: QLabel | None = None self._build_ui() self._load_from_config() + self._sync_file_logging_level_state() apply_translations(self) def widget_for(self, section: str, key: str) -> QWidget: @@ -198,6 +199,8 @@ def _build_field_widget(self, section: str, field: dict[str, Any]) -> QWidget: checkbox = QCheckBox(field["label"]) checkbox.setObjectName(f"{section}.{field['key']}") checkbox.setStyleSheet(_checkbox_style()) + if section == "ui" and field["key"] == "enable_file_logging": + checkbox.toggled.connect(self._sync_file_logging_level_state) layout.addWidget(checkbox) self._field_widgets[(section, field["key"])] = checkbox else: @@ -366,6 +369,13 @@ def _field_value(self, section: str, field: dict[str, Any]) -> str: return str(data if data is not None else widget.currentText()) raise TypeError(f"Unsupported widget type: {type(widget)!r}") + def _sync_file_logging_level_state(self) -> None: + checkbox = self._field_widgets.get(("ui", "enable_file_logging")) + combo = self._field_widgets.get(("ui", "file_logging_level")) + if not isinstance(checkbox, QCheckBox) or not isinstance(combo, QComboBox): + return + combo.setEnabled(checkbox.isChecked()) + def _load_from_config(self) -> None: for group in INI_FIELD_GROUPS: section = group["section"] @@ -400,6 +410,7 @@ def _load_from_config(self) -> None: if index >= 0: widget.setCurrentIndex(index) + self._sync_file_logging_level_state() if self._status_label is not None: self._status_label.clear() @@ -424,6 +435,24 @@ def _collect_section_values(self, section: str) -> dict[str, dict[str, str]]: } return {section: {}} + def _validate_values(self, values: dict[str, dict[str, str]]) -> bool: + ui_values = values.get("ui", {}) + verbose_enabled = str(ui_values.get("enable_file_logging", "false")).strip().lower() == "true" + verbose_dir = str(ui_values.get("verbose_log_dir", "")).strip() + if verbose_enabled and not verbose_dir: + message = 
translate_text( + "Le dossier des logs fichier doit être renseigné quand l'option est activée." + ) + if self._status_label is not None: + self._status_label.setText(message) + QMessageBox.warning( + self, + translate_text("Réglage invalide"), + message, + ) + return False + return True + def _browse_for_field(self, section: str, field: dict[str, Any]) -> None: widget = self._field_widgets[(section, field["key"])] if not isinstance(widget, QLineEdit): @@ -447,7 +476,10 @@ def _on_reload_clicked(self) -> None: self._status_label.setText(translate_text("Configuration rechargée depuis config.ini.")) def _on_save_clicked(self) -> None: - write_ini_settings(self._collect_values()) + values = self._collect_values() + if not self._validate_values(values): + return + write_ini_settings(values) self._config.reload() self._config.save() if self._status_label is not None: @@ -459,7 +491,10 @@ def _on_save_clicked(self) -> None: self.settings_saved.emit() def _on_save_section(self, section: str, section_title: str) -> None: - write_ini_settings(self._collect_section_values(section)) + values = self._collect_section_values(section) + if not self._validate_values(values): + return + write_ini_settings(values) self._config.reload() self._config.save() if self._status_label is not None: