VideoKonverter v2.2.0 - Initial Commit
Kompletter Video-Konverter mit Web-UI, GPU-Beschleunigung (Intel VAAPI), Video-Bibliothek mit Serien/Film-Erkennung und TVDB-Integration. Features: - AV1/HEVC/H.264 Encoding (GPU + CPU) - Video-Bibliothek mit ffprobe-Analyse und Filtern - TVDB-Integration mit Review-Modal und Sprachkonfiguration - Film-Scanning und TVDB-Zuordnung - Import- und Clean-Service (Grundgeruest) - WebSocket Live-Updates, Queue-Management - Docker mit GPU/CPU-Profilen Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
parent
1a04f99097
commit
08dcf34f5d
41 changed files with 12902 additions and 154 deletions
174
.gitignore
vendored
174
.gitignore
vendored
|
|
@ -1,164 +1,32 @@
|
||||||
# ---> Python
|
# Python
|
||||||
# Byte-compiled / optimized / DLL files
|
|
||||||
__pycache__/
|
__pycache__/
|
||||||
*.py[cod]
|
*.py[cod]
|
||||||
*$py.class
|
|
||||||
|
|
||||||
# C extensions
|
|
||||||
*.so
|
|
||||||
|
|
||||||
# Distribution / packaging
|
|
||||||
.Python
|
|
||||||
build/
|
|
||||||
develop-eggs/
|
|
||||||
dist/
|
|
||||||
downloads/
|
|
||||||
eggs/
|
|
||||||
.eggs/
|
|
||||||
lib/
|
|
||||||
lib64/
|
|
||||||
parts/
|
|
||||||
sdist/
|
|
||||||
var/
|
|
||||||
wheels/
|
|
||||||
share/python-wheels/
|
|
||||||
*.egg-info/
|
*.egg-info/
|
||||||
.installed.cfg
|
.venv/
|
||||||
*.egg
|
|
||||||
MANIFEST
|
|
||||||
|
|
||||||
# PyInstaller
|
|
||||||
# Usually these files are written by a python script from a template
|
|
||||||
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
|
||||||
*.manifest
|
|
||||||
*.spec
|
|
||||||
|
|
||||||
# Installer logs
|
|
||||||
pip-log.txt
|
|
||||||
pip-delete-this-directory.txt
|
|
||||||
|
|
||||||
# Unit test / coverage reports
|
|
||||||
htmlcov/
|
|
||||||
.tox/
|
|
||||||
.nox/
|
|
||||||
.coverage
|
|
||||||
.coverage.*
|
|
||||||
.cache
|
|
||||||
nosetests.xml
|
|
||||||
coverage.xml
|
|
||||||
*.cover
|
|
||||||
*.py,cover
|
|
||||||
.hypothesis/
|
|
||||||
.pytest_cache/
|
|
||||||
cover/
|
|
||||||
|
|
||||||
# Translations
|
|
||||||
*.mo
|
|
||||||
*.pot
|
|
||||||
|
|
||||||
# Django stuff:
|
|
||||||
*.log
|
|
||||||
local_settings.py
|
|
||||||
db.sqlite3
|
|
||||||
db.sqlite3-journal
|
|
||||||
|
|
||||||
# Flask stuff:
|
|
||||||
instance/
|
|
||||||
.webassets-cache
|
|
||||||
|
|
||||||
# Scrapy stuff:
|
|
||||||
.scrapy
|
|
||||||
|
|
||||||
# Sphinx documentation
|
|
||||||
docs/_build/
|
|
||||||
|
|
||||||
# PyBuilder
|
|
||||||
.pybuilder/
|
|
||||||
target/
|
|
||||||
|
|
||||||
# Jupyter Notebook
|
|
||||||
.ipynb_checkpoints
|
|
||||||
|
|
||||||
# IPython
|
|
||||||
profile_default/
|
|
||||||
ipython_config.py
|
|
||||||
|
|
||||||
# pyenv
|
|
||||||
# For a library or package, you might want to ignore these files since the code is
|
|
||||||
# intended to run in multiple environments; otherwise, check them in:
|
|
||||||
# .python-version
|
|
||||||
|
|
||||||
# pipenv
|
|
||||||
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
|
||||||
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
|
||||||
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
|
||||||
# install all needed dependencies.
|
|
||||||
#Pipfile.lock
|
|
||||||
|
|
||||||
# poetry
|
|
||||||
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
|
||||||
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
|
||||||
# commonly ignored for libraries.
|
|
||||||
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
|
||||||
#poetry.lock
|
|
||||||
|
|
||||||
# pdm
|
|
||||||
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
|
||||||
#pdm.lock
|
|
||||||
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
|
|
||||||
# in version control.
|
|
||||||
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
|
|
||||||
.pdm.toml
|
|
||||||
.pdm-python
|
|
||||||
.pdm-build/
|
|
||||||
|
|
||||||
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
|
||||||
__pypackages__/
|
|
||||||
|
|
||||||
# Celery stuff
|
|
||||||
celerybeat-schedule
|
|
||||||
celerybeat.pid
|
|
||||||
|
|
||||||
# SageMath parsed files
|
|
||||||
*.sage.py
|
|
||||||
|
|
||||||
# Environments
|
|
||||||
.env
|
|
||||||
.venv
|
|
||||||
env/
|
|
||||||
venv/
|
venv/
|
||||||
ENV/
|
|
||||||
env.bak/
|
|
||||||
venv.bak/
|
|
||||||
|
|
||||||
# Spyder project settings
|
# Logs
|
||||||
.spyderproject
|
logs/
|
||||||
.spyproject
|
*.log
|
||||||
|
|
||||||
# Rope project settings
|
# Laufzeit-Daten
|
||||||
.ropeproject
|
data/
|
||||||
|
|
||||||
# mkdocs documentation
|
# Test-Medien
|
||||||
/site
|
testmedia/
|
||||||
|
|
||||||
# mypy
|
# IDE
|
||||||
.mypy_cache/
|
.idea/
|
||||||
.dmypy.json
|
.vscode/
|
||||||
dmypy.json
|
*.swp
|
||||||
|
*.swo
|
||||||
|
|
||||||
# Pyre type checker
|
# Docker
|
||||||
.pyre/
|
.docker/
|
||||||
|
|
||||||
# pytype static type analyzer
|
# OS
|
||||||
.pytype/
|
.DS_Store
|
||||||
|
Thumbs.db
|
||||||
# Cython debug symbols
|
|
||||||
cython_debug/
|
|
||||||
|
|
||||||
# PyCharm
|
|
||||||
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
|
||||||
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
|
||||||
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
|
||||||
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
|
||||||
#.idea/
|
|
||||||
|
|
||||||
|
# Secrets - NICHT einchecken wenn individuelle Passwoerter gesetzt
|
||||||
|
# app/cfg/settings.yaml wird eingecheckt (Template-Werte)
|
||||||
|
|
|
||||||
218
CHANGELOG.md
Normal file
218
CHANGELOG.md
Normal file
|
|
@ -0,0 +1,218 @@
|
||||||
|
# Changelog
|
||||||
|
|
||||||
|
Alle relevanten Aenderungen am VideoKonverter-Projekt.
|
||||||
|
|
||||||
|
## [2.2.0] - 2026-02-21
|
||||||
|
|
||||||
|
### Bugfixes
|
||||||
|
|
||||||
|
**TVDB Review-Modal nicht klickbar**
|
||||||
|
- `JSON.stringify()` erzeugte doppelte Anfuehrungszeichen in HTML onclick-Attributen
|
||||||
|
- Neue `escapeAttr()`-Funktion ersetzt `"` durch `"` fuer sichere HTML-Attribute
|
||||||
|
- 4 Stellen in `library.js` korrigiert (Serien, Filme, Review-Liste, manuelle Suche)
|
||||||
|
|
||||||
|
**Film-TVDB-Suche liefert keine Ergebnisse**
|
||||||
|
- Filmtitel mit fuehrenden Nummern (z.B. "10 Logan The Wolverine") fanden nichts auf TVDB
|
||||||
|
- Neue `cleanSearchTitle()`-Funktion entfernt fuehrende Nummern und Aufloesungs-Suffixe
|
||||||
|
- Angewendet in `openMovieTvdbModal()` und `openTvdbModal()`
|
||||||
|
|
||||||
|
**Auto-Match Progress-Variable nicht im Scope**
|
||||||
|
- `pollAutoMatchStatus()` referenzierte `progress` aus `startAutoMatch()`-Scope
|
||||||
|
- Variable wird jetzt lokal in `pollAutoMatchStatus()` definiert
|
||||||
|
|
||||||
|
**TVDB-Sprache wurde nicht gespeichert**
|
||||||
|
- `pages.py` HTMX-Save-Handler fehlte `tvdb_language` Feld
|
||||||
|
- Hinzugefuegt: `settings["library"]["tvdb_language"] = data.get("tvdb_language", "deu")`
|
||||||
|
|
||||||
|
### Geaenderte Dateien
|
||||||
|
- `app/static/js/library.js` - +escapeAttr(), +cleanSearchTitle(), 4x Attribut-Escaping, Progress-Fix
|
||||||
|
- `app/routes/pages.py` - tvdb_language in Settings-Save
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## [2.1.0] - 2026-02-21
|
||||||
|
|
||||||
|
### TVDB-Sprachkonfiguration
|
||||||
|
|
||||||
|
Alle TVDB-Metadaten (Serien-Titel, Beschreibungen, Episoden-Namen) werden jetzt
|
||||||
|
in der konfigurierten Sprache abgerufen statt immer Englisch.
|
||||||
|
|
||||||
|
#### Neue Features
|
||||||
|
- **Sprach-Dropdown** in Admin-UI (Deutsch, Englisch, Franzoesisch, Spanisch, Italienisch, Japanisch)
|
||||||
|
- **API-Endpoints**: `GET/PUT /api/tvdb/language` zum Lesen/Aendern der Sprache
|
||||||
|
- **Episoden-Refresh**: `POST /api/library/tvdb-refresh-episodes` aktualisiert alle gecachten Episoden
|
||||||
|
- Alle TVDB-API-Aufrufe nutzen `self._language` Property
|
||||||
|
|
||||||
|
#### TVDB Review-Modal (Neues Feature)
|
||||||
|
- Statt blindem Auto-Match werden jetzt **Vorschlaege gesammelt** und zur Pruefung angezeigt
|
||||||
|
- `collect_suggestions()` in tvdb.py: Top 3 TVDB-Treffer pro ungematchter Serie/Film
|
||||||
|
- Review-Modal: Poster-Vorschau, Beschreibung, Jahr, Einzelbestaetigung
|
||||||
|
- Manuelle TVDB-Suche falls Vorschlaege nicht passen
|
||||||
|
- Polling-basierter Fortschritt waehrend der Analyse
|
||||||
|
|
||||||
|
#### Film-Scanning (Neues Feature)
|
||||||
|
- Bibliothek unterstuetzt jetzt **Filme** neben Serien
|
||||||
|
- Neue DB-Tabelle `library_movies` fuer Film-Metadaten
|
||||||
|
- Film-Erkennung: Ein Video pro Ordner = Film, Ordnername = Filmtitel
|
||||||
|
- Film-Grid in der Bibliothek-UI mit Poster, Titel, Jahr
|
||||||
|
- TVDB-Zuordnung fuer Filme (Suche + manuelle Zuordnung)
|
||||||
|
|
||||||
|
#### Import- und Clean-Service (Grundgeruest)
|
||||||
|
- `app/services/importer.py` (734 Z.) - Import-Logik mit Serien-Erkennung + TVDB-Lookup
|
||||||
|
- `app/services/cleaner.py` (155 Z.) - Junk-Scan + Loeschen von Nicht-Video-Dateien
|
||||||
|
|
||||||
|
#### Geaenderte Dateien
|
||||||
|
- `app/services/tvdb.py` - _language Property, lokalisierte Suche, collect_suggestions() (298 -> 1005 Z.)
|
||||||
|
- `app/services/library.py` - Film-Scan, _ensure_movie, _add_video_to_db (1082 -> 1747 Z.)
|
||||||
|
- `app/routes/library_api.py` - TVDB-Language-Endpoints, Confirm-Endpoint, Film-Endpoints (260 -> 998 Z.)
|
||||||
|
- `app/static/js/library.js` - Review-Modal, Film-Grid, Auto-Match-Polling (587 -> 1912 Z.)
|
||||||
|
- `app/static/css/style.css` - Review-Modal CSS, Film-Grid CSS (889 -> 1554 Z.)
|
||||||
|
- `app/templates/library.html` - Review-Modal HTML, Film-TVDB-Modal (330 -> 392 Z.)
|
||||||
|
- `app/templates/admin.html` - TVDB-Sprach-Dropdown (330 -> 342 Z.)
|
||||||
|
- `app/server.py` - CleanerService + ImporterService Integration
|
||||||
|
- `app/cfg/settings.yaml` - tvdb_language, import-Settings, cleanup keep_extensions
|
||||||
|
|
||||||
|
#### Neue API-Endpoints
|
||||||
|
- `GET /api/tvdb/language` - Aktuelle TVDB-Sprache
|
||||||
|
- `PUT /api/tvdb/language` - TVDB-Sprache aendern
|
||||||
|
- `POST /api/library/tvdb-refresh-episodes` - Alle Episoden-Caches aktualisieren
|
||||||
|
- `POST /api/library/tvdb-auto-match` - Review-Vorschlaege sammeln
|
||||||
|
- `POST /api/library/tvdb-confirm` - Einzelnen TVDB-Match bestaetigen
|
||||||
|
- `GET /api/library/movies` - Filme auflisten
|
||||||
|
- `POST /api/library/movies/{id}/tvdb-match` - Film-TVDB-Zuordnung
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## [2.0.0] - 2026-02-20
|
||||||
|
|
||||||
|
### Video-Bibliothek (Neues Feature)
|
||||||
|
|
||||||
|
Komplette Video-Bibliotheksverwaltung mit Serien-Erkennung, ffprobe-Analyse,
|
||||||
|
TVDB-Integration und umfangreichen Filterfunktionen.
|
||||||
|
|
||||||
|
#### Neue Dateien
|
||||||
|
- `app/services/library.py` - LibraryService (Scan, DB, Filter, Duplikate)
|
||||||
|
- `app/services/tvdb.py` - TVDBService (Auth, Suche, Episoden-Abgleich)
|
||||||
|
- `app/routes/library_api.py` - REST API Endpoints fuer Bibliothek
|
||||||
|
- `app/templates/library.html` - Bibliothek-Hauptseite mit Filter-Sidebar
|
||||||
|
- `app/static/js/library.js` - Bibliothek-Frontend (Filter, TVDB, Scan-Progress)
|
||||||
|
|
||||||
|
#### Geaenderte Dateien
|
||||||
|
- `app/server.py` - LibraryService + TVDBService Integration
|
||||||
|
- `app/routes/pages.py` - Route /library + TVDB-Settings speichern
|
||||||
|
- `app/templates/base.html` - Nav-Link "Bibliothek" hinzugefuegt
|
||||||
|
- `app/templates/admin.html` - TVDB-Settings + Scan-Pfad-Verwaltung
|
||||||
|
- `app/static/css/style.css` - Library-Styles (~250 Zeilen ergaenzt)
|
||||||
|
- `app/cfg/settings.yaml` - library-Sektion (enabled, tvdb_api_key, tvdb_pin)
|
||||||
|
- `requirements.txt` - tvdb-v4-official>=1.1.0 hinzugefuegt
|
||||||
|
|
||||||
|
#### Datenbank
|
||||||
|
Vier neue Tabellen (werden automatisch beim Start erstellt):
|
||||||
|
- `library_paths` - Konfigurierbare Scan-Pfade (Serien/Filme)
|
||||||
|
- `library_series` - Erkannte Serien mit TVDB-Verknuepfung
|
||||||
|
- `library_videos` - Videos mit vollstaendigen ffprobe-Metadaten
|
||||||
|
- `tvdb_episode_cache` - TVDB-Episoden-Cache
|
||||||
|
|
||||||
|
#### Features im Detail
|
||||||
|
|
||||||
|
**Ordner-Scan**
|
||||||
|
- Konfigurierbare Scan-Pfade fuer Serien und Filme getrennt
|
||||||
|
- Serien-Erkennung via Ordnerstruktur (S01E01, 1x02, Season/Staffel XX)
|
||||||
|
- ffprobe-Analyse: Codec, Aufloesung, Bitrate, Audio-Spuren, Untertitel, HDR
|
||||||
|
- Versteckte Ordner (.Trash-*) werden automatisch uebersprungen
|
||||||
|
- UPSERT-Logik: unveraenderte Dateien werden nicht erneut analysiert
|
||||||
|
- Scan-Fortschritt via WebSocket + Polling im UI
|
||||||
|
- Verwaiste DB-Eintraege werden nach Scan automatisch bereinigt
|
||||||
|
|
||||||
|
**Filter-System**
|
||||||
|
- Video-Codec: AV1, HEVC, H.264, MPEG-4
|
||||||
|
- Aufloesung: 4K, 1080p, 720p, SD
|
||||||
|
- Container: MKV, MP4, AVI, WebM, TS, WMV
|
||||||
|
- Audio: Sprache (Deutsch, Englisch), Kanaele (Stereo, 5.1, 7.1)
|
||||||
|
- 10-Bit Filter
|
||||||
|
- Freitext-Suche im Dateinamen
|
||||||
|
- Sortierung nach Name, Groesse, Aufloesung, Dauer, Codec, Datum
|
||||||
|
- Pagination (50 Videos pro Seite)
|
||||||
|
|
||||||
|
**TVDB-Integration**
|
||||||
|
- Authentifizierung via API Key + optionalem PIN
|
||||||
|
- Serien-Suche mit Ergebnis-Vorschau (Poster, Beschreibung, Jahr)
|
||||||
|
- Episoden-Abgleich: Soll (TVDB) vs. Ist (lokal) = fehlende Episoden
|
||||||
|
- Poster-URLs, Beschreibung, Status (Continuing/Ended)
|
||||||
|
- Episoden-Cache in DB (reduziert API-Aufrufe)
|
||||||
|
|
||||||
|
**Duplikat-Finder**
|
||||||
|
- Erkennt gleiche Episoden in verschiedenen Formaten (z.B. AVI + WebM)
|
||||||
|
- Vergleich ueber Serie + Staffel + Episode
|
||||||
|
- Anzeige mit Codec, Aufloesung, Groesse fuer beide Versionen
|
||||||
|
|
||||||
|
**Direkt-Konvertierung**
|
||||||
|
- "Conv"-Button bei jedem Video in der Bibliothek
|
||||||
|
- Sendet Video direkt an die bestehende Konvertierungs-Queue
|
||||||
|
- Optionale Preset-Auswahl
|
||||||
|
|
||||||
|
**Admin-UI Erweiterung**
|
||||||
|
- TVDB API Key + PIN Eingabefelder
|
||||||
|
- Scan-Pfad-Verwaltung (hinzufuegen, loeschen, einzeln scannen)
|
||||||
|
- Letzter Scan-Zeitpunkt pro Pfad angezeigt
|
||||||
|
|
||||||
|
**Statistik-Leiste**
|
||||||
|
- Gesamt-Videos, Serien-Anzahl, Speicherbedarf, Gesamtspielzeit
|
||||||
|
- Codec-Verteilung, Aufloesungs-Verteilung
|
||||||
|
|
||||||
|
#### API Endpoints (17 neue)
|
||||||
|
- `GET/POST/DELETE /api/library/paths` - Scan-Pfade CRUD
|
||||||
|
- `POST /api/library/scan` - Komplett-Scan
|
||||||
|
- `POST /api/library/scan/{id}` - Einzel-Scan
|
||||||
|
- `GET /api/library/scan-status` - Scan-Fortschritt
|
||||||
|
- `GET /api/library/videos` - Videos mit Filtern
|
||||||
|
- `GET /api/library/series` - Alle Serien
|
||||||
|
- `GET /api/library/series/{id}` - Serien-Detail mit Episoden
|
||||||
|
- `GET /api/library/series/{id}/missing` - Fehlende Episoden
|
||||||
|
- `POST /api/library/series/{id}/tvdb-match` - TVDB zuordnen
|
||||||
|
- `GET /api/library/duplicates` - Duplikate finden
|
||||||
|
- `POST /api/library/videos/{id}/convert` - Direkt konvertieren
|
||||||
|
- `GET /api/library/stats` - Bibliotheks-Statistiken
|
||||||
|
- `GET /api/tvdb/search?q=` - TVDB-Suche
|
||||||
|
|
||||||
|
#### Erster Scan-Lauf
|
||||||
|
- 80 Serien erkannt, 5.260 Videos analysiert
|
||||||
|
- ~3.7 TiB Gesamtgroesse, ~150 Tage Spielzeit
|
||||||
|
- Codecs: H.264 (2.942), MPEG-4 (2.094), AV1 (199), HEVC (24)
|
||||||
|
- Aufloesungen: SD (4.256), 720p (651), 1080p (300), 4K (52)
|
||||||
|
- 9 Duplikat-Paare gefunden
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## [1.0.0] - 2026-02-20
|
||||||
|
|
||||||
|
### Komplett-Neubau
|
||||||
|
|
||||||
|
Vollstaendiger Neubau des VideoKonverter-Servers als moderne
|
||||||
|
Python/aiohttp-Anwendung mit Web-UI.
|
||||||
|
|
||||||
|
#### Kern-Features
|
||||||
|
- **aiohttp-Server** mit Jinja2-Templating und HTMX
|
||||||
|
- **WebSocket** fuer Echtzeit-Fortschritts-Updates
|
||||||
|
- **Queue-System** mit MariaDB-Persistierung und parallelen Jobs
|
||||||
|
- **FFmpeg-Encoding** mit GPU (Intel VAAPI) und CPU Support
|
||||||
|
- **7 Encoding-Presets**: GPU AV1/HEVC/H.264 + CPU SVT-AV1/x265/x264
|
||||||
|
- **Dashboard** mit aktiven Jobs und Queue-Uebersicht
|
||||||
|
- **Admin-UI** fuer Einstellungen, Presets, Encoding-Modus
|
||||||
|
- **Statistik-Seite** mit Konvertierungs-Historie
|
||||||
|
- **File-Browser** zum Auswaehlen von Dateien/Ordnern
|
||||||
|
- **Docker-Support** mit GPU- und CPU-Profilen
|
||||||
|
|
||||||
|
#### Audio-Handling
|
||||||
|
- Alle Spuren behalten (kein Downmix)
|
||||||
|
- Konfigurierbare Sprach-Filter (DE, EN, Undefiniert)
|
||||||
|
- Opus-Transcoding mit bitrate-basiertem Kanalmanagement
|
||||||
|
- Surround-Kanaele (5.1, 7.1) bleiben erhalten
|
||||||
|
|
||||||
|
#### Technische Details
|
||||||
|
- Async/Await durchgaengig (aiohttp, aiomysql, asyncio.subprocess)
|
||||||
|
- MariaDB statt SQLite (Lock-Probleme in Docker behoben)
|
||||||
|
- WebSocket-Broadcast fuer alle verbundenen Clients
|
||||||
|
- Automatische GPU-Erkennung (VAAPI, vainfo)
|
||||||
|
- Konfigurierbare Settings via YAML
|
||||||
|
- Log-Rotation (7 Tage, 10 MiB pro Datei)
|
||||||
39
Dockerfile
Normal file
39
Dockerfile
Normal file
|
|
@ -0,0 +1,39 @@
|
||||||
|
FROM ubuntu:24.04
|
||||||
|
|
||||||
|
# Basis-Pakete + ffmpeg + Intel GPU Treiber
|
||||||
|
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||||
|
ffmpeg \
|
||||||
|
python3 \
|
||||||
|
python3-pip \
|
||||||
|
intel-opencl-icd \
|
||||||
|
intel-media-va-driver-non-free \
|
||||||
|
libva-drm2 \
|
||||||
|
libva2 \
|
||||||
|
libmfx1 \
|
||||||
|
vainfo \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
# Umgebungsvariablen fuer Intel GPU
|
||||||
|
ENV LIBVA_DRIVER_NAME=iHD
|
||||||
|
ENV LIBVA_DRIVERS_PATH=/usr/lib/x86_64-linux-gnu/dri
|
||||||
|
|
||||||
|
WORKDIR /opt/video-konverter
|
||||||
|
|
||||||
|
# Python-Abhaengigkeiten
|
||||||
|
COPY requirements.txt .
|
||||||
|
RUN pip install --no-cache-dir --break-system-packages -r requirements.txt
|
||||||
|
|
||||||
|
# Anwendung kopieren
|
||||||
|
COPY __main__.py .
|
||||||
|
COPY app/ ./app/
|
||||||
|
|
||||||
|
# Daten- und Log-Verzeichnisse (beschreibbar fuer UID 1000)
|
||||||
|
RUN mkdir -p /opt/video-konverter/data /opt/video-konverter/logs \
|
||||||
|
&& chmod 777 /opt/video-konverter/data /opt/video-konverter/logs
|
||||||
|
|
||||||
|
# Konfiguration und Daten als Volumes
|
||||||
|
VOLUME ["/opt/video-konverter/app/cfg", "/opt/video-konverter/data", "/opt/video-konverter/logs"]
|
||||||
|
|
||||||
|
EXPOSE 8080
|
||||||
|
|
||||||
|
CMD ["python3", "__main__.py"]
|
||||||
288
README.md
288
README.md
|
|
@ -1,2 +1,288 @@
|
||||||
# docker.videokonverter
|
# VideoKonverter
|
||||||
|
|
||||||
|
Web-basierter Video-Konverter mit GPU-Beschleunigung (Intel VAAPI), Video-Bibliotheksverwaltung und TVDB-Integration. Laeuft als Docker-Container auf Unraid oder lokal.
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
### Video-Konvertierung
|
||||||
|
- **GPU-Encoding**: Intel VAAPI (AV1, HEVC, H.264) ueber Intel A380
|
||||||
|
- **CPU-Encoding**: SVT-AV1, x265, x264 als Fallback
|
||||||
|
- **Konfigurierbare Presets**: GPU/CPU, verschiedene Codecs und Qualitaetsstufen
|
||||||
|
- **Parallele Jobs**: Mehrere Videos gleichzeitig konvertieren
|
||||||
|
- **Audio-Handling**: Alle Spuren behalten (DE+EN), kein Downmix, Opus-Transcoding
|
||||||
|
- **Live-Fortschritt**: WebSocket-basierte Echtzeit-Updates im Dashboard
|
||||||
|
- **Queue-Management**: Drag-and-Drop, Pause, Abbruch, Prioritaeten
|
||||||
|
|
||||||
|
### Video-Bibliothek
|
||||||
|
- **Ordner-Scan**: Konfigurierbare Scan-Pfade fuer Serien und Filme
|
||||||
|
- **Serien-Erkennung**: Automatisch via Ordnerstruktur (`S01E01`, `1x02`, `Staffel/Season XX`)
|
||||||
|
- **Film-Erkennung**: Ein Video pro Ordner = Film, Ordnername als Filmtitel
|
||||||
|
- **ffprobe-Analyse**: Codec, Aufloesung, Bitrate, Audio-Spuren, Untertitel, HDR/10-Bit
|
||||||
|
- **TVDB-Integration**: Serien + Filme, Poster, Episoden-Titel, fehlende Episoden
|
||||||
|
- **TVDB Review-Modal**: Vorschlaege pruefen statt blindem Auto-Match, manuelle Suche
|
||||||
|
- **TVDB-Sprachkonfiguration**: Metadaten in Deutsch, Englisch oder anderen Sprachen
|
||||||
|
- **Filter**: Video-Codec, Aufloesung, Container, Audio-Sprache, Kanaele, 10-Bit
|
||||||
|
- **Duplikat-Finder**: Gleiche Episode in verschiedenen Formaten erkennen
|
||||||
|
- **Import-Service**: Videos einsortieren mit Serien-Erkennung und TVDB-Lookup
|
||||||
|
- **Clean-Service**: Nicht-Video-Dateien (NFO, JPG, SRT etc.) finden und entfernen
|
||||||
|
- **Direkt-Konvertierung**: Videos aus der Bibliothek direkt in die Queue senden
|
||||||
|
|
||||||
|
### Administration
|
||||||
|
- **Web-UI**: Responsive Dashboard, Bibliothek, Einstellungen, Statistik
|
||||||
|
- **Settings**: Encoding-Modus, Ziel-Container, Audio-/Untertitel-Sprachen, Cleanup
|
||||||
|
- **TVDB-Verwaltung**: API-Key/PIN konfigurieren, Sprache waehlen, Serien + Filme zuordnen
|
||||||
|
- **Scan-Pfad-Management**: Pfade hinzufuegen/loeschen/scannen ueber Admin-UI
|
||||||
|
- **Statistik**: Konvertierungs-Historie mit Groessen-Ersparnis, Dauer, Codec-Verteilung
|
||||||
|
|
||||||
|
|
||||||
|
## Architektur
|
||||||
|
|
||||||
|
```
|
||||||
|
┌────────────────────────────────────────────────────┐
|
||||||
|
│ Browser (Dashboard / Bibliothek / Admin / Stats) │
|
||||||
|
├──────────────┬──────────────┬──────────────────────┤
|
||||||
|
│ HTMX/JS │ WebSocket │ REST API │
|
||||||
|
├──────────────┴──────────────┴──────────────────────┤
|
||||||
|
│ aiohttp Server │
|
||||||
|
│ ┌──────────┐ ┌────────────┐ ┌──────────────────┐ │
|
||||||
|
│ │ Queue │ │ Encoder │ │ LibraryService │ │
|
||||||
|
│ │ Service │ │ Service │ │ (Scan, Filter, │ │
|
||||||
|
│ │ │ │ (ffmpeg) │ │ Serien + Filme) │ │
|
||||||
|
│ ├──────────┤ ├────────────┤ ├──────────────────┤ │
|
||||||
|
│ │ Scanner │ │ Probe │ │ TVDBService │ │
|
||||||
|
│ │ Service │ │ Service │ │ (API v4, i18n) │ │
|
||||||
|
│ ├──────────┤ │ (ffprobe) │ ├──────────────────┤ │
|
||||||
|
│ │ Importer │ └────────────┘ │ CleanerService │ │
|
||||||
|
│ │ Service │ │ (Junk-Scan) │ │
|
||||||
|
│ └──────────┘ └──────────────────┘ │
|
||||||
|
├─────────────────────────────────────────────────────┤
|
||||||
|
│ MariaDB (statistics, library_*, tvdb_episode_cache)│
|
||||||
|
└─────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
### Technologie-Stack
|
||||||
|
|
||||||
|
| Komponente | Technologie |
|
||||||
|
|------------|-------------|
|
||||||
|
| Backend | Python 3.12, aiohttp (async) |
|
||||||
|
| Templates | Jinja2 + HTMX |
|
||||||
|
| Datenbank | MariaDB (aiomysql) |
|
||||||
|
| Video | FFmpeg + FFprobe |
|
||||||
|
| GPU | Intel VAAPI (iHD-Treiber) |
|
||||||
|
| Metadaten | TVDB API v4 (tvdb-v4-official) |
|
||||||
|
| Container | Docker (Ubuntu 24.04) |
|
||||||
|
| Echtzeit | WebSocket |
|
||||||
|
|
||||||
|
|
||||||
|
## Projektstruktur
|
||||||
|
|
||||||
|
```
|
||||||
|
video-konverter/
|
||||||
|
├── __main__.py # Einstiegspunkt
|
||||||
|
├── Dockerfile # Ubuntu 24.04 + ffmpeg + Intel GPU
|
||||||
|
├── docker-compose.yml # GPU + CPU Profile
|
||||||
|
├── requirements.txt # Python-Abhaengigkeiten
|
||||||
|
├── app/
|
||||||
|
│ ├── server.py # Haupt-Server (aiohttp Application)
|
||||||
|
│ ├── config.py # Settings + Presets laden
|
||||||
|
│ ├── cfg/
|
||||||
|
│ │ ├── settings.yaml # Laufzeit-Einstellungen
|
||||||
|
│ │ └── presets.yaml # Encoding-Presets (7 Stueck)
|
||||||
|
│ ├── models/
|
||||||
|
│ │ ├── media.py # MediaFile, VideoStream, AudioStream
|
||||||
|
│ │ └── job.py # ConvertJob, JobStatus
|
||||||
|
│ ├── services/
|
||||||
|
│ │ ├── library.py # Bibliothek: Scan, Filter, Duplikate (1747 Z.)
|
||||||
|
│ │ ├── tvdb.py # TVDB: Auth, Suche, Episoden, Sprache (1005 Z.)
|
||||||
|
│ │ ├── importer.py # Import: Erkennung, TVDB-Lookup, Kopieren (734 Z.)
|
||||||
|
│ │ ├── cleaner.py # Clean: Junk-Scan, Nicht-Video-Dateien (155 Z.)
|
||||||
|
│ │ ├── queue.py # Job-Queue mit MariaDB-Persistierung (541 Z.)
|
||||||
|
│ │ ├── encoder.py # FFmpeg-Wrapper (GPU + CPU)
|
||||||
|
│ │ ├── probe.py # FFprobe-Analyse
|
||||||
|
│ │ ├── scanner.py # Dateisystem-Scanner
|
||||||
|
│ │ └── progress.py # Encoding-Fortschritt parsen
|
||||||
|
│ ├── routes/
|
||||||
|
│ │ ├── api.py # REST API (Queue, Jobs, Convert)
|
||||||
|
│ │ ├── library_api.py # REST API (Bibliothek, TVDB, Scan) (998 Z.)
|
||||||
|
│ │ ├── pages.py # HTML-Seiten (Dashboard, Admin, etc.)
|
||||||
|
│ │ └── ws.py # WebSocket-Manager
|
||||||
|
│ ├── templates/
|
||||||
|
│ │ ├── base.html # Basis-Layout mit Navigation
|
||||||
|
│ │ ├── dashboard.html # Queue + aktive Jobs
|
||||||
|
│ │ ├── library.html # Bibliothek mit Filtern
|
||||||
|
│ │ ├── admin.html # Einstellungen + TVDB + Scan-Pfade
|
||||||
|
│ │ ├── statistics.html # Konvertierungs-Statistik
|
||||||
|
│ │ └── partials/
|
||||||
|
│ │ └── stats_table.html
|
||||||
|
│ └── static/
|
||||||
|
│ ├── css/style.css # Komplettes Styling (1554 Z.)
|
||||||
|
│ └── js/
|
||||||
|
│ ├── library.js # Bibliothek-UI (1912 Z.)
|
||||||
|
│ ├── websocket.js # WebSocket-Client
|
||||||
|
│ └── filebrowser.js # Datei-Browser
|
||||||
|
├── data/ # Queue-Persistierung
|
||||||
|
├── logs/ # Server-Logs
|
||||||
|
└── testmedia/ # Test-Dateien
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
|
||||||
|
### Voraussetzungen
|
||||||
|
- Docker + Docker Compose
|
||||||
|
- MariaDB-Server (extern, z.B. auf Unraid)
|
||||||
|
- Optional: Intel GPU fuer Hardware-Encoding
|
||||||
|
|
||||||
|
### MariaDB einrichten
|
||||||
|
```sql
|
||||||
|
CREATE DATABASE video_converter CHARACTER SET utf8mb4;
|
||||||
|
CREATE USER 'video'@'%' IDENTIFIED BY 'dein_passwort';
|
||||||
|
GRANT ALL PRIVILEGES ON video_converter.* TO 'video'@'%';
|
||||||
|
FLUSH PRIVILEGES;
|
||||||
|
```
|
||||||
|
Die Tabellen werden automatisch beim ersten Start erstellt.
|
||||||
|
|
||||||
|
### Konfiguration
|
||||||
|
In `app/cfg/settings.yaml` anpassen:
|
||||||
|
```yaml
|
||||||
|
database:
|
||||||
|
host: "192.168.155.11"
|
||||||
|
port: 3306
|
||||||
|
user: "video"
|
||||||
|
password: "dein_passwort"
|
||||||
|
database: "video_converter"
|
||||||
|
|
||||||
|
encoding:
|
||||||
|
mode: "cpu" # "gpu" | "cpu" | "auto"
|
||||||
|
gpu_device: "/dev/dri/renderD128"
|
||||||
|
default_preset: "cpu_av1"
|
||||||
|
max_parallel_jobs: 1
|
||||||
|
|
||||||
|
files:
|
||||||
|
target_container: "webm" # "webm" | "mkv" | "mp4"
|
||||||
|
delete_source: false
|
||||||
|
recursive_scan: true
|
||||||
|
|
||||||
|
library:
|
||||||
|
enabled: true
|
||||||
|
tvdb_api_key: "" # Von thetvdb.com
|
||||||
|
tvdb_pin: "" # Subscriber PIN (optional)
|
||||||
|
tvdb_language: "deu" # deu, eng, fra, spa, ita, jpn
|
||||||
|
import_default_mode: "copy" # "copy" | "move"
|
||||||
|
import_naming_pattern: "{series} - S{season:02d}E{episode:02d} - {title}.{ext}"
|
||||||
|
import_season_pattern: "Season {season:02d}"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Starten
|
||||||
|
|
||||||
|
**GPU-Modus** (Produktion auf Unraid):
|
||||||
|
```bash
|
||||||
|
docker compose --profile gpu up --build -d
|
||||||
|
```
|
||||||
|
|
||||||
|
**CPU-Modus** (lokal testen):
|
||||||
|
```bash
|
||||||
|
PUID=1000 PGID=1000 docker compose --profile cpu up --build -d
|
||||||
|
```
|
||||||
|
|
||||||
|
Web-UI: http://localhost:8080
|
||||||
|
|
||||||
|
|
||||||
|
## Encoding-Presets
|
||||||
|
|
||||||
|
| Preset | Codec | Container | Qualitaet | Modus |
|
||||||
|
|--------|-------|-----------|-----------|-------|
|
||||||
|
| GPU AV1 | av1_vaapi | WebM | QP 30 | GPU |
|
||||||
|
| GPU AV1 10-Bit | av1_vaapi | WebM | QP 30 | GPU |
|
||||||
|
| GPU HEVC | hevc_vaapi | MKV | QP 28 | GPU |
|
||||||
|
| GPU H.264 | h264_vaapi | MP4 | QP 23 | GPU |
|
||||||
|
| CPU AV1/SVT | libsvtav1 | WebM | CRF 30 | CPU |
|
||||||
|
| CPU HEVC/x265 | libx265 | MKV | CRF 28 | CPU |
|
||||||
|
| CPU H.264/x264 | libx264 | MP4 | CRF 23 | CPU |
|
||||||
|
|
||||||
|
|
||||||
|
## API Referenz
|
||||||
|
|
||||||
|
### Konvertierung
|
||||||
|
| Methode | Pfad | Beschreibung |
|
||||||
|
|---------|------|-------------|
|
||||||
|
| POST | `/api/convert` | Dateien/Ordner zur Queue hinzufuegen |
|
||||||
|
| GET | `/api/queue` | Queue-Status abrufen |
|
||||||
|
| DELETE | `/api/jobs/{id}` | Job entfernen/abbrechen |
|
||||||
|
|
||||||
|
### Bibliothek
|
||||||
|
| Methode | Pfad | Beschreibung |
|
||||||
|
|---------|------|-------------|
|
||||||
|
| GET | `/api/library/paths` | Scan-Pfade auflisten |
|
||||||
|
| POST | `/api/library/paths` | Scan-Pfad hinzufuegen |
|
||||||
|
| DELETE | `/api/library/paths/{id}` | Scan-Pfad loeschen |
|
||||||
|
| POST | `/api/library/scan` | Alle Pfade scannen |
|
||||||
|
| POST | `/api/library/scan/{id}` | Einzelnen Pfad scannen |
|
||||||
|
| GET | `/api/library/scan-status` | Scan-Fortschritt |
|
||||||
|
| GET | `/api/library/videos` | Videos filtern (siehe Filter-Params) |
|
||||||
|
| GET | `/api/library/series` | Alle Serien |
|
||||||
|
| GET | `/api/library/series/{id}` | Serie mit Episoden |
|
||||||
|
| GET | `/api/library/series/{id}/missing` | Fehlende Episoden |
|
||||||
|
| POST | `/api/library/series/{id}/tvdb-match` | TVDB-ID zuordnen |
|
||||||
|
| GET | `/api/library/duplicates` | Duplikate finden |
|
||||||
|
| POST | `/api/library/videos/{id}/convert` | Direkt konvertieren |
|
||||||
|
| GET | `/api/library/stats` | Bibliotheks-Statistiken |
|
||||||
|
| GET | `/api/library/movies` | Filme auflisten |
|
||||||
|
| POST | `/api/library/movies/{id}/tvdb-match` | Film-TVDB-Zuordnung |
|
||||||
|
| POST | `/api/library/tvdb-auto-match` | Review-Vorschlaege sammeln |
|
||||||
|
| POST | `/api/library/tvdb-confirm` | TVDB-Match bestaetigen |
|
||||||
|
| POST | `/api/library/tvdb-refresh-episodes` | Episoden-Cache aktualisieren |
|
||||||
|
| GET | `/api/tvdb/search?q=` | TVDB-Suche |
|
||||||
|
| GET | `/api/tvdb/language` | TVDB-Sprache lesen |
|
||||||
|
| PUT | `/api/tvdb/language` | TVDB-Sprache aendern |
|
||||||
|
|
||||||
|
### Video-Filter (`/api/library/videos`)
|
||||||
|
```
|
||||||
|
?video_codec=hevc # h264, hevc, av1, mpeg4
|
||||||
|
&min_width=1920 # Mindest-Aufloesung
|
||||||
|
&container=mkv # mkv, mp4, avi, webm
|
||||||
|
&audio_lang=ger # Audio-Sprache
|
||||||
|
&audio_channels=6 # Kanal-Anzahl (2=Stereo, 6=5.1)
|
||||||
|
&is_10bit=1 # Nur 10-Bit
|
||||||
|
&search=breaking # Dateiname-Suche
|
||||||
|
&sort=file_size # Sortierung
|
||||||
|
&order=desc # asc | desc
|
||||||
|
&page=1&limit=50 # Pagination
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## Datenbank-Schema
|
||||||
|
|
||||||
|
### library_paths
|
||||||
|
Konfigurierte Scan-Pfade fuer Serien- und Film-Ordner.
|
||||||
|
|
||||||
|
### library_series
|
||||||
|
Erkannte Serien mit optionaler TVDB-Verknuepfung (Poster, Beschreibung, Episoden-Zaehler).
|
||||||
|
|
||||||
|
### library_movies
|
||||||
|
Erkannte Filme mit optionaler TVDB-Verknuepfung (Poster, Beschreibung, Jahr).
|
||||||
|
|
||||||
|
### library_videos
|
||||||
|
Jedes Video mit vollstaendigen ffprobe-Metadaten:
|
||||||
|
- Video: Codec, Aufloesung, Framerate, Bitrate, 10-Bit, HDR
|
||||||
|
- Audio: JSON-Array mit Spuren (`[{"codec":"eac3","lang":"ger","channels":6,"bitrate":256000}]`)
|
||||||
|
- Untertitel: JSON-Array (`[{"codec":"subrip","lang":"ger"}]`)
|
||||||
|
- Serien: Staffel/Episode-Nummer, Episoden-Titel
|
||||||
|
|
||||||
|
### tvdb_episode_cache
|
||||||
|
Zwischenspeicher fuer TVDB-Episodendaten (Serie, Staffel, Episode, Name, Ausstrahlung).
|
||||||
|
|
||||||
|
|
||||||
|
## Docker Volumes
|
||||||
|
|
||||||
|
| Volume | Container-Pfad | Beschreibung |
|
||||||
|
|--------|---------------|-------------|
|
||||||
|
| `./app/cfg` | `/opt/video-konverter/app/cfg` | Konfiguration (persistent) |
|
||||||
|
| `./data` | `/opt/video-konverter/data` | Queue-Persistierung |
|
||||||
|
| `./logs` | `/opt/video-konverter/logs` | Server-Logs |
|
||||||
|
| `/mnt` | `/mnt` | Medien-Pfade (1:1 durchgereicht) |
|
||||||
|
|
||||||
|
|
||||||
|
## Lizenz
|
||||||
|
|
||||||
|
Privates Projekt von Eddy (Eduard Wisch).
|
||||||
|
|
|
||||||
14
__main__.py
Normal file
14
__main__.py
Normal file
|
|
@ -0,0 +1,14 @@
|
||||||
|
"""Einstiegspunkt fuer den VideoKonverter Server"""
|
||||||
|
import asyncio
|
||||||
|
import logging
|
||||||
|
from app.server import VideoKonverterServer
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
server = VideoKonverterServer()
|
||||||
|
try:
|
||||||
|
asyncio.run(server.run())
|
||||||
|
except KeyboardInterrupt:
|
||||||
|
logging.warning("Server wurde manuell beendet")
|
||||||
|
except Exception as e:
|
||||||
|
logging.critical(f"Kritischer Fehler: {e}", exc_info=True)
|
||||||
0
app/__init__.py
Normal file
0
app/__init__.py
Normal file
82
app/cfg/presets.yaml
Normal file
82
app/cfg/presets.yaml
Normal file
|
|
@ -0,0 +1,82 @@
|
||||||
|
# Encoding presets for the VideoKonverter.
# NOTE(review): field semantics inferred from the key names and standard
# ffmpeg option names -- confirm against the encoder service that consumes
# this file.
#   video_codec    ffmpeg encoder name
#   container      target container / file extension
#   quality_param  rate-control knob: "qp" (VAAPI) or "crf" (software)
#   gop_size       keyframe interval in frames
#   video_filter   ffmpeg -vf chain (GPU presets upload frames to the device)
#   hw_init        whether a VAAPI hardware context must be initialised
#   extra_params   codec-private options passed through to ffmpeg

# === GPU presets (Intel VAAPI) ===
gpu_av1:
  name: "GPU AV1 (Standard)"
  video_codec: "av1_vaapi"
  container: "webm"
  quality_param: "qp"
  quality_value: 30
  gop_size: 240
  # nv12 = 8-bit pixel format expected by the hardware uploader
  video_filter: "format=nv12,hwupload"
  hw_init: true
  extra_params: {}

gpu_av1_10bit:
  name: "GPU AV1 10-Bit"
  video_codec: "av1_vaapi"
  container: "webm"
  quality_param: "qp"
  quality_value: 30
  gop_size: 240
  # p010 = 10-bit pixel format for the hardware upload
  video_filter: "format=p010,hwupload"
  hw_init: true
  extra_params: {}

gpu_hevc:
  name: "GPU HEVC/H.265"
  video_codec: "hevc_vaapi"
  container: "mkv"
  quality_param: "qp"
  quality_value: 28
  gop_size: 240
  video_filter: "format=nv12,hwupload"
  hw_init: true
  extra_params: {}

gpu_h264:
  name: "GPU H.264"
  video_codec: "h264_vaapi"
  container: "mp4"
  quality_param: "qp"
  quality_value: 23
  gop_size: 240
  video_filter: "format=nv12,hwupload"
  hw_init: true
  extra_params: {}

# === CPU presets ===
cpu_av1:
  name: "CPU AV1/SVT-AV1 (Standard)"
  video_codec: "libsvtav1"
  container: "webm"
  quality_param: "crf"
  quality_value: 30
  gop_size: 240
  # SVT-AV1 speed preset is numeric (0 = slowest/best ... 13 = fastest),
  # unlike the x264/x265 named presets below.
  speed_preset: 5
  video_filter: ""
  hw_init: false
  extra_params:
    svtav1-params: "tune=0:film-grain=8"

cpu_hevc:
  name: "CPU HEVC/x265"
  video_codec: "libx265"
  container: "mkv"
  quality_param: "crf"
  quality_value: 28
  gop_size: 250
  speed_preset: "medium"
  video_filter: ""
  hw_init: false
  extra_params: {}

cpu_h264:
  name: "CPU H.264/x264"
  video_codec: "libx264"
  container: "mp4"
  quality_param: "crf"
  quality_value: 23
  gop_size: 250
  speed_preset: "medium"
  video_filter: ""
  hw_init: false
  extra_params: {}
|
||||||
89
app/cfg/settings.yaml
Normal file
89
app/cfg/settings.yaml
Normal file
|
|
@ -0,0 +1,89 @@
|
||||||
|
# Global application settings.
# NOTE(review): this file is rewritten by Config.save_settings()
# (yaml.dump), which does not preserve comments -- comments here may be
# lost on the next programmatic save.
audio:
  # Target audio bitrate per channel count (2 = stereo, 6 = 5.1, 8 = 7.1)
  bitrate_map:
    2: 128k
    6: 320k
    8: 450k
  # Fallback bitrate for channel counts not listed in bitrate_map
  default_bitrate: 192k
  default_codec: libopus
  keep_channels: true
  # Audio languages to keep ("und" = undetermined/untagged)
  languages:
  - ger
  - eng
  - und
cleanup:
  # Extensions removed by the clean service (only active when enabled: true)
  delete_extensions:
  - .avi
  - .wmv
  - .vob
  - .nfo
  - .txt
  - .jpg
  - .png
  - .srt
  - .sub
  - .idx
  # Cleanup is opt-in and disabled by default
  enabled: false
  # File name patterns never deleted
  exclude_patterns:
  - readme*
  - '*.md'
  # NOTE(review): .srt appears both in delete_extensions and
  # keep_extensions; presumably keep wins -- confirm in the clean service.
  keep_extensions:
  - .srt
database:
  # SECURITY: plaintext credentials (and the TVDB API key below) are
  # committed to version control. Move them to environment variables or a
  # secrets store and rotate them.
  database: video_converter
  host: 192.168.155.11
  password: '8715'
  port: 3306
  user: video
encoding:
  # Preset key from presets.yaml used when a job specifies none
  default_preset: cpu_av1
  gpu_device: /dev/dri/renderD128
  # Intel media driver name (iHD = modern Gen9+ driver)
  gpu_driver: iHD
  max_parallel_jobs: 1
  # cpu | gpu | auto -- can be overridden via VIDEO_KONVERTER_MODE
  mode: cpu
files:
  # Keep the source file after a successful conversion
  delete_source: false
  recursive_scan: true
  # Extensions picked up when scanning folders for convertible videos
  scan_extensions:
  - .mkv
  - .mp4
  - .avi
  - .wmv
  - .vob
  - .ts
  - .m4v
  - .flv
  - .mov
  target_container: webm
  # "same" = write next to the source; otherwise an absolute folder path
  target_folder: same
library:
  enabled: true
  # copy | move (presumably -- confirm in the import service)
  import_default_mode: copy
  import_naming_pattern: '{series} - S{season:02d}E{episode:02d} - {title}.{ext}'
  import_season_pattern: Season {season:02d}
  # 0 = no periodic rescans
  scan_interval_hours: 0
  # SECURITY: secret committed to VCS -- see note on the database block
  tvdb_api_key: 5db8defd-41cd-4e0d-a637-ac0a96cbedd9
  tvdb_language: deu
  tvdb_pin: ''
logging:
  backup_count: 7
  file: server.log
  level: INFO
  # Only used for rotation: size
  max_size_mb: 10
  # time (daily at midnight) | size
  rotation: time
server:
  # Overrides the WebSocket host:port handed to browser clients when set
  external_url: ''
  host: 0.0.0.0
  port: 8080
  use_https: false
  websocket_path: /ws
statistics:
  cleanup_days: 365
  max_entries: 5000
subtitle:
  # Bitmap subtitle codecs that cannot be copied into the target container
  codec_blacklist:
  - hdmv_pgs_subtitle
  - dvd_subtitle
  - dvb_subtitle
  languages:
  - ger
  - eng
|
||||||
173
app/config.py
Normal file
173
app/config.py
Normal file
|
|
@ -0,0 +1,173 @@
|
||||||
|
"""Konfigurationsmanagement - Singleton fuer Settings und Presets"""
|
||||||
|
import os
|
||||||
|
import logging
|
||||||
|
import yaml
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Optional
|
||||||
|
from logging.handlers import TimedRotatingFileHandler, RotatingFileHandler
|
||||||
|
|
||||||
|
|
||||||
|
class Config:
    """Loads and manages settings.yaml and presets.yaml.

    Process-wide singleton: every ``Config()`` call returns the same
    instance, so settings read here and written back via
    :meth:`save_settings` stay consistent across all services.
    """
    # Shared instance, created lazily in __new__
    _instance: Optional['Config'] = None

    def __new__(cls) -> 'Config':
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            # Flag consumed by __init__ so its body runs only once
            cls._instance._initialized = False
        return cls._instance

    def __init__(self) -> None:
        # __init__ runs on every Config() call; skip re-initialisation
        if self._initialized:
            return
        self._initialized = True

        # Paths resolved relative to this file: app/cfg, ../logs, ../data
        self._base_path = Path(__file__).parent
        self._cfg_path = self._base_path / "cfg"
        self._log_path = self._base_path.parent / "logs"
        self._data_path = self._base_path.parent / "data"

        # Ensure runtime directories exist
        self._log_path.mkdir(parents=True, exist_ok=True)
        self._data_path.mkdir(parents=True, exist_ok=True)

        self.settings: dict = {}
        self.presets: dict = {}
        self._load_settings()
        self._load_presets()
        self._apply_env_overrides()

    def _load_settings(self) -> None:
        """Loads settings.yaml; falls back to an empty dict if missing."""
        settings_file = self._cfg_path / "settings.yaml"
        try:
            with open(settings_file, "r", encoding="utf-8") as f:
                self.settings = yaml.safe_load(f) or {}
            logging.info(f"Settings geladen: {settings_file}")
        except FileNotFoundError:
            logging.error(f"Settings nicht gefunden: {settings_file}")
            self.settings = {}

    def _load_presets(self) -> None:
        """Loads presets.yaml; falls back to an empty dict if missing."""
        presets_file = self._cfg_path / "presets.yaml"
        try:
            with open(presets_file, "r", encoding="utf-8") as f:
                self.presets = yaml.safe_load(f) or {}
            logging.info(f"Presets geladen: {presets_file}")
        except FileNotFoundError:
            logging.error(f"Presets nicht gefunden: {presets_file}")
            self.presets = {}

    def _apply_env_overrides(self) -> None:
        """Environment variables override file-based settings.

        Currently only VIDEO_KONVERTER_MODE ("cpu" | "gpu" | "auto"),
        which overrides ``encoding.mode``.
        """
        env_mode = os.environ.get("VIDEO_KONVERTER_MODE")
        if env_mode and env_mode in ("cpu", "gpu", "auto"):
            self.settings.setdefault("encoding", {})["mode"] = env_mode
            logging.info(f"Encoding-Modus per Umgebungsvariable: {env_mode}")

    def save_settings(self) -> None:
        """Writes the current settings back to settings.yaml.

        Errors are logged, not raised (best-effort persistence).
        """
        settings_file = self._cfg_path / "settings.yaml"
        try:
            with open(settings_file, "w", encoding="utf-8") as f:
                yaml.dump(self.settings, f, default_flow_style=False,
                          indent=2, allow_unicode=True)
            logging.info("Settings gespeichert")
        except Exception as e:
            logging.error(f"Settings speichern fehlgeschlagen: {e}")

    def save_presets(self) -> None:
        """Writes the presets back to presets.yaml (best-effort, logged)."""
        presets_file = self._cfg_path / "presets.yaml"
        try:
            with open(presets_file, "w", encoding="utf-8") as f:
                yaml.dump(self.presets, f, default_flow_style=False,
                          indent=2, allow_unicode=True)
            logging.info("Presets gespeichert")
        except Exception as e:
            logging.error(f"Presets speichern fehlgeschlagen: {e}")

    def setup_logging(self) -> None:
        """Configures root logging: console handler plus a rotating file.

        ``logging.rotation`` selects the strategy: "time" rotates daily at
        midnight, anything else rotates by size (``logging.max_size_mb``).
        """
        log_cfg = self.settings.get("logging", {})
        log_level = log_cfg.get("level", "INFO")
        log_file = log_cfg.get("file", "server.log")
        log_mode = log_cfg.get("rotation", "time")
        backup_count = log_cfg.get("backup_count", 7)

        log_path = self._log_path / log_file
        handlers = [logging.StreamHandler()]

        if log_mode == "time":
            file_handler = TimedRotatingFileHandler(
                str(log_path), when="midnight", interval=1,
                backupCount=backup_count, encoding="utf-8"
            )
        else:
            max_bytes = log_cfg.get("max_size_mb", 10) * 1024 * 1024
            file_handler = RotatingFileHandler(
                str(log_path), maxBytes=max_bytes,
                backupCount=backup_count, encoding="utf-8"
            )

        handlers.append(file_handler)

        # force=True because Config.__init__ logs before setup_logging(),
        # which implicitly installs default handlers that must be replaced
        logging.basicConfig(
            level=getattr(logging, log_level, logging.INFO),
            format="%(asctime)s - %(levelname)s - %(message)s",
            handlers=handlers,
            force=True,
        )

    # --- Properties for frequently needed values ---

    @property
    def encoding_mode(self) -> str:
        """Encoder mode: "cpu", "gpu" or "auto"."""
        return self.settings.get("encoding", {}).get("mode", "cpu")

    @property
    def gpu_device(self) -> str:
        """VAAPI render device path."""
        return self.settings.get("encoding", {}).get("gpu_device", "/dev/dri/renderD128")

    @property
    def max_parallel_jobs(self) -> int:
        """Maximum number of concurrently running conversions."""
        return self.settings.get("encoding", {}).get("max_parallel_jobs", 1)

    @property
    def target_container(self) -> str:
        """Default output container extension."""
        return self.settings.get("files", {}).get("target_container", "webm")

    @property
    def default_preset_name(self) -> str:
        """Key of the default preset in presets.yaml."""
        return self.settings.get("encoding", {}).get("default_preset", "cpu_av1")

    @property
    def default_preset(self) -> dict:
        """The default preset's mapping ({} if the key is unknown)."""
        name = self.default_preset_name
        return self.presets.get(name, {})

    @property
    def data_path(self) -> Path:
        """Directory for persisted runtime data (e.g. the queue)."""
        return self._data_path

    @property
    def audio_config(self) -> dict:
        """The ``audio`` settings section."""
        return self.settings.get("audio", {})

    @property
    def subtitle_config(self) -> dict:
        """The ``subtitle`` settings section."""
        return self.settings.get("subtitle", {})

    @property
    def files_config(self) -> dict:
        """The ``files`` settings section."""
        return self.settings.get("files", {})

    @property
    def cleanup_config(self) -> dict:
        """The ``cleanup`` settings section."""
        return self.settings.get("cleanup", {})

    @property
    def server_config(self) -> dict:
        """The ``server`` settings section."""
        return self.settings.get("server", {})
|
||||||
0
app/models/__init__.py
Normal file
0
app/models/__init__.py
Normal file
201
app/models/job.py
Normal file
201
app/models/job.py
Normal file
|
|
@ -0,0 +1,201 @@
|
||||||
|
"""Konvertierungs-Job-Modell mit Status-Management"""
|
||||||
|
import time
|
||||||
|
import asyncio
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
from enum import IntEnum
|
||||||
|
from typing import Optional
|
||||||
|
from app.models.media import MediaFile
|
||||||
|
|
||||||
|
# Globaler Zaehler fuer eindeutige IDs
|
||||||
|
_id_counter = 0
|
||||||
|
|
||||||
|
|
||||||
|
class JobStatus(IntEnum):
    """Lifecycle state of a ConversionJob.

    IntEnum so the raw integer can be serialised directly -- the
    ``to_dict_*`` methods of ConversionJob emit ``status.value``.
    """
    QUEUED = 0     # waiting in the queue
    ACTIVE = 1     # ffmpeg currently running
    FINISHED = 2   # completed successfully
    FAILED = 3     # conversion failed
    CANCELLED = 4  # aborted by the user
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class ConversionJob:
    """A single conversion task: one source file, one ffmpeg run."""
    media: MediaFile
    preset_name: str = ""

    # Assigned in __post_init__; unique per process (see there)
    id: int = field(init=False)
    status: JobStatus = field(default=JobStatus.QUEUED)

    # Target information, filled by build_target_path()
    target_path: str = ""
    target_filename: str = ""
    target_container: str = "webm"

    # ffmpeg process handles (repr=False: not printable/serialisable)
    ffmpeg_cmd: list[str] = field(default_factory=list)
    process: Optional[asyncio.subprocess.Process] = field(default=None, repr=False)
    task: Optional[asyncio.Task] = field(default=None, repr=False)

    # Live progress as parsed from ffmpeg output
    progress_percent: float = 0.0
    progress_fps: float = 0.0
    progress_speed: float = 0.0
    progress_bitrate: int = 0
    progress_size_bytes: int = 0
    progress_time_sec: float = 0.0
    progress_frames: int = 0
    progress_eta_sec: float = 0.0

    # Timestamps (epoch seconds)
    created_at: float = field(default_factory=time.time)
    started_at: Optional[float] = None
    finished_at: Optional[float] = None

    # Statistics accumulators as [sum, count] pairs
    _stat_fps: list = field(default_factory=lambda: [0.0, 0])
    _stat_speed: list = field(default_factory=lambda: [0.0, 0])
    _stat_bitrate: list = field(default_factory=lambda: [0, 0])

    def __post_init__(self) -> None:
        # Millisecond timestamp * 1000 plus a process-wide counter keeps
        # IDs unique even when several jobs are created in the same ms.
        global _id_counter
        self.id = int(time.time() * 1000) * 1000 + _id_counter
        _id_counter += 1

    def build_target_path(self, config) -> None:
        """Derives target path/filename/container from the config.

        Uses ``files.target_container`` and ``files.target_folder``
        ("same" = next to the source). If the target would overwrite the
        source, "_converted" is appended to the base name.

        Bugfix: when the source sits in the current working directory
        (``source_dir == ""``), the target no longer gains a bogus
        leading "/" (previously "/file.webm").
        """
        files_cfg = config.files_config
        container = files_cfg.get("target_container", "webm")
        self.target_container = container

        # File name without extension + new extension
        base_name = self.media.source_filename.rsplit(".", 1)[0]
        self.target_filename = f"{base_name}.{container}"

        # Target directory
        target_folder = files_cfg.get("target_folder", "same")
        if target_folder == "same":
            target_dir = self.media.source_dir
        else:
            target_dir = target_folder

        if target_dir:
            self.target_path = f"{target_dir}/{self.target_filename}"
        else:
            # Source is a bare filename (cwd-relative) -- keep it relative
            self.target_path = self.target_filename

        # Conflict avoidance: target would be identical to the source
        if self.target_path == self.media.source_path:
            base = self.target_path.rsplit(".", 1)[0]
            self.target_path = f"{base}_converted.{container}"
            self.target_filename = f"{base_name}_converted.{container}"

    def update_stats(self, fps: float, speed: float, bitrate: int) -> None:
        """Accumulates progress samples for the avg_* properties.

        Zero/negative samples are ignored so idle ticks do not skew the
        averages.
        """
        if fps > 0:
            self._stat_fps[0] += fps
            self._stat_fps[1] += 1
        if speed > 0:
            self._stat_speed[0] += speed
            self._stat_speed[1] += 1
        if bitrate > 0:
            self._stat_bitrate[0] += bitrate
            self._stat_bitrate[1] += 1

    @property
    def avg_fps(self) -> float:
        """Average fps over all recorded samples (0.0 if none)."""
        if self._stat_fps[1] > 0:
            return self._stat_fps[0] / self._stat_fps[1]
        return 0.0

    @property
    def avg_speed(self) -> float:
        """Average encoding speed factor over all samples (0.0 if none)."""
        if self._stat_speed[1] > 0:
            return self._stat_speed[0] / self._stat_speed[1]
        return 0.0

    @property
    def avg_bitrate(self) -> int:
        """Average bitrate over all samples (0 if none)."""
        if self._stat_bitrate[1] > 0:
            return int(self._stat_bitrate[0] / self._stat_bitrate[1])
        return 0

    @property
    def duration_sec(self) -> float:
        """Conversion duration in seconds (0.0 while not finished)."""
        if self.started_at and self.finished_at:
            return self.finished_at - self.started_at
        return 0.0

    def to_dict_active(self) -> dict:
        """Payload for the WebSocket data_convert message."""
        size = MediaFile.format_size(self.media.source_size_bytes)
        return {
            "source_file_name": self.media.source_filename,
            "source_file": self.media.source_path,
            "source_path": self.media.source_dir,
            "source_duration": self.media.source_duration_sec,
            "source_size": [size[0], size[1]],
            "source_frame_rate": self.media.frame_rate,
            "source_frames_total": self.media.total_frames,
            "target_file_name": self.target_filename,
            "target_file": self.target_path,
            "status": self.status.value,
            "preset": self.preset_name,
        }

    def to_dict_queue(self) -> dict:
        """Payload for the WebSocket data_queue message."""
        return {
            "source_file_name": self.media.source_filename,
            "source_file": self.media.source_path,
            "source_path": self.media.source_dir,
            "status": self.status.value,
            "preset": self.preset_name,
        }

    def to_dict_progress(self) -> dict:
        """Payload for the WebSocket data_flow (progress) message."""
        target_size = MediaFile.format_size(self.progress_size_bytes)
        return {
            "id": self.id,
            "frames": self.progress_frames,
            "fps": self.progress_fps,
            "speed": self.progress_speed,
            "quantizer": 0,
            "size": [target_size[0], target_size[1]],
            "time": MediaFile.format_time(self.progress_time_sec),
            "time_remaining": MediaFile.format_time(self.progress_eta_sec),
            "loading": round(self.progress_percent, 1),
            "bitrate": [self.progress_bitrate, "kbits/s"],
        }

    def to_dict_stats(self) -> dict:
        """Row for the statistics database."""
        return {
            "source_path": self.media.source_path,
            "source_filename": self.media.source_filename,
            "source_size_bytes": self.media.source_size_bytes,
            "source_duration_sec": self.media.source_duration_sec,
            "source_frame_rate": self.media.frame_rate,
            "source_frames_total": self.media.total_frames,
            "target_path": self.target_path,
            "target_filename": self.target_filename,
            "target_size_bytes": self.progress_size_bytes,
            "target_container": self.target_container,
            "preset_name": self.preset_name,
            "status": self.status.value,
            "started_at": self.started_at,
            "finished_at": self.finished_at,
            "duration_sec": self.duration_sec,
            "avg_fps": self.avg_fps,
            "avg_speed": self.avg_speed,
            "avg_bitrate": self.avg_bitrate,
        }

    def to_json(self) -> dict:
        """Minimal record for queue persistence across restarts."""
        return {
            "source_path": self.media.source_path,
            "preset_name": self.preset_name,
            "status": self.status.value,
            "created_at": self.created_at,
        }
|
||||||
166
app/models/media.py
Normal file
166
app/models/media.py
Normal file
|
|
@ -0,0 +1,166 @@
|
||||||
|
"""Media-Datei-Modell mit Stream-Informationen"""
|
||||||
|
import os
|
||||||
|
import math
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class VideoStream:
    """Single video stream as reported by ffprobe."""
    index: int               # stream index within the container
    codec_name: str          # e.g. "h264", "hevc", "av1"
    width: int = 0
    height: int = 0
    pix_fmt: str = ""        # ffprobe pixel format, e.g. "yuv420p10le"
    frame_rate: float = 0.0
    level: Optional[int] = None
    bit_rate: Optional[int] = None

    @property
    def is_10bit(self) -> bool:
        """Detects 10-bit pixel formats (e.g. yuv420p10le, p010le).

        Matches any pix_fmt containing "10"; the previous extra check for
        "p010" was redundant because "p010" itself contains "10".
        """
        return "10" in self.pix_fmt

    @property
    def resolution(self) -> str:
        """Resolution as "WIDTHxHEIGHT"."""
        return f"{self.width}x{self.height}"
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class AudioStream:
    """Single audio stream as reported by ffprobe."""
    index: int               # stream index within the container
    codec_name: str          # e.g. "aac", "eac3", "opus"
    channels: int = 2
    sample_rate: int = 48000
    language: Optional[str] = None   # ISO 639 tag, if tagged
    bit_rate: Optional[int] = None

    @property
    def channel_layout(self) -> str:
        """Human-readable channel layout name.

        Known channel counts map to their common names (e.g. 6 -> "5.1");
        anything else falls back to "<n>ch".
        """
        if self.channels == 1:
            return "Mono"
        if self.channels == 2:
            return "Stereo"
        if self.channels == 3:
            return "2.1"
        if self.channels == 6:
            return "5.1"
        if self.channels == 8:
            return "7.1"
        return f"{self.channels}ch"
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class SubtitleStream:
    """Single subtitle stream as reported by ffprobe."""
    index: int        # stream index within the container
    codec_name: str   # e.g. "subrip", "hdmv_pgs_subtitle"
    language: Optional[str] = None  # ISO 639 tag, if tagged
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class MediaFile:
    """Analysed media file with all stream information from ffprobe."""
    source_path: str
    # Derived from source_path in __post_init__ (not constructor args):
    source_dir: str = field(init=False)
    source_filename: str = field(init=False)
    source_extension: str = field(init=False)
    source_size_bytes: int = 0
    source_duration_sec: float = 0.0
    source_bitrate: int = 0

    video_streams: list[VideoStream] = field(default_factory=list)
    audio_streams: list[AudioStream] = field(default_factory=list)
    subtitle_streams: list[SubtitleStream] = field(default_factory=list)

    def __post_init__(self) -> None:
        # Split the path once so the parts are cheap to reuse everywhere
        self.source_dir = os.path.dirname(self.source_path)
        self.source_filename = os.path.basename(self.source_path)
        self.source_extension = os.path.splitext(self.source_filename)[1].lower()

    @property
    def frame_rate(self) -> float:
        """Frame rate of the first video stream (0.0 if none)."""
        if self.video_streams:
            return self.video_streams[0].frame_rate
        return 0.0

    @property
    def total_frames(self) -> int:
        """Estimated total frame count (frame rate x duration)."""
        return int(self.frame_rate * self.source_duration_sec)

    @property
    def is_10bit(self) -> bool:
        """True if the first video stream is 10-bit (False if no video)."""
        if self.video_streams:
            return self.video_streams[0].is_10bit
        return False

    @property
    def source_size_human(self) -> tuple[float, str]:
        """Source size as a human-readable (value, unit) pair."""
        return self.format_size(self.source_size_bytes)

    def to_dict(self) -> dict:
        """Serialises the file for WebSocket/API responses."""
        size = self.source_size_human
        return {
            "source_path": self.source_path,
            "source_dir": self.source_dir,
            "source_filename": self.source_filename,
            "source_extension": self.source_extension,
            "source_size": [size[0], size[1]],
            "source_duration": self.source_duration_sec,
            "source_duration_human": self.format_time(self.source_duration_sec),
            "source_frame_rate": self.frame_rate,
            "source_frames_total": self.total_frames,
            "video_streams": len(self.video_streams),
            "audio_streams": [
                {"index": a.index, "codec": a.codec_name,
                 "channels": a.channels, "layout": a.channel_layout,
                 "language": a.language}
                for a in self.audio_streams
            ],
            "subtitle_streams": [
                {"index": s.index, "codec": s.codec_name,
                 "language": s.language}
                for s in self.subtitle_streams
            ],
        }

    @staticmethod
    def format_size(size_bytes: int) -> tuple[float, str]:
        """Converts a byte count into a human-readable (value, unit) pair."""
        units = ["B", "KiB", "MiB", "GiB", "TiB"]
        size = float(size_bytes)
        unit_idx = 0
        while size >= 1024.0 and unit_idx < len(units) - 1:
            size /= 1024.0
            unit_idx += 1
        return round(size, 1), units[unit_idx]

    @staticmethod
    def format_time(seconds: float) -> str:
        """Formats a duration in seconds as e.g. "1 Tage 2 Std 3 Min".

        Bugfix: the total is rounded up to whole minutes BEFORE being
        decomposed. The previous version applied math.ceil to the minute
        remainder only, which could yield outputs like "23 Std 60 Min"
        for 86399 seconds instead of "1 Tage".
        """
        if seconds <= 0:
            return "0 Min"
        total_minutes = math.ceil(seconds / 60)
        days, rest = divmod(total_minutes, 1440)
        hours, minutes = divmod(rest, 60)

        parts = []
        if days:
            parts.append(f"{days} Tage")
        if hours:
            parts.append(f"{hours} Std")
        if minutes:
            parts.append(f"{minutes} Min")
        return " ".join(parts) if parts else "< 1 Min"

    @staticmethod
    def time_to_seconds(time_str: str) -> float:
        """Converts "HH:MM:SS", "MM:SS" or a plain seconds string to float.

        Generalisation: "MM:SS" was previously unhandled (returned 0.0).
        Unparseable input still yields 0.0 instead of raising, so a
        garbled progress line cannot crash the parser.
        """
        parts = time_str.split(":")
        try:
            if len(parts) == 1:
                return float(parts[0])
            if len(parts) == 2:
                m, s = map(float, parts)
                return m * 60 + s
            if len(parts) == 3:
                h, m, s = map(float, parts)
                return h * 3600 + m * 60 + s
        except ValueError:
            return 0.0
        return 0.0
|
||||||
0
app/routes/__init__.py
Normal file
0
app/routes/__init__.py
Normal file
361
app/routes/api.py
Normal file
361
app/routes/api.py
Normal file
|
|
@ -0,0 +1,361 @@
|
||||||
|
"""REST API Endpoints"""
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
from pathlib import Path
|
||||||
|
from aiohttp import web
|
||||||
|
from app.config import Config
|
||||||
|
from app.services.queue import QueueService
|
||||||
|
from app.services.scanner import ScannerService
|
||||||
|
from app.services.encoder import EncoderService
|
||||||
|
|
||||||
|
|
||||||
|
def setup_api_routes(app: web.Application, config: Config,
|
||||||
|
queue_service: QueueService,
|
||||||
|
scanner: ScannerService) -> None:
|
||||||
|
"""Registriert alle API-Routes"""
|
||||||
|
|
||||||
|
# --- Job-Management ---
|
||||||
|
|
||||||
|
async def post_convert(request: web.Request) -> web.Response:
|
||||||
|
"""
|
||||||
|
POST /api/convert
|
||||||
|
Body: {"files": ["/pfad/datei.mkv", "/pfad/ordner/"]}
|
||||||
|
Optional: {"files": [...], "preset": "gpu_av1", "recursive": true}
|
||||||
|
Hauptendpoint fuer KDE Dolphin Integration.
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
data = await request.json()
|
||||||
|
except Exception:
|
||||||
|
return web.json_response(
|
||||||
|
{"error": "Ungueltiges JSON"}, status=400
|
||||||
|
)
|
||||||
|
|
||||||
|
files = data.get("files", [])
|
||||||
|
if not files:
|
||||||
|
return web.json_response(
|
||||||
|
{"error": "Keine Dateien angegeben"}, status=400
|
||||||
|
)
|
||||||
|
|
||||||
|
preset = data.get("preset")
|
||||||
|
recursive = data.get("recursive")
|
||||||
|
|
||||||
|
logging.info(f"POST /api/convert: {len(files)} Pfade empfangen")
|
||||||
|
|
||||||
|
jobs = await queue_service.add_paths(files, preset, recursive)
|
||||||
|
|
||||||
|
return web.json_response({
|
||||||
|
"message": f"{len(jobs)} Jobs erstellt",
|
||||||
|
"jobs": [{"id": j.id, "file": j.media.source_filename} for j in jobs],
|
||||||
|
})
|
||||||
|
|
||||||
|
async def get_jobs(request: web.Request) -> web.Response:
|
||||||
|
"""GET /api/jobs - Alle Jobs mit Status"""
|
||||||
|
return web.json_response({"jobs": queue_service.get_all_jobs()})
|
||||||
|
|
||||||
|
async def delete_job(request: web.Request) -> web.Response:
|
||||||
|
"""DELETE /api/jobs/{job_id}"""
|
||||||
|
job_id = int(request.match_info["job_id"])
|
||||||
|
success = await queue_service.remove_job(job_id)
|
||||||
|
if success:
|
||||||
|
return web.json_response({"message": "Job geloescht"})
|
||||||
|
return web.json_response({"error": "Job nicht gefunden"}, status=404)
|
||||||
|
|
||||||
|
async def post_cancel(request: web.Request) -> web.Response:
|
||||||
|
"""POST /api/jobs/{job_id}/cancel"""
|
||||||
|
job_id = int(request.match_info["job_id"])
|
||||||
|
success = await queue_service.cancel_job(job_id)
|
||||||
|
if success:
|
||||||
|
return web.json_response({"message": "Job abgebrochen"})
|
||||||
|
return web.json_response({"error": "Job nicht aktiv"}, status=400)
|
||||||
|
|
||||||
|
async def post_retry(request: web.Request) -> web.Response:
|
||||||
|
"""POST /api/jobs/{job_id}/retry"""
|
||||||
|
job_id = int(request.match_info["job_id"])
|
||||||
|
success = await queue_service.retry_job(job_id)
|
||||||
|
if success:
|
||||||
|
return web.json_response({"message": "Job wiederholt"})
|
||||||
|
return web.json_response({"error": "Job nicht fehlgeschlagen"}, status=400)
|
||||||
|
|
||||||
|
# --- Settings ---
|
||||||
|
|
||||||
|
async def get_settings(request: web.Request) -> web.Response:
|
||||||
|
"""GET /api/settings"""
|
||||||
|
return web.json_response(config.settings)
|
||||||
|
|
||||||
|
async def put_settings(request: web.Request) -> web.Response:
|
||||||
|
"""PUT /api/settings - Settings aktualisieren"""
|
||||||
|
try:
|
||||||
|
new_settings = await request.json()
|
||||||
|
except Exception:
|
||||||
|
return web.json_response(
|
||||||
|
{"error": "Ungueltiges JSON"}, status=400
|
||||||
|
)
|
||||||
|
|
||||||
|
# Settings zusammenfuehren (deep merge)
|
||||||
|
_deep_merge(config.settings, new_settings)
|
||||||
|
config.save_settings()
|
||||||
|
logging.info("Settings aktualisiert via API")
|
||||||
|
return web.json_response({"message": "Settings gespeichert"})
|
||||||
|
|
||||||
|
# --- Presets ---
|
||||||
|
|
||||||
|
async def get_presets(request: web.Request) -> web.Response:
|
||||||
|
"""GET /api/presets"""
|
||||||
|
return web.json_response(config.presets)
|
||||||
|
|
||||||
|
async def put_preset(request: web.Request) -> web.Response:
|
||||||
|
"""PUT /api/presets/{preset_name}"""
|
||||||
|
preset_name = request.match_info["preset_name"]
|
||||||
|
try:
|
||||||
|
preset_data = await request.json()
|
||||||
|
except Exception:
|
||||||
|
return web.json_response(
|
||||||
|
{"error": "Ungueltiges JSON"}, status=400
|
||||||
|
)
|
||||||
|
|
||||||
|
config.presets[preset_name] = preset_data
|
||||||
|
config.save_presets()
|
||||||
|
logging.info(f"Preset '{preset_name}' aktualisiert")
|
||||||
|
return web.json_response({"message": f"Preset '{preset_name}' gespeichert"})
|
||||||
|
|
||||||
|
# --- Statistics ---
|
||||||
|
|
||||||
|
async def get_statistics(request: web.Request) -> web.Response:
    """GET /api/statistics?limit=50&offset=0

    Returns paged statistics entries plus an aggregate summary from the
    queue service.

    Returns:
        200 with {"entries": [...], "summary": {...}}, or 400 when
        limit/offset are not integers.
    """
    # Fix: previously a non-numeric query value raised ValueError and
    # surfaced as an unhandled 500; reject it cleanly instead.
    try:
        limit = int(request.query.get("limit", 50))
        offset = int(request.query.get("offset", 0))
    except ValueError:
        return web.json_response(
            {"error": "limit und offset muessen Zahlen sein"}, status=400
        )
    stats = await queue_service.get_statistics(limit, offset)
    summary = await queue_service.get_statistics_summary()
    return web.json_response({"entries": stats, "summary": summary})
||||||
|
|
||||||
|
# --- System ---
|
||||||
|
|
||||||
|
async def get_system_info(request: web.Request) -> web.Response:
    """GET /api/system — GPU status and available devices.

    Reports encoder capabilities and current queue load.
    """
    gpu_available = EncoderService.detect_gpu_available()
    devices = EncoderService.get_available_render_devices()
    return web.json_response({
        "encoding_mode": config.encoding_mode,
        "gpu_available": gpu_available,
        "gpu_devices": devices,
        "gpu_device_configured": config.gpu_device,
        "default_preset": config.default_preset_name,
        "max_parallel_jobs": config.max_parallel_jobs,
        # NOTE(review): status.value 1 appears to mean "running" and 0
        # "queued" — confirm against the job-status enum definition.
        "active_jobs": len([
            j for j in queue_service.jobs.values()
            if j.status.value == 1
        ]),
        "queued_jobs": len([
            j for j in queue_service.jobs.values()
            if j.status.value == 0
        ]),
    })
||||||
|
|
||||||
|
async def get_ws_config(request: web.Request) -> web.Response:
    """GET /api/ws-config — WebSocket connection info for the client.

    Prefers a configured external URL; otherwise falls back to the host
    the request arrived on.
    """
    srv = config.server_config
    ext_url = srv.get("external_url", "")
    use_https = srv.get("use_https", False)
    port = srv.get("port", 8080)

    if ext_url:
        ws_url = ext_url
    else:
        # No external URL configured: use the Host header of this request.
        ws_url = f"{request.host}"

    return web.json_response({
        "websocket_url": ws_url,
        "websocket_path": srv.get("websocket_path", "/ws"),
        "use_https": use_https,
        "port": port,
    })
||||||
|
|
||||||
|
# --- Filebrowser ---
|
||||||
|
|
||||||
|
# Erlaubte Basispfade (Sicherheit: nur unter /mnt navigierbar)
|
||||||
|
_BROWSE_ROOTS = ["/mnt"]
|
||||||
|
|
||||||
|
def _is_path_allowed(path: str) -> bool:
|
||||||
|
"""Prueft ob Pfad unter einem erlaubten Root liegt"""
|
||||||
|
real = os.path.realpath(path)
|
||||||
|
return any(real.startswith(root) for root in _BROWSE_ROOTS)
|
||||||
|
|
||||||
|
async def get_browse(request: web.Request) -> web.Response:
    """
    GET /api/browse?path=/mnt

    Lists sub-directories and video files of a directory.  Access is
    restricted via _is_path_allowed; hidden entries are skipped; files
    are filtered by the configured scan extensions.
    """
    path = request.query.get("path", "/mnt")

    if not _is_path_allowed(path):
        return web.json_response(
            {"error": "Zugriff verweigert"}, status=403
        )

    if not os.path.isdir(path):
        return web.json_response(
            {"error": "Verzeichnis nicht gefunden"}, status=404
        )

    scan_ext = set(config.files_config.get("scan_extensions", []))
    dirs = []
    files = []

    try:
        for entry in sorted(os.listdir(path)):
            # Skip hidden files/directories.
            if entry.startswith("."):
                continue

            full = os.path.join(path, entry)

            if os.path.isdir(full):
                # Count video files inside the sub-directory (one level,
                # best-effort: unreadable dirs report 0).
                video_count = 0
                try:
                    for f in os.listdir(full):
                        if os.path.splitext(f)[1].lower() in scan_ext:
                            video_count += 1
                except PermissionError:
                    pass
                dirs.append({
                    "name": entry,
                    "path": full,
                    "video_count": video_count,
                })

            elif os.path.isfile(full):
                ext = os.path.splitext(entry)[1].lower()
                if ext in scan_ext:
                    size = os.path.getsize(full)
                    files.append({
                        "name": entry,
                        "path": full,
                        "size": size,
                        "size_human": _format_size(size),
                    })

    except PermissionError:
        return web.json_response(
            {"error": "Keine Leseberechtigung"}, status=403
        )

    # Parent path (for navigating upwards); null when it would leave
    # the allowed roots.
    parent = os.path.dirname(path)
    if not _is_path_allowed(parent):
        parent = None

    return web.json_response({
        "path": path,
        "parent": parent,
        "dirs": dirs,
        "files": files,
        "total_files": len(files),
    })
||||||
|
|
||||||
|
def _format_size(size_bytes: int) -> str:
|
||||||
|
"""Kompakte Groessenangabe"""
|
||||||
|
if size_bytes < 1024 * 1024:
|
||||||
|
return f"{size_bytes / 1024:.0f} KiB"
|
||||||
|
if size_bytes < 1024 * 1024 * 1024:
|
||||||
|
return f"{size_bytes / (1024 * 1024):.1f} MiB"
|
||||||
|
return f"{size_bytes / (1024 * 1024 * 1024):.2f} GiB"
|
||||||
|
|
||||||
|
# --- Upload ---
|
||||||
|
|
||||||
|
# Upload directory
_UPLOAD_DIR = "/mnt/uploads"


async def post_upload(request: web.Request) -> web.Response:
    """
    POST /api/upload (multipart/form-data)

    Uploads one or more video files and enqueues them for conversion.
    Form fields: "preset" (optional preset name) and "files" (file
    parts).  Files with extensions outside the configured scan
    extensions are skipped; name collisions get a numeric suffix.
    """
    os.makedirs(_UPLOAD_DIR, exist_ok=True)

    reader = await request.multipart()
    preset = None
    saved_files = []

    while True:
        part = await reader.next()
        if part is None:
            break

        if part.name == "preset":
            preset = (await part.text()).strip() or None
            continue

        if part.name == "files":
            filename = part.filename
            if not filename:
                continue

            # Security: strip any client-supplied directory components.
            filename = os.path.basename(filename)
            ext = os.path.splitext(filename)[1].lower()
            scan_ext = set(config.files_config.get("scan_extensions", []))
            if ext not in scan_ext:
                # NOTE(review): "(unknown)" looks like a redacted
                # placeholder from the dump (probably {filename}) —
                # confirm against the repository.
                logging.warning(f"Upload abgelehnt (Extension {ext}): (unknown)")
                continue

            # Save file; avoid clobbering by appending _1, _2, ...
            dest = os.path.join(_UPLOAD_DIR, filename)
            if os.path.exists(dest):
                base, extension = os.path.splitext(filename)
                counter = 1
                while os.path.exists(dest):
                    dest = os.path.join(
                        _UPLOAD_DIR, f"{base}_{counter}{extension}"
                    )
                    counter += 1

            # Stream the part to disk chunk by chunk.
            size = 0
            with open(dest, "wb") as f:
                while True:
                    chunk = await part.read_chunk()
                    if not chunk:
                        break
                    f.write(chunk)
                    size += len(chunk)

            saved_files.append(dest)
            # NOTE(review): "(unknown)" is likely a redacted {dest} or
            # {filename} placeholder — confirm against the repository.
            logging.info(f"Upload: (unknown) ({_format_size(size)})")

    if not saved_files:
        return web.json_response(
            {"error": "Keine gueltigen Videodateien hochgeladen"}, status=400
        )

    jobs = await queue_service.add_paths(saved_files, preset)

    return web.json_response({
        "message": f"{len(saved_files)} Datei(en) hochgeladen, {len(jobs)} Jobs erstellt",
        "jobs": [{"id": j.id, "file": j.media.source_filename} for j in jobs],
    })
||||||
|
|
||||||
|
# --- Register routes ---
# Wire every handler defined above to its HTTP method + path.
app.router.add_get("/api/browse", get_browse)
app.router.add_post("/api/upload", post_upload)
app.router.add_post("/api/convert", post_convert)
app.router.add_get("/api/jobs", get_jobs)
app.router.add_delete("/api/jobs/{job_id}", delete_job)
app.router.add_post("/api/jobs/{job_id}/cancel", post_cancel)
app.router.add_post("/api/jobs/{job_id}/retry", post_retry)
app.router.add_get("/api/settings", get_settings)
app.router.add_put("/api/settings", put_settings)
app.router.add_get("/api/presets", get_presets)
app.router.add_put("/api/presets/{preset_name}", put_preset)
app.router.add_get("/api/statistics", get_statistics)
app.router.add_get("/api/system", get_system_info)
app.router.add_get("/api/ws-config", get_ws_config)
||||||
|
|
||||||
|
|
||||||
|
def _deep_merge(base: dict, override: dict) -> None:
|
||||||
|
"""Rekursives Zusammenfuehren zweier Dicts"""
|
||||||
|
for key, value in override.items():
|
||||||
|
if key in base and isinstance(base[key], dict) and isinstance(value, dict):
|
||||||
|
_deep_merge(base[key], value)
|
||||||
|
else:
|
||||||
|
base[key] = value
|
||||||
998
app/routes/library_api.py
Normal file
998
app/routes/library_api.py
Normal file
|
|
@ -0,0 +1,998 @@
|
||||||
|
"""REST API Endpoints fuer die Video-Bibliothek"""
|
||||||
|
import asyncio
|
||||||
|
import logging
|
||||||
|
from aiohttp import web
|
||||||
|
from app.config import Config
|
||||||
|
from app.services.library import LibraryService
|
||||||
|
from app.services.tvdb import TVDBService
|
||||||
|
from app.services.queue import QueueService
|
||||||
|
from app.services.cleaner import CleanerService
|
||||||
|
from app.services.importer import ImporterService
|
||||||
|
|
||||||
|
|
||||||
|
def setup_library_routes(app: web.Application, config: Config,
|
||||||
|
library_service: LibraryService,
|
||||||
|
tvdb_service: TVDBService,
|
||||||
|
queue_service: QueueService,
|
||||||
|
cleaner_service: CleanerService = None,
|
||||||
|
importer_service: ImporterService = None
|
||||||
|
) -> None:
|
||||||
|
"""Registriert Bibliotheks-API-Routes"""
|
||||||
|
|
||||||
|
# === Scan-Pfade ===
|
||||||
|
|
||||||
|
async def get_paths(request: web.Request) -> web.Response:
    """GET /api/library/paths — list configured library scan paths."""
    paths = await library_service.get_paths()
    return web.json_response({"paths": paths})

async def post_path(request: web.Request) -> web.Response:
    """POST /api/library/paths — add a scan path.

    Body: {name, path, media_type} where media_type is 'series'|'movie'.
    """
    try:
        data = await request.json()
    except Exception:
        return web.json_response(
            {"error": "Ungueltiges JSON"}, status=400
        )
    name = data.get("name", "").strip()
    path = data.get("path", "").strip()
    media_type = data.get("media_type", "").strip()

    if not name or not path:
        return web.json_response(
            {"error": "Name und Pfad erforderlich"}, status=400
        )
    if media_type not in ("series", "movie"):
        return web.json_response(
            {"error": "media_type muss 'series' oder 'movie' sein"},
            status=400,
        )

    path_id = await library_service.add_path(name, path, media_type)
    if path_id:
        return web.json_response(
            {"message": "Pfad hinzugefuegt", "id": path_id}
        )
    return web.json_response(
        {"error": "Pfad konnte nicht hinzugefuegt werden"}, status=500
    )

async def put_path(request: web.Request) -> web.Response:
    """PUT /api/library/paths/{path_id} — update fields of a scan path.

    Absent body keys are passed as None (interpreted by the service).
    """
    path_id = int(request.match_info["path_id"])
    try:
        data = await request.json()
    except Exception:
        return web.json_response(
            {"error": "Ungueltiges JSON"}, status=400
        )
    success = await library_service.update_path(
        path_id,
        name=data.get("name"),
        path=data.get("path"),
        media_type=data.get("media_type"),
        enabled=data.get("enabled"),
    )
    if success:
        return web.json_response({"message": "Pfad aktualisiert"})
    return web.json_response(
        {"error": "Pfad nicht gefunden"}, status=404
    )

async def delete_path(request: web.Request) -> web.Response:
    """DELETE /api/library/paths/{path_id} — remove a scan path."""
    path_id = int(request.match_info["path_id"])
    success = await library_service.remove_path(path_id)
    if success:
        return web.json_response({"message": "Pfad entfernt"})
    return web.json_response(
        {"error": "Pfad nicht gefunden"}, status=404
    )
||||||
|
|
||||||
|
# === Scanning ===
|
||||||
|
|
||||||
|
async def post_scan_all(request: web.Request) -> web.Response:
    """POST /api/library/scan — scan all paths (fire-and-forget)."""
    # NOTE(review): the task reference is not stored; per asyncio docs
    # an unreferenced task may be garbage-collected mid-run — consider
    # keeping a reference.
    asyncio.create_task(_run_scan_all())
    return web.json_response({"message": "Scan gestartet"})

async def _run_scan_all():
    """Background worker for the full-library scan."""
    result = await library_service.scan_all()
    logging.info(f"Komplett-Scan Ergebnis: {result}")

async def post_scan_single(request: web.Request) -> web.Response:
    """POST /api/library/scan/{path_id} — scan one path (fire-and-forget)."""
    path_id = int(request.match_info["path_id"])
    asyncio.create_task(_run_scan_single(path_id))
    return web.json_response({"message": "Scan gestartet"})

async def _run_scan_single(path_id: int):
    """Background worker for a single-path scan."""
    result = await library_service.scan_single_path(path_id)
    logging.info(f"Einzel-Scan Ergebnis: {result}")
||||||
|
|
||||||
|
# === Videos abfragen ===
|
||||||
|
|
||||||
|
async def get_videos(request: web.Request) -> web.Response:
    """GET /api/library/videos?filter-params...

    Collects whitelisted filter params from the query string and returns
    a paged video listing.

    Returns:
        200 with the service result, or 400 when page/limit are not
        integers (fix: previously raised ValueError -> unhandled 500).
    """
    filters = {}
    for key in ("library_path_id", "media_type", "series_id",
                "video_codec", "min_width", "max_width",
                "container", "audio_lang", "audio_channels",
                "has_subtitle", "is_10bit", "sort", "order",
                "search"):
        val = request.query.get(key)
        if val:
            filters[key] = val

    try:
        page = int(request.query.get("page", 1))
        limit = int(request.query.get("limit", 50))
    except ValueError:
        return web.json_response(
            {"error": "page und limit muessen Zahlen sein"}, status=400
        )

    result = await library_service.get_videos(filters, page, limit)
    return web.json_response(result)

async def get_movies(request: web.Request) -> web.Response:
    """GET /api/library/movies — movies only (no series).

    Same filter/paging contract as get_videos, with the movie-specific
    filter whitelist.
    """
    filters = {}
    for key in ("video_codec", "min_width", "max_width",
                "container", "audio_lang", "audio_channels",
                "is_10bit", "sort", "order", "search"):
        val = request.query.get(key)
        if val:
            filters[key] = val

    try:
        page = int(request.query.get("page", 1))
        limit = int(request.query.get("limit", 50))
    except ValueError:
        return web.json_response(
            {"error": "page und limit muessen Zahlen sein"}, status=400
        )

    result = await library_service.get_movies(filters, page, limit)
    return web.json_response(result)
||||||
|
|
||||||
|
# === Serien ===
|
||||||
|
|
||||||
|
async def get_series(request: web.Request) -> web.Response:
    """GET /api/library/series — list series, optionally by path_id."""
    path_id = request.query.get("path_id")
    if path_id:
        # NOTE(review): non-numeric path_id raises ValueError here
        # (unhandled 500) — consider validating.
        path_id = int(path_id)
    series = await library_service.get_series_list(path_id)
    return web.json_response({"series": series})

async def get_series_detail(request: web.Request) -> web.Response:
    """GET /api/library/series/{series_id} — single series detail."""
    series_id = int(request.match_info["series_id"])
    detail = await library_service.get_series_detail(series_id)
    if detail:
        return web.json_response(detail)
    return web.json_response(
        {"error": "Serie nicht gefunden"}, status=404
    )

async def delete_series(request: web.Request) -> web.Response:
    """DELETE /api/library/series/{series_id}?delete_files=1

    delete_files=1 also removes files on disk (handled by the service).
    """
    series_id = int(request.match_info["series_id"])
    delete_files = request.query.get("delete_files") == "1"
    result = await library_service.delete_series(
        series_id, delete_files=delete_files
    )
    if result.get("error"):
        return web.json_response(result, status=404)
    return web.json_response(result)

async def get_missing_episodes(request: web.Request) -> web.Response:
    """GET /api/library/series/{series_id}/missing — missing episodes."""
    series_id = int(request.match_info["series_id"])
    missing = await library_service.get_missing_episodes(series_id)
    return web.json_response({"missing": missing})
||||||
|
|
||||||
|
# === TVDB ===
|
||||||
|
|
||||||
|
async def post_tvdb_match(request: web.Request) -> web.Response:
    """POST /api/library/series/{series_id}/tvdb-match

    Body: {tvdb_id}. Links the series to a TVDB entry and pulls data.
    """
    series_id = int(request.match_info["series_id"])
    try:
        data = await request.json()
    except Exception:
        return web.json_response(
            {"error": "Ungueltiges JSON"}, status=400
        )
    tvdb_id = data.get("tvdb_id")
    if not tvdb_id:
        return web.json_response(
            {"error": "tvdb_id erforderlich"}, status=400
        )

    result = await tvdb_service.match_and_update_series(
        series_id, int(tvdb_id), library_service
    )
    if result.get("error"):
        return web.json_response(result, status=400)
    return web.json_response(result)

async def delete_tvdb_link(request: web.Request) -> web.Response:
    """DELETE /api/library/series/{series_id}/tvdb — unlink TVDB."""
    series_id = int(request.match_info["series_id"])
    success = await library_service.unlink_tvdb(series_id)
    if success:
        return web.json_response({"message": "TVDB-Zuordnung geloest"})
    return web.json_response(
        {"error": "Serie nicht gefunden"}, status=404
    )

async def post_tvdb_refresh(request: web.Request) -> web.Response:
    """POST /api/library/series/{series_id}/tvdb-refresh

    Re-runs the TVDB match using the already-stored tvdb_id.
    """
    series_id = int(request.match_info["series_id"])
    # Fetch the stored TVDB id from the DB.
    detail = await library_service.get_series_detail(series_id)
    if not detail or not detail.get("tvdb_id"):
        return web.json_response(
            {"error": "Keine TVDB-Zuordnung vorhanden"}, status=400
        )
    result = await tvdb_service.match_and_update_series(
        series_id, detail["tvdb_id"], library_service
    )
    if result.get("error"):
        return web.json_response(result, status=400)
    return web.json_response(result)

async def get_tvdb_search(request: web.Request) -> web.Response:
    """GET /api/tvdb/search?q=Breaking+Bad — series search on TVDB."""
    query = request.query.get("q", "").strip()
    if not query:
        return web.json_response(
            {"error": "Suchbegriff erforderlich"}, status=400
        )
    if not tvdb_service.is_configured:
        return web.json_response(
            {"error": "TVDB nicht konfiguriert (API Key fehlt)"},
            status=400,
        )
    results = await tvdb_service.search_series(query)
    return web.json_response({"results": results})
||||||
|
|
||||||
|
# === TVDB Metadaten ===
|
||||||
|
|
||||||
|
async def get_series_cast(request: web.Request) -> web.Response:
    """GET /api/library/series/{series_id}/cast

    Empty list when the series has no TVDB link.
    """
    series_id = int(request.match_info["series_id"])
    detail = await library_service.get_series_detail(series_id)
    if not detail or not detail.get("tvdb_id"):
        return web.json_response({"cast": []})
    cast = await tvdb_service.get_series_characters(detail["tvdb_id"])
    return web.json_response({"cast": cast})

async def get_series_artworks(request: web.Request) -> web.Response:
    """GET /api/library/series/{series_id}/artworks

    Empty list when the series has no TVDB link.
    """
    series_id = int(request.match_info["series_id"])
    detail = await library_service.get_series_detail(series_id)
    if not detail or not detail.get("tvdb_id"):
        return web.json_response({"artworks": []})
    artworks = await tvdb_service.get_series_artworks(detail["tvdb_id"])
    return web.json_response({"artworks": artworks})

async def post_metadata_download(request: web.Request) -> web.Response:
    """POST /api/library/series/{series_id}/metadata-download

    Downloads TVDB metadata into the series folder.
    """
    series_id = int(request.match_info["series_id"])
    detail = await library_service.get_series_detail(series_id)
    if not detail:
        return web.json_response(
            {"error": "Serie nicht gefunden"}, status=404
        )
    if not detail.get("tvdb_id"):
        return web.json_response(
            {"error": "Keine TVDB-Zuordnung"}, status=400
        )
    result = await tvdb_service.download_metadata(
        series_id, detail["tvdb_id"], detail.get("folder_path", "")
    )
    return web.json_response(result)

async def post_metadata_download_all(request: web.Request) -> web.Response:
    """POST /api/library/metadata-download-all

    Downloads metadata for every TVDB-linked series; returns
    success/skipped/error counters.
    """
    series_list = await library_service.get_series_list()
    results = {"success": 0, "skipped": 0, "errors": 0}
    for s in series_list:
        if not s.get("tvdb_id"):
            results["skipped"] += 1
            continue
        try:
            await tvdb_service.download_metadata(
                s["id"], s["tvdb_id"], s.get("folder_path", "")
            )
            results["success"] += 1
        except Exception:
            # Best-effort batch: count the failure and continue.
            results["errors"] += 1
    return web.json_response(results)

async def get_metadata_image(request: web.Request) -> web.Response:
    """GET /api/library/metadata/{series_id}/{filename}

    Serves a downloaded metadata file from the series' .metadata folder.
    NOTE(review): filename comes from the URL; aiohttp path segments
    cannot contain '/', but consider os.path.basename() as defense in
    depth — confirm routing guarantees.
    """
    series_id = int(request.match_info["series_id"])
    filename = request.match_info["filename"]
    detail = await library_service.get_series_detail(series_id)
    if not detail or not detail.get("folder_path"):
        return web.json_response(
            {"error": "Nicht gefunden"}, status=404
        )
    import os
    file_path = os.path.join(
        detail["folder_path"], ".metadata", filename
    )
    if not os.path.isfile(file_path):
        return web.json_response(
            {"error": "Datei nicht gefunden"}, status=404
        )
    return web.FileResponse(file_path)
||||||
|
|
||||||
|
# === Filme ===
|
||||||
|
|
||||||
|
async def get_movies_list(request: web.Request) -> web.Response:
    """GET /api/library/movies-list?path_id=X — movie listing."""
    path_id = request.query.get("path_id")
    if path_id:
        # NOTE(review): non-numeric path_id raises ValueError here —
        # consider validating.
        path_id = int(path_id)
    movies = await library_service.get_movie_list(path_id)
    return web.json_response({"movies": movies})

async def get_movie_detail(request: web.Request) -> web.Response:
    """GET /api/library/movies/{movie_id} — single movie detail."""
    movie_id = int(request.match_info["movie_id"])
    detail = await library_service.get_movie_detail(movie_id)
    if detail:
        return web.json_response(detail)
    return web.json_response(
        {"error": "Film nicht gefunden"}, status=404
    )

async def delete_movie(request: web.Request) -> web.Response:
    """DELETE /api/library/movies/{movie_id}?delete_files=1"""
    movie_id = int(request.match_info["movie_id"])
    delete_files = request.query.get("delete_files") == "1"
    result = await library_service.delete_movie(
        movie_id, delete_files=delete_files
    )
    if result.get("error"):
        return web.json_response(result, status=404)
    return web.json_response(result)

async def post_movie_tvdb_match(request: web.Request) -> web.Response:
    """POST /api/library/movies/{movie_id}/tvdb-match

    Body: {tvdb_id}. Links the movie to a TVDB entry.
    """
    movie_id = int(request.match_info["movie_id"])
    try:
        data = await request.json()
    except Exception:
        return web.json_response(
            {"error": "Ungueltiges JSON"}, status=400
        )
    tvdb_id = data.get("tvdb_id")
    if not tvdb_id:
        return web.json_response(
            {"error": "tvdb_id erforderlich"}, status=400
        )
    result = await tvdb_service.match_and_update_movie(
        movie_id, int(tvdb_id), library_service
    )
    if result.get("error"):
        return web.json_response(result, status=400)
    return web.json_response(result)

async def delete_movie_tvdb_link(request: web.Request) -> web.Response:
    """DELETE /api/library/movies/{movie_id}/tvdb — unlink TVDB."""
    movie_id = int(request.match_info["movie_id"])
    success = await library_service.unlink_movie_tvdb(movie_id)
    if success:
        return web.json_response({"message": "TVDB-Zuordnung geloest"})
    return web.json_response(
        {"error": "Film nicht gefunden"}, status=404
    )

async def get_tvdb_movie_search(request: web.Request) -> web.Response:
    """GET /api/tvdb/search-movies?q=Inception — movie search on TVDB."""
    query = request.query.get("q", "").strip()
    if not query:
        return web.json_response(
            {"error": "Suchbegriff erforderlich"}, status=400
        )
    if not tvdb_service.is_configured:
        return web.json_response(
            {"error": "TVDB nicht konfiguriert"}, status=400
        )
    results = await tvdb_service.search_movies(query)
    return web.json_response({"results": results})
||||||
|
|
||||||
|
# === TVDB Auto-Match (Review-Modus) ===
|
||||||
|
|
||||||
|
# Shared progress/result state for the TVDB auto-match (review mode).
# Mutated by post_tvdb_auto_match's background task, read by
# get_tvdb_auto_match_status.
_auto_match_state = {
    "active": False,       # a collection run is in progress
    "phase": "",           # "", "starting", "series", "movies", "done", "error"
    "done": 0,             # items processed in the current phase
    "total": 0,            # items total in the current phase
    "current": "",         # name of the item currently being looked up
    "suggestions": None,   # collected suggestions once phase == "done"
}
|
||||||
|
|
||||||
|
async def post_tvdb_auto_match(request: web.Request) -> web.Response:
    """POST /api/library/tvdb-auto-match?type=series|movies|all

    Collects TVDB suggestions (does NOT match automatically); progress
    and results are exposed via _auto_match_state.
    """
    # Only one collection run at a time.
    if _auto_match_state["active"]:
        return web.json_response(
            {"error": "Suche laeuft bereits"}, status=409
        )
    if not tvdb_service.is_configured:
        return web.json_response(
            {"error": "TVDB nicht konfiguriert"}, status=400
        )

    match_type = request.query.get("type", "all")
    # Reset shared progress state before launching the background task.
    _auto_match_state.update({
        "active": True,
        "phase": "starting",
        "done": 0, "total": 0,
        "current": "",
        "suggestions": None,
    })

    async def run_collect():
        # Background worker: fills _auto_match_state as it goes.
        try:
            async def progress_cb(done, total, name, count):
                _auto_match_state.update({
                    "done": done,
                    "total": total,
                    "current": name,
                })

            all_suggestions = []

            if match_type in ("series", "all"):
                _auto_match_state["phase"] = "series"
                _auto_match_state["done"] = 0
                s = await tvdb_service.collect_suggestions(
                    "series", progress_cb
                )
                all_suggestions.extend(s)

            if match_type in ("movies", "all"):
                _auto_match_state["phase"] = "movies"
                _auto_match_state["done"] = 0
                s = await tvdb_service.collect_suggestions(
                    "movies", progress_cb
                )
                all_suggestions.extend(s)

            _auto_match_state["suggestions"] = all_suggestions
            _auto_match_state["phase"] = "done"
        except Exception as e:
            logging.error(f"TVDB Vorschlaege sammeln fehlgeschlagen: {e}")
            _auto_match_state["phase"] = "error"
            _auto_match_state["suggestions"] = []
        finally:
            # Always release the lock so a new run can start.
            _auto_match_state["active"] = False

    asyncio.create_task(run_collect())
    return web.json_response({"message": "TVDB-Suche gestartet"})
||||||
|
|
||||||
|
async def get_tvdb_auto_match_status(
    request: web.Request
) -> web.Response:
    """GET /api/library/tvdb-auto-match-status

    Reports collection progress; the suggestion list is only included
    once the phase is "done".
    """
    result = {
        "active": _auto_match_state["active"],
        "phase": _auto_match_state["phase"],
        "done": _auto_match_state["done"],
        "total": _auto_match_state["total"],
        "current": _auto_match_state["current"],
    }
    if _auto_match_state["phase"] == "done":
        result["suggestions"] = _auto_match_state["suggestions"]
    return web.json_response(result)

async def post_tvdb_confirm(request: web.Request) -> web.Response:
    """POST /api/library/tvdb-confirm — confirm one suggestion.

    Body: {id, type: 'series'|'movies', tvdb_id}.
    """
    try:
        data = await request.json()
    except Exception:
        return web.json_response(
            {"error": "Ungueltiges JSON"}, status=400
        )
    item_id = data.get("id")
    media_type = data.get("type")
    tvdb_id = data.get("tvdb_id")

    if not item_id or not media_type or not tvdb_id:
        return web.json_response(
            {"error": "id, type und tvdb_id erforderlich"}, status=400
        )

    if media_type == "series":
        result = await tvdb_service.match_and_update_series(
            int(item_id), int(tvdb_id), library_service
        )
    elif media_type == "movies":
        result = await tvdb_service.match_and_update_movie(
            int(item_id), int(tvdb_id), library_service
        )
    else:
        return web.json_response(
            {"error": "type muss 'series' oder 'movies' sein"},
            status=400,
        )

    if result.get("error"):
        return web.json_response(result, status=400)
    return web.json_response(result)
||||||
|
|
||||||
|
# === TVDB Sprache ===
|
||||||
|
|
||||||
|
async def get_tvdb_language(request: web.Request) -> web.Response:
    """GET /api/tvdb/language — current TVDB language (default "deu")."""
    lang = config.settings.get("library", {}).get(
        "tvdb_language", "deu"
    )
    return web.json_response({"language": lang})

async def put_tvdb_language(request: web.Request) -> web.Response:
    """PUT /api/tvdb/language — change the TVDB language.

    Body: {language} — a 3-letter code (length-checked only; not
    validated against an ISO code list).
    """
    try:
        data = await request.json()
    except Exception:
        return web.json_response(
            {"error": "Ungueltiges JSON"}, status=400
        )
    lang = data.get("language", "").strip()
    if not lang or len(lang) != 3:
        return web.json_response(
            {"error": "Sprache muss 3-Buchstaben-Code sein (z.B. deu)"},
            status=400,
        )
    # Persist into config.
    if "library" not in config.settings:
        config.settings["library"] = {}
    config.settings["library"]["tvdb_language"] = lang
    config.save_settings()
    return web.json_response(
        {"message": f"TVDB-Sprache auf '{lang}' gesetzt"}
    )
||||||
|
|
||||||
|
async def post_tvdb_refresh_all_episodes(
    request: web.Request,
) -> web.Response:
    """POST /api/library/tvdb-refresh-episodes

    Reloads every cached episode list (e.g. after a language switch).
    Series without a TVDB id are skipped; a failure for one series
    does not abort the run (best effort)."""
    if not tvdb_service.is_configured:
        return web.json_response(
            {"error": "TVDB nicht konfiguriert"}, status=400
        )
    refreshed = 0
    for series in await library_service.get_series_list():
        tvdb_id = series.get("tvdb_id")
        if not tvdb_id:
            continue
        try:
            await tvdb_service.fetch_episodes(tvdb_id)
            await tvdb_service._update_episode_titles(
                series["id"], tvdb_id
            )
        except Exception:
            # Best effort: skip series that fail to refresh.
            continue
        refreshed += 1
    return web.json_response({
        "message": f"{refreshed} Serien-Episoden aktualisiert"
    })
|
||||||
|
|
||||||
|
# === Ordner-Ansicht ===
|
||||||
|
|
||||||
|
async def get_browse(request: web.Request) -> web.Response:
    """GET /api/library/browse?path=...

    Delegates to the library service; an absent or empty ``path``
    query parameter is passed through as None."""
    requested = request.query.get("path") or None
    return web.json_response(await library_service.browse_path(requested))
|
||||||
|
|
||||||
|
# === Duplikate ===
|
||||||
|
|
||||||
|
async def get_duplicates(request: web.Request) -> web.Response:
    """GET /api/library/duplicates - list duplicate videos."""
    duplicates = await library_service.find_duplicates()
    return web.json_response({"duplicates": duplicates})
|
||||||
|
|
||||||
|
# === Konvertierung aus Bibliothek ===
|
||||||
|
|
||||||
|
async def post_convert_video(request: web.Request) -> web.Response:
    """POST /api/library/videos/{video_id}/convert

    Looks up the video's file path in the database and enqueues a
    conversion job. An optional JSON body may supply {"preset": ...}.

    Returns 400 for a non-numeric id, 404 when the video is unknown,
    500 on DB problems or when no job could be created.
    """
    # Fix: a non-numeric path segment previously raised ValueError,
    # surfacing as an unhandled HTTP 500.
    try:
        video_id = int(request.match_info["video_id"])
    except ValueError:
        return web.json_response(
            {"error": "Ungueltige Video-ID"}, status=400
        )

    pool = await library_service._get_pool()
    if not pool:
        return web.json_response(
            {"error": "Keine DB-Verbindung"}, status=500
        )

    try:
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                await cur.execute(
                    "SELECT file_path FROM library_videos WHERE id = %s",
                    (video_id,)
                )
                row = await cur.fetchone()
                if not row:
                    return web.json_response(
                        {"error": "Video nicht gefunden"}, status=404
                    )
    except Exception as e:
        return web.json_response({"error": str(e)}, status=500)

    file_path = row[0]

    # The preset is optional: a missing or invalid JSON body simply
    # means "use the default preset".
    preset = None
    try:
        data = await request.json()
        preset = data.get("preset")
    except Exception:
        pass

    jobs = await queue_service.add_paths([file_path], preset)
    if jobs:
        return web.json_response({
            "message": "Konvertierung gestartet",
            "job_id": jobs[0].id,
        })
    return web.json_response(
        {"error": "Job konnte nicht erstellt werden"}, status=500
    )
|
||||||
|
|
||||||
|
# === Statistiken ===
|
||||||
|
|
||||||
|
async def get_library_stats(request: web.Request) -> web.Response:
    """GET /api/library/stats - aggregate library statistics."""
    return web.json_response(await library_service.get_stats())
|
||||||
|
|
||||||
|
# === Scan-Status ===
|
||||||
|
|
||||||
|
async def get_scan_status(request: web.Request) -> web.Response:
    """GET /api/library/scan-status

    Exposes the library service's internal scan-progress state.
    """
    progress = library_service._scan_progress
    return web.json_response(progress)
|
||||||
|
|
||||||
|
# === Clean-Funktion ===
|
||||||
|
|
||||||
|
async def get_clean_scan(request: web.Request) -> web.Response:
    """GET /api/library/clean/scan?path_id=...

    Scans for junk files, optionally restricted to one library path.
    Returns 400 for a non-numeric path_id, 500 when the clean
    service is unavailable.
    """
    if not cleaner_service:
        return web.json_response(
            {"error": "Clean-Service nicht verfuegbar"}, status=500
        )
    raw_path_id = request.query.get("path_id")
    # Fix: a non-numeric path_id previously raised ValueError -> HTTP 500.
    try:
        path_id = int(raw_path_id) if raw_path_id else None
    except ValueError:
        return web.json_response(
            {"error": "Ungueltige path_id"}, status=400
        )
    result = await cleaner_service.scan_for_junk(path_id)
    return web.json_response(result)
|
||||||
|
|
||||||
|
async def post_clean_delete(request: web.Request) -> web.Response:
    """POST /api/library/clean/delete

    Deletes the files listed in the JSON body {"files": [...]}.
    Returns 400 on invalid JSON, a non-list payload or an empty list.
    """
    if not cleaner_service:
        return web.json_response(
            {"error": "Clean-Service nicht verfuegbar"}, status=500
        )
    try:
        data = await request.json()
    except Exception:
        return web.json_response(
            {"error": "Ungueltiges JSON"}, status=400
        )
    files = data.get("files", [])
    # Fix: also reject a non-list "files" value instead of forwarding
    # arbitrary payload shapes to the cleaner service.
    if not isinstance(files, list) or not files:
        return web.json_response(
            {"error": "Keine Dateien angegeben"}, status=400
        )
    result = await cleaner_service.delete_files(files)
    return web.json_response(result)
|
||||||
|
|
||||||
|
async def post_clean_empty_dirs(request: web.Request) -> web.Response:
    """POST /api/library/clean/empty-dirs

    Removes empty directories, optionally restricted to one library
    path (JSON body {"path_id": ...}); the body itself is optional.
    """
    if not cleaner_service:
        return web.json_response(
            {"error": "Clean-Service nicht verfuegbar"}, status=500
        )
    try:
        data = await request.json()
    except Exception:
        data = {}
    raw_path_id = data.get("path_id")
    # Fix: a non-numeric path_id previously raised ValueError -> HTTP 500.
    try:
        path_id = int(raw_path_id) if raw_path_id else None
    except (TypeError, ValueError):
        return web.json_response(
            {"error": "Ungueltige path_id"}, status=400
        )
    count = await cleaner_service.delete_empty_dirs(path_id)
    return web.json_response({"deleted_dirs": count})
|
||||||
|
|
||||||
|
# === Filesystem-Browser (fuer Import) ===
|
||||||
|
|
||||||
|
async def get_browse_fs(request: web.Request) -> web.Response:
    """GET /api/library/browse-fs?path=... - real filesystem browser.

    Lists sub-folders (with a quick per-folder video count) and counts
    the videos/bytes directly inside the requested directory. Access
    is restricted to the /mnt tree.
    """
    import os
    from app.services.library import VIDEO_EXTENSIONS

    path = request.query.get("path", "/mnt")

    # Security check: only /mnt itself or paths below it.
    # Fix: a plain startswith("/mnt") also allowed siblings such as
    # "/mnt-evil" or "/mntfoo".
    real = os.path.realpath(path)
    if real != "/mnt" and not real.startswith("/mnt/"):
        return web.json_response(
            {"error": "Zugriff nur auf /mnt erlaubt"}, status=403
        )

    if not os.path.isdir(real):
        return web.json_response(
            {"error": "Ordner nicht gefunden"}, status=404
        )

    folders = []
    video_count = 0
    video_size = 0

    try:
        # Close the scandir iterators deterministically.
        with os.scandir(real) as it:
            entries = sorted(it, key=lambda e: e.name.lower())
        for entry in entries:
            if entry.name.startswith("."):
                continue
            if entry.is_dir(follow_symlinks=True):
                # Quick count: videos directly inside the sub-folder.
                sub_vids = 0
                try:
                    with os.scandir(entry.path) as sub_it:
                        for sub in sub_it:
                            if sub.is_file():
                                ext = os.path.splitext(sub.name)[1].lower()
                                if ext in VIDEO_EXTENSIONS:
                                    sub_vids += 1
                except PermissionError:
                    pass
                folders.append({
                    "name": entry.name,
                    "path": entry.path,
                    "video_count": sub_vids,
                })
            elif entry.is_file():
                ext = os.path.splitext(entry.name)[1].lower()
                if ext in VIDEO_EXTENSIONS:
                    video_count += 1
                    try:
                        video_size += entry.stat().st_size
                    except OSError:
                        pass
    except PermissionError:
        return web.json_response(
            {"error": "Keine Berechtigung"}, status=403
        )

    # Breadcrumb: "/mnt/a/b" -> /mnt, /mnt/a, /mnt/a/b.
    parts = real.split("/")
    breadcrumb = []
    for i in range(1, len(parts)):
        crumb_path = "/".join(parts[:i + 1]) or "/"
        breadcrumb.append({
            "name": parts[i],
            "path": crumb_path,
        })

    return web.json_response({
        "current_path": real,
        "folders": folders,
        "video_count": video_count,
        "video_size": video_size,
        "breadcrumb": breadcrumb,
    })
|
||||||
|
|
||||||
|
# === Import-Funktion ===
|
||||||
|
|
||||||
|
async def post_create_import(request: web.Request) -> web.Response:
    """POST /api/library/import

    Creates an import job for ``source_path`` into the library path
    ``target_library_id``; ``mode`` defaults to "copy".

    Returns 400 on invalid JSON, missing fields, a non-numeric target
    id, or when no videos were found.
    """
    if not importer_service:
        return web.json_response(
            {"error": "Import-Service nicht verfuegbar"}, status=500
        )
    try:
        data = await request.json()
    except Exception:
        return web.json_response(
            {"error": "Ungueltiges JSON"}, status=400
        )
    source = data.get("source_path", "").strip()
    target_id = data.get("target_library_id")
    mode = data.get("mode", "copy")

    if not source or not target_id:
        return web.json_response(
            {"error": "source_path und target_library_id erforderlich"},
            status=400,
        )

    # Fix: a non-numeric target id previously raised ValueError -> HTTP 500.
    try:
        target_id = int(target_id)
    except (TypeError, ValueError):
        return web.json_response(
            {"error": "target_library_id muss eine Zahl sein"}, status=400
        )

    job_id = await importer_service.create_job(source, target_id, mode)
    if job_id:
        return web.json_response(
            {"message": "Import-Job erstellt", "job_id": job_id}
        )
    return web.json_response(
        {"error": "Keine Videos gefunden oder Fehler"}, status=400
    )
|
||||||
|
|
||||||
|
async def post_analyze_import(request: web.Request) -> web.Response:
    """POST /api/library/import/{job_id}/analyze - analyze an import job."""
    if not importer_service:
        return web.json_response(
            {"error": "Import-Service nicht verfuegbar"}, status=500
        )
    job_id = int(request.match_info["job_id"])
    return web.json_response(await importer_service.analyze_job(job_id))
|
||||||
|
|
||||||
|
async def get_import_status(request: web.Request) -> web.Response:
    """GET /api/library/import/{job_id} - status of an import job."""
    if not importer_service:
        return web.json_response(
            {"error": "Import-Service nicht verfuegbar"}, status=500
        )
    job_id = int(request.match_info["job_id"])
    return web.json_response(await importer_service.get_job_status(job_id))
|
||||||
|
|
||||||
|
async def post_execute_import(request: web.Request) -> web.Response:
    """POST /api/library/import/{job_id}/execute - run an import job."""
    if not importer_service:
        return web.json_response(
            {"error": "Import-Service nicht verfuegbar"}, status=500
        )
    job_id = int(request.match_info["job_id"])
    return web.json_response(await importer_service.execute_import(job_id))
|
||||||
|
|
||||||
|
async def put_import_item(request: web.Request) -> web.Response:
    """PUT /api/library/import/items/{item_id} - update an import item.

    The JSON body's keys are forwarded verbatim as keyword arguments
    to the importer service.
    """
    if not importer_service:
        return web.json_response(
            {"error": "Import-Service nicht verfuegbar"}, status=500
        )
    item_id = int(request.match_info["item_id"])
    try:
        payload = await request.json()
    except Exception:
        return web.json_response(
            {"error": "Ungueltiges JSON"}, status=400
        )
    # NOTE(review): unexpected payload keys will raise inside
    # update_item if it has a fixed signature - confirm it takes **kwargs.
    updated = await importer_service.update_item(item_id, **payload)
    if updated:
        return web.json_response({"message": "Item aktualisiert"})
    return web.json_response(
        {"error": "Aktualisierung fehlgeschlagen"}, status=400
    )
|
||||||
|
|
||||||
|
async def put_resolve_conflict(request: web.Request) -> web.Response:
    """PUT /api/library/import/items/{item_id}/resolve

    Resolves an import conflict with the "action" given in the
    JSON body."""
    if not importer_service:
        return web.json_response(
            {"error": "Import-Service nicht verfuegbar"}, status=500
        )
    item_id = int(request.match_info["item_id"])
    try:
        payload = await request.json()
    except Exception:
        return web.json_response(
            {"error": "Ungueltiges JSON"}, status=400
        )
    action = payload.get("action", "")
    resolved = await importer_service.resolve_conflict(item_id, action)
    if resolved:
        return web.json_response({"message": "Konflikt geloest"})
    return web.json_response(
        {"error": "Ungueltige Aktion"}, status=400
    )
|
||||||
|
|
||||||
|
# === Register routes ===
# Table-driven registration: (HTTP method, path, handler), in the
# same order as before. add_route("GET", ...) etc. is equivalent to
# the add_get/add_post/... shortcuts.
_library_routes = [
    # Library paths
    ("GET", "/api/library/paths", get_paths),
    ("POST", "/api/library/paths", post_path),
    ("PUT", "/api/library/paths/{path_id}", put_path),
    ("DELETE", "/api/library/paths/{path_id}", delete_path),
    # Scanning
    ("POST", "/api/library/scan", post_scan_all),
    ("POST", "/api/library/scan/{path_id}", post_scan_single),
    ("GET", "/api/library/scan-status", get_scan_status),
    # Videos / movies
    ("GET", "/api/library/videos", get_videos),
    ("GET", "/api/library/movies", get_movies),
    # Series
    ("GET", "/api/library/series", get_series),
    ("GET", "/api/library/series/{series_id}", get_series_detail),
    ("DELETE", "/api/library/series/{series_id}", delete_series),
    ("GET", "/api/library/series/{series_id}/missing", get_missing_episodes),
    # TVDB
    ("POST", "/api/library/series/{series_id}/tvdb-match", post_tvdb_match),
    ("DELETE", "/api/library/series/{series_id}/tvdb", delete_tvdb_link),
    ("POST", "/api/library/series/{series_id}/tvdb-refresh", post_tvdb_refresh),
    ("GET", "/api/tvdb/search", get_tvdb_search),
    # TVDB metadata
    ("GET", "/api/library/series/{series_id}/cast", get_series_cast),
    ("GET", "/api/library/series/{series_id}/artworks", get_series_artworks),
    ("POST", "/api/library/series/{series_id}/metadata-download",
     post_metadata_download),
    ("POST", "/api/library/metadata-download-all", post_metadata_download_all),
    # NOTE(review): the "(unknown)" segment looks like an extraction
    # artifact - confirm the original path (likely a {filename} param).
    ("GET", "/api/library/metadata/{series_id}/(unknown)", get_metadata_image),
    # Movies
    ("GET", "/api/library/movies-list", get_movies_list),
    ("GET", "/api/library/movies/{movie_id}", get_movie_detail),
    ("DELETE", "/api/library/movies/{movie_id}", delete_movie),
    ("POST", "/api/library/movies/{movie_id}/tvdb-match", post_movie_tvdb_match),
    ("DELETE", "/api/library/movies/{movie_id}/tvdb", delete_movie_tvdb_link),
    ("GET", "/api/tvdb/search-movies", get_tvdb_movie_search),
    # Browse / duplicates
    ("GET", "/api/library/browse", get_browse),
    ("GET", "/api/library/duplicates", get_duplicates),
    # Conversion
    ("POST", "/api/library/videos/{video_id}/convert", post_convert_video),
    # Statistics
    ("GET", "/api/library/stats", get_library_stats),
    # Clean
    ("GET", "/api/library/clean/scan", get_clean_scan),
    ("POST", "/api/library/clean/delete", post_clean_delete),
    ("POST", "/api/library/clean/empty-dirs", post_clean_empty_dirs),
    # Filesystem browser
    ("GET", "/api/library/browse-fs", get_browse_fs),
    # Import
    ("POST", "/api/library/import", post_create_import),
    ("POST", "/api/library/import/{job_id}/analyze", post_analyze_import),
    ("GET", "/api/library/import/{job_id}", get_import_status),
    ("POST", "/api/library/import/{job_id}/execute", post_execute_import),
    ("PUT", "/api/library/import/items/{item_id}", put_import_item),
    ("PUT", "/api/library/import/items/{item_id}/resolve", put_resolve_conflict),
    # TVDB auto-match (review mode)
    ("POST", "/api/library/tvdb-auto-match", post_tvdb_auto_match),
    ("GET", "/api/library/tvdb-auto-match-status", get_tvdb_auto_match_status),
    ("POST", "/api/library/tvdb-confirm", post_tvdb_confirm),
    # TVDB language
    ("GET", "/api/tvdb/language", get_tvdb_language),
    ("PUT", "/api/tvdb/language", put_tvdb_language),
    ("POST", "/api/library/tvdb-refresh-episodes",
     post_tvdb_refresh_all_episodes),
]
for _method, _path, _handler in _library_routes:
    app.router.add_route(_method, _path, _handler)
|
||||||
155
app/routes/pages.py
Normal file
155
app/routes/pages.py
Normal file
|
|
@ -0,0 +1,155 @@
|
||||||
|
"""Server-gerenderte Seiten mit Jinja2 + HTMX"""
|
||||||
|
import logging
|
||||||
|
from aiohttp import web
|
||||||
|
import aiohttp_jinja2
|
||||||
|
from app.config import Config
|
||||||
|
from app.services.queue import QueueService
|
||||||
|
from app.services.encoder import EncoderService
|
||||||
|
from app.models.media import MediaFile
|
||||||
|
|
||||||
|
|
||||||
|
def setup_page_routes(app: web.Application, config: Config,
|
||||||
|
queue_service: QueueService) -> None:
|
||||||
|
"""Registriert Seiten-Routes"""
|
||||||
|
|
||||||
|
def _build_ws_url(request) -> str:
    """Builds the WebSocket URL for the client.

    Uses external_url from the server config when set, otherwise the
    request's host; scheme is wss:// when use_https is enabled.
    """
    srv = config.server_config
    scheme = "wss" if srv.get("use_https", False) else "ws"
    ws_path = srv.get("websocket_path", "/ws")
    host = srv.get("external_url", "") or request.host
    return f"{scheme}://{host}{ws_path}"
|
||||||
|
|
||||||
|
@aiohttp_jinja2.template("dashboard.html")
|
||||||
|
async def dashboard(request: web.Request) -> dict:
|
||||||
|
"""GET / - Dashboard"""
|
||||||
|
return {
|
||||||
|
"ws_url": _build_ws_url(request),
|
||||||
|
"active_jobs": queue_service.get_active_jobs().get("data_convert", {}),
|
||||||
|
"queue": queue_service.get_queue_state().get("data_queue", {}),
|
||||||
|
}
|
||||||
|
|
||||||
|
@aiohttp_jinja2.template("admin.html")
|
||||||
|
async def admin(request: web.Request) -> dict:
|
||||||
|
"""GET /admin - Einstellungsseite"""
|
||||||
|
gpu_available = EncoderService.detect_gpu_available()
|
||||||
|
gpu_devices = EncoderService.get_available_render_devices()
|
||||||
|
return {
|
||||||
|
"settings": config.settings,
|
||||||
|
"presets": config.presets,
|
||||||
|
"gpu_available": gpu_available,
|
||||||
|
"gpu_devices": gpu_devices,
|
||||||
|
}
|
||||||
|
|
||||||
|
@aiohttp_jinja2.template("library.html")
|
||||||
|
async def library(request: web.Request) -> dict:
|
||||||
|
"""GET /library - Bibliothek"""
|
||||||
|
return {}
|
||||||
|
|
||||||
|
@aiohttp_jinja2.template("statistics.html")
|
||||||
|
async def statistics(request: web.Request) -> dict:
|
||||||
|
"""GET /statistics - Statistik-Seite"""
|
||||||
|
entries = await queue_service.get_statistics(limit=50)
|
||||||
|
summary = await queue_service.get_statistics_summary()
|
||||||
|
return {
|
||||||
|
"entries": entries,
|
||||||
|
"summary": summary,
|
||||||
|
"format_size": MediaFile.format_size,
|
||||||
|
"format_time": MediaFile.format_time,
|
||||||
|
}
|
||||||
|
|
||||||
|
# --- HTMX Partials ---
|
||||||
|
|
||||||
|
async def htmx_save_settings(request: web.Request) -> web.Response:
    """POST /htmx/settings - persist settings submitted via form.

    Converts the flat form fields into the nested settings structure,
    saves the config, and returns an HTMX toast fragment.
    """
    data = await request.post()
    settings = config.settings

    # Fix: create missing sections instead of raising KeyError
    # (previously only the "library" section was guarded).
    encoding = settings.setdefault("encoding", {})
    files = settings.setdefault("files", {})
    cleanup = settings.setdefault("cleanup", {})
    audio = settings.setdefault("audio", {})
    subtitle = settings.setdefault("subtitle", {})
    log_cfg = settings.setdefault("logging", {})
    library = settings.setdefault("library", {})

    # Encoding
    encoding["mode"] = data.get("encoding_mode", "cpu")
    encoding["gpu_device"] = data.get("gpu_device", "/dev/dri/renderD128")
    encoding["default_preset"] = data.get("default_preset", "cpu_av1")
    # Fix: non-numeric form input previously raised ValueError -> HTTP 500.
    try:
        encoding["max_parallel_jobs"] = int(data.get("max_parallel_jobs", 1))
    except (TypeError, ValueError):
        encoding["max_parallel_jobs"] = 1

    # Files
    files["target_container"] = data.get("target_container", "webm")
    files["target_folder"] = data.get("target_folder", "same")
    files["delete_source"] = data.get("delete_source") == "on"
    files["recursive_scan"] = data.get("recursive_scan") == "on"

    # Cleanup
    cleanup["enabled"] = data.get("cleanup_enabled") == "on"
    cleanup_ext = data.get("cleanup_extensions", "")
    if cleanup_ext:
        cleanup["delete_extensions"] = [
            e.strip() for e in cleanup_ext.split(",") if e.strip()
        ]
    exclude_pat = data.get("cleanup_exclude", "")
    if exclude_pat:
        cleanup["exclude_patterns"] = [
            p.strip() for p in exclude_pat.split(",") if p.strip()
        ]

    # Audio
    audio_langs = data.get("audio_languages", "ger,eng,und")
    audio["languages"] = [
        l.strip() for l in audio_langs.split(",") if l.strip()
    ]
    audio["default_codec"] = data.get("audio_codec", "libopus")
    audio["keep_channels"] = data.get("keep_channels") == "on"

    # Subtitles
    sub_langs = data.get("subtitle_languages", "ger,eng")
    subtitle["languages"] = [
        l.strip() for l in sub_langs.split(",") if l.strip()
    ]

    # Logging
    log_cfg["level"] = data.get("log_level", "INFO")

    # Library / TVDB
    library["tvdb_api_key"] = data.get("tvdb_api_key", "")
    library["tvdb_pin"] = data.get("tvdb_pin", "")
    library["tvdb_language"] = data.get("tvdb_language", "deu")

    config.save_settings()
    logging.info("Settings via Admin-UI gespeichert")

    # Return a success fragment for the HTMX swap.
    return web.Response(
        text='<div class="toast success">Settings gespeichert!</div>',
        content_type="text/html",
    )
|
||||||
|
|
||||||
|
@aiohttp_jinja2.template("partials/stats_table.html")
|
||||||
|
async def htmx_stats_table(request: web.Request) -> dict:
|
||||||
|
"""GET /htmx/stats?page=1 - Paginierte Statistik"""
|
||||||
|
page = int(request.query.get("page", 1))
|
||||||
|
limit = 25
|
||||||
|
offset = (page - 1) * limit
|
||||||
|
entries = await queue_service.get_statistics(limit, offset)
|
||||||
|
return {
|
||||||
|
"entries": entries,
|
||||||
|
"page": page,
|
||||||
|
"format_size": MediaFile.format_size,
|
||||||
|
"format_time": MediaFile.format_time,
|
||||||
|
}
|
||||||
|
|
||||||
|
# Register page routes (same order and paths as before).
for _method, _path, _handler in (
    ("GET", "/", dashboard),
    ("GET", "/library", library),
    ("GET", "/admin", admin),
    ("GET", "/statistics", statistics),
    ("POST", "/htmx/settings", htmx_save_settings),
    ("GET", "/htmx/stats", htmx_stats_table),
):
    app.router.add_route(_method, _path, _handler)
|
||||||
121
app/routes/ws.py
Normal file
121
app/routes/ws.py
Normal file
|
|
@ -0,0 +1,121 @@
|
||||||
|
"""WebSocket Handler fuer Echtzeit-Updates"""
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
from typing import Optional, Set, TYPE_CHECKING
|
||||||
|
from aiohttp import web
|
||||||
|
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
from app.services.queue import QueueService
|
||||||
|
|
||||||
|
|
||||||
|
class WebSocketManager:
    """Manages WebSocket connections and broadcasts."""

    def __init__(self):
        # Currently connected clients.
        self.clients: Set[web.WebSocketResponse] = set()
        # Injected later via set_queue_service().
        self.queue_service: Optional['QueueService'] = None

    def set_queue_service(self, queue_service: 'QueueService') -> None:
        """Stores a reference to the QueueService."""
        self.queue_service = queue_service

    async def handle_websocket(self, request: web.Request) -> web.WebSocketResponse:
        """WebSocket endpoint handler.

        Registers the client, pushes the initial queue state, then
        processes incoming messages until the client disconnects."""
        ws = web.WebSocketResponse()
        await ws.prepare(request)
        self.clients.add(ws)

        client_ip = request.remote
        logging.info(f"WebSocket Client verbunden: {client_ip} "
                     f"({len(self.clients)} aktiv)")

        # Send the initial state (best effort).
        if self.queue_service:
            try:
                await ws.send_json(self.queue_service.get_active_jobs())
                await ws.send_json(self.queue_service.get_queue_state())
            except Exception:
                pass

        try:
            async for msg in ws:
                if msg.type == web.WSMsgType.TEXT:
                    try:
                        await self._handle_message(json.loads(msg.data))
                    except json.JSONDecodeError:
                        logging.warning(f"Ungueltige JSON-Nachricht: {msg.data[:100]}")
                elif msg.type == web.WSMsgType.ERROR:
                    logging.error(f"WebSocket Fehler: {ws.exception()}")
        except Exception as e:
            logging.error(f"WebSocket Handler Fehler: {e}")
        finally:
            # Always unregister, whether the loop ended normally or not.
            self.clients.discard(ws)
            logging.info(f"WebSocket Client getrennt: {client_ip} "
                         f"({len(self.clients)} aktiv)")

        return ws

    async def broadcast(self, message: dict) -> None:
        """Sends a message to every connected client.

        Clients whose send fails are dropped from the set."""
        if not self.clients:
            return

        payload = json.dumps(message)
        failed = set()
        for client in self.clients.copy():
            try:
                await client.send_str(payload)
            except Exception:
                failed.add(client)
        self.clients -= failed

    async def broadcast_queue_update(self) -> None:
        """Pushes the current queue state to all clients."""
        if not self.queue_service:
            return
        await self.broadcast(self.queue_service.get_active_jobs())
        await self.broadcast(self.queue_service.get_queue_state())

    async def broadcast_progress(self, job) -> None:
        """Sends a progress update for a single job."""
        await self.broadcast({"data_flow": job.to_dict_progress()})

    async def _handle_message(self, data: dict) -> None:
        """Dispatches an incoming client message.

        Supported keys: "data_path" (paths to enqueue, several legacy
        shapes accepted), "data_command" (delete/cancel/retry a job by
        id) and "data_message" (logged verbatim)."""
        if not self.queue_service:
            return

        if "data_path" in data:
            # Normalize the accepted path formats to a flat list.
            paths = data["data_path"]
            if isinstance(paths, str):
                # A single path as a plain string.
                paths = [paths]
            elif isinstance(paths, dict) and "paths" in paths:
                # Legacy format: {"paths": [...]}
                paths = paths["paths"]
            elif not isinstance(paths, list):
                paths = [str(paths)]
            await self.queue_service.add_paths(paths)

        elif "data_command" in data:
            cmd = data["data_command"]
            job_id = cmd.get("id")
            if not job_id:
                return
            action = cmd.get("cmd", "")
            if action == "delete":
                await self.queue_service.remove_job(int(job_id))
            elif action == "cancel":
                await self.queue_service.cancel_job(int(job_id))
            elif action == "retry":
                await self.queue_service.retry_job(int(job_id))

        elif "data_message" in data:
            logging.info(f"Client-Nachricht: {data['data_message']}")
|
||||||
156
app/server.py
Normal file
156
app/server.py
Normal file
|
|
@ -0,0 +1,156 @@
|
||||||
|
"""Haupt-Server: HTTP + WebSocket + Templates in einer aiohttp-App"""
|
||||||
|
import asyncio
|
||||||
|
import logging
|
||||||
|
from pathlib import Path
|
||||||
|
from aiohttp import web
|
||||||
|
import aiohttp_jinja2
|
||||||
|
import jinja2
|
||||||
|
from app.config import Config
|
||||||
|
from app.services.queue import QueueService
|
||||||
|
from app.services.scanner import ScannerService
|
||||||
|
from app.services.encoder import EncoderService
|
||||||
|
from app.routes.ws import WebSocketManager
|
||||||
|
from app.services.library import LibraryService
|
||||||
|
from app.services.tvdb import TVDBService
|
||||||
|
from app.services.cleaner import CleanerService
|
||||||
|
from app.services.importer import ImporterService
|
||||||
|
from app.routes.api import setup_api_routes
|
||||||
|
from app.routes.library_api import setup_library_routes
|
||||||
|
from app.routes.pages import setup_page_routes
|
||||||
|
|
||||||
|
|
||||||
|
class VideoKonverterServer:
|
||||||
|
"""Haupt-Server - ein Port fuer HTTP, WebSocket und Admin-UI"""
|
||||||
|
|
||||||
|
def __init__(self):
    """Creates the config, all services, and the aiohttp application."""
    self.config = Config()
    self.config.setup_logging()

    # Services
    self.ws_manager = WebSocketManager()
    self.scanner = ScannerService(self.config)
    self.queue_service = QueueService(self.config, self.ws_manager)
    # Back-reference so the websocket hub can query/broadcast the queue.
    self.ws_manager.set_queue_service(self.queue_service)

    # Library services
    self.library_service = LibraryService(self.config, self.ws_manager)
    self.tvdb_service = TVDBService(self.config)
    self.cleaner_service = CleanerService(self.config, self.library_service)
    self.importer_service = ImporterService(
        self.config, self.library_service, self.tvdb_service
    )

    # aiohttp app (50 GiB upload limit for large video files)
    self.app = web.Application(client_max_size=50 * 1024 * 1024 * 1024)
    self._setup_app()
|
||||||
|
|
||||||
|
def _setup_app(self) -> None:
    """Configures the aiohttp application.

    Sets up Jinja2 templates, the WebSocket route, the API/library/
    page routes, static file serving, and the startup/shutdown hooks.
    """
    # Jinja2 templates (request_processor exposes `request` in templates)
    template_dir = Path(__file__).parent / "templates"
    aiohttp_jinja2.setup(
        self.app,
        loader=jinja2.FileSystemLoader(str(template_dir)),
        context_processors=[aiohttp_jinja2.request_processor],
    )

    # WebSocket route (path is configurable, defaults to /ws)
    ws_path = self.config.server_config.get("websocket_path", "/ws")
    self.app.router.add_get(ws_path, self.ws_manager.handle_websocket)

    # API routes
    setup_api_routes(
        self.app, self.config, self.queue_service, self.scanner
    )

    # Library API routes
    setup_library_routes(
        self.app, self.config, self.library_service,
        self.tvdb_service, self.queue_service,
        self.cleaner_service, self.importer_service,
    )

    # Page routes
    setup_page_routes(self.app, self.config, self.queue_service)

    # Static files (registered only if the directory exists)
    static_dir = Path(__file__).parent / "static"
    if static_dir.exists():
        self.app.router.add_static(
            "/static/", path=str(static_dir), name="static"
        )

    # Startup/shutdown hooks
    self.app.on_startup.append(self._on_startup)
    self.app.on_shutdown.append(self._on_shutdown)
|
||||||
|
|
||||||
|
    async def _on_startup(self, app: web.Application) -> None:
        """Server startup hook: probe the GPU, start queue and library.

        When the configured encoding mode is "auto", the GPU is first
        detected and then verified with a real test encode; on success
        the runtime config is switched to GPU encoding, otherwise to CPU.
        Afterwards the queue and library services are started and the
        library's DB pool is shared with the TVDB and importer services.
        """
        mode = self.config.encoding_mode

        # Auto-detection of the encoding backend.
        if mode == "auto":
            gpu_ok = EncoderService.detect_gpu_available()
            if gpu_ok:
                # Device nodes alone are not enough — run a real test encode.
                gpu_ok = await EncoderService.test_gpu_encoding(
                    self.config.gpu_device
                )
            if gpu_ok:
                self.config.settings["encoding"]["mode"] = "gpu"
                self.config.settings["encoding"]["default_preset"] = "gpu_av1"
                logging.info(f"GPU erkannt ({self.config.gpu_device}), "
                             f"verwende GPU-Encoding")
            else:
                self.config.settings["encoding"]["mode"] = "cpu"
                self.config.settings["encoding"]["default_preset"] = "cpu_av1"
                logging.info("Keine GPU erkannt, verwende CPU-Encoding")
        else:
            logging.info(f"Encoding-Modus: {mode}")

        # Start the conversion queue.
        await self.queue_service.start()

        # Start the library service (creates its DB pool).
        await self.library_service.start()

        # Share the DB pool with the other services.
        if self.library_service._db_pool:
            self.tvdb_service.set_db_pool(self.library_service._db_pool)
            self.importer_service.set_db_pool(self.library_service._db_pool)

        # Create the additional DB tables owned by those services.
        await self.tvdb_service.init_db()
        await self.importer_service.init_db()

        host = self.config.server_config.get("host", "0.0.0.0")
        port = self.config.server_config.get("port", 8080)
        logging.info(f"Server bereit auf http://{host}:{port}")
||||||
|
    async def _on_shutdown(self, app: web.Application) -> None:
        """Server shutdown hook: stop the queue and library services."""
        await self.queue_service.stop()
        await self.library_service.stop()
        logging.info("Server heruntergefahren")
||||||
|
    async def run(self) -> None:
        """Start the HTTP server and block until the process is interrupted.

        Uses AppRunner/TCPSite instead of web.run_app so the call works
        inside an already running asyncio event loop.
        """
        host = self.config.server_config.get("host", "0.0.0.0")
        port = self.config.server_config.get("port", 8080)

        runner = web.AppRunner(self.app)
        await runner.setup()
        site = web.TCPSite(runner, host, port)
        await site.start()

        logging.info(
            f"VideoKonverter Server laeuft auf http://{host}:{port}\n"
            f" Dashboard: http://{host}:{port}/\n"
            f" Bibliothek: http://{host}:{port}/library\n"
            f" Admin: http://{host}:{port}/admin\n"
            f" Statistik: http://{host}:{port}/statistics\n"
            f" WebSocket: ws://{host}:{port}/ws\n"
            f" API: http://{host}:{port}/api/convert (POST)"
        )

        # Run forever until interrupted (the Event is never set).
        await asyncio.Event().wait()
0
app/services/__init__.py
Normal file
0
app/services/__init__.py
Normal file
155
app/services/cleaner.py
Normal file
155
app/services/cleaner.py
Normal file
|
|
@ -0,0 +1,155 @@
|
||||||
|
"""Clean-Service: Findet und entfernt Nicht-Video-Dateien aus der Bibliothek"""
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
from app.config import Config
|
||||||
|
from app.services.library import LibraryService, VIDEO_EXTENSIONS
|
||||||
|
|
||||||
|
|
||||||
|
class CleanerService:
    """Scans library folders for non-video files and offers cleanup.

    A file counts as "junk" when its extension is neither a known video
    extension (``VIDEO_EXTENSIONS``) nor whitelisted via the
    ``cleanup.keep_extensions`` setting.
    """

    def __init__(self, config: Config, library_service: LibraryService):
        self.config = config
        self.library = library_service

    @property
    def _cleanup_config(self) -> dict:
        # The "cleanup" section of the settings; empty dict if absent.
        return self.config.settings.get("cleanup", {})

    @property
    def _keep_extensions(self) -> set:
        """Extensions (lowercased) that must never be treated as junk."""
        exts = self._cleanup_config.get("keep_extensions", [])
        return {e.lower() for e in exts}

    async def scan_for_junk(self, library_path_id: Optional[int] = None) -> dict:
        """Scan library folders for non-video files.

        Args:
            library_path_id: optional — restrict the scan to one library path.

        Returns:
            dict with keys ``files`` (list of per-file dicts),
            ``total_size`` (bytes) and ``total_count``.
        """
        paths = await self.library.get_paths()
        if library_path_id:
            paths = [p for p in paths if p["id"] == library_path_id]

        keep_exts = self._keep_extensions
        junk_files = []
        total_size = 0

        for lib_path in paths:
            if not lib_path.get("enabled"):
                continue
            base = lib_path["path"]
            if not os.path.isdir(base):
                continue

            for root, dirs, files in os.walk(base):
                # Prune hidden directories in-place so os.walk skips them.
                dirs[:] = [d for d in dirs if not d.startswith(".")]
                for f in files:
                    ext = os.path.splitext(f)[1].lower()
                    # Skip video files and whitelisted extensions.
                    if ext in VIDEO_EXTENSIONS:
                        continue
                    if ext in keep_exts:
                        continue

                    fp = os.path.join(root, f)
                    try:
                        size = os.path.getsize(fp)
                    except OSError:
                        # File vanished or unreadable — still report it.
                        size = 0

                    # Relative path; first component is taken to be the
                    # series folder (if the file sits in a subfolder).
                    rel = os.path.relpath(fp, base)
                    parts = rel.replace("\\", "/").split("/")
                    parent_series = parts[0] if len(parts) > 1 else ""

                    junk_files.append({
                        "path": fp,
                        "name": f,
                        "size": size,
                        "extension": ext,
                        "parent_series": parent_series,
                        "library_name": lib_path["name"],
                    })
                    total_size += size

        return {
            "files": junk_files,
            "total_size": total_size,
            "total_count": len(junk_files),
        }

    @staticmethod
    def _is_inside(file_path: str, base_path: str) -> bool:
        """True if ``file_path`` resolves to a location inside ``base_path``.

        Resolves symlinks and ``..`` components via realpath so that a
        crafted path (e.g. ``/lib/../etc/passwd``) cannot escape the
        library root — a plain string-prefix check would accept it.
        """
        try:
            real_file = os.path.realpath(file_path)
            real_base = os.path.realpath(base_path)
            return os.path.commonpath([real_file, real_base]) == real_base
        except ValueError:
            # Paths on different drives (Windows) or mixed abs/rel paths.
            return False

    async def delete_files(self, file_paths: list[str]) -> dict:
        """Delete the given files.

        Safety: only files that resolve (after symlink/``..`` resolution)
        to a location inside a configured library path are deleted;
        everything else is reported as not allowed.

        Returns:
            dict with ``deleted``, ``failed``, ``freed_bytes``, ``errors``.
        """
        deleted = 0
        failed = 0
        freed = 0
        errors = []

        # Safety check: only delete files inside library paths.
        paths = await self.library.get_paths()
        allowed_prefixes = [p["path"] for p in paths]

        for fp in file_paths:
            # Path-containment check that is robust against ".." and
            # symlink escapes (see _is_inside).
            is_allowed = any(
                self._is_inside(fp, prefix)
                for prefix in allowed_prefixes
            )
            if not is_allowed:
                errors.append(f"Nicht erlaubt: {fp}")
                failed += 1
                continue

            try:
                size = os.path.getsize(fp)
                os.remove(fp)
                deleted += 1
                freed += size
                logging.info(f"Clean: Geloescht: {fp}")
            except OSError as e:
                errors.append(f"{fp}: {e}")
                failed += 1

        return {
            "deleted": deleted,
            "failed": failed,
            "freed_bytes": freed,
            "errors": errors,
        }

    async def delete_empty_dirs(self,
                                library_path_id: Optional[int] = None) -> int:
        """Delete empty sub-directories (bottom-up).

        Args:
            library_path_id: optional — restrict to one library path.

        Returns:
            number of directories removed.
        """
        paths = await self.library.get_paths()
        if library_path_id:
            paths = [p for p in paths if p["id"] == library_path_id]

        removed = 0
        for lib_path in paths:
            if not lib_path.get("enabled"):
                continue
            base = lib_path["path"]
            if not os.path.isdir(base):
                continue

            # Bottom-up: deepest directories first, so emptied parents
            # can be removed in the same pass.
            for root, dirs, files in os.walk(base, topdown=False):
                # Never remove the library base folder itself.
                if root == base:
                    continue
                # Skip hidden directories.
                if os.path.basename(root).startswith("."):
                    continue
                try:
                    if not os.listdir(root):
                        os.rmdir(root)
                        removed += 1
                        logging.info(f"Clean: Leerer Ordner entfernt: {root}")
                except OSError:
                    # Directory vanished or not removable — best effort.
                    pass

        return removed
||||||
226
app/services/encoder.py
Normal file
226
app/services/encoder.py
Normal file
|
|
@ -0,0 +1,226 @@
|
||||||
|
"""ffmpeg Command Builder - GPU und CPU Encoding"""
|
||||||
|
import os
|
||||||
|
import logging
|
||||||
|
import asyncio
|
||||||
|
from app.config import Config
|
||||||
|
from app.models.job import ConversionJob
|
||||||
|
|
||||||
|
|
||||||
|
class EncoderService:
    """Builds ffmpeg commands based on preset and media analysis."""

    def __init__(self, config: Config):
        self.config = config

    def build_command(self, job: ConversionJob) -> list[str]:
        """
        Build the complete ffmpeg command for a conversion job.

        Takes GPU/CPU mode, the selected preset and the configured
        audio/subtitle filters into account. The finished command is also
        stored on the job (``job.ffmpeg_cmd``) before being returned.
        """
        preset = self.config.presets.get(job.preset_name, {})
        if not preset:
            # Unknown preset name — log and fall back to the default.
            logging.error(f"Preset '{job.preset_name}' nicht gefunden")
            preset = self.config.default_preset

        cmd = ["ffmpeg", "-y"]

        # Hardware (VAAPI) initialisation, if the preset requests it.
        cmd.extend(self._build_hw_init(preset))

        # Input file.
        cmd.extend(["-i", job.media.source_path])

        # Video stream (first video stream only).
        cmd.extend(self._build_video_params(job, preset))

        # Audio streams (all that pass the language filter).
        cmd.extend(self._build_audio_params(job))

        # Subtitle streams (language filter + codec blacklist).
        cmd.extend(self._build_subtitle_params(job))

        # Output file.
        cmd.append(job.target_path)

        job.ffmpeg_cmd = cmd
        return cmd

    def _build_hw_init(self, preset: dict) -> list[str]:
        """GPU initialisation arguments for VAAPI (empty list for CPU presets)."""
        if not preset.get("hw_init", False):
            return []

        device = self.config.gpu_device
        return [
            "-init_hw_device", f"vaapi=intel:{device}",
            "-hwaccel", "vaapi",
            "-hwaccel_device", "intel",
        ]

    def _build_video_params(self, job: ConversionJob, preset: dict) -> list[str]:
        """Video parameters: codec, quality, GOP, speed preset, filters."""
        cmd = ["-map", "0:v:0"]  # first video stream only

        # Video codec.
        codec = preset.get("video_codec", "libsvtav1")
        cmd.extend(["-c:v", codec])

        # Quality (CRF or QP depending on the encoder).
        quality_param = preset.get("quality_param", "crf")
        quality_value = preset.get("quality_value", 30)
        cmd.extend([f"-{quality_param}", str(quality_value)])

        # GOP size.
        gop = preset.get("gop_size")
        if gop:
            cmd.extend(["-g", str(gop)])

        # Speed preset (CPU encoders only; may legitimately be 0).
        speed = preset.get("speed_preset")
        if speed is not None:
            cmd.extend(["-preset", str(speed)])

        # Video filter; for GPU presets without an explicit filter the
        # upload filter is derived from the source pixel format.
        vf = preset.get("video_filter", "")
        if not vf and preset.get("hw_init"):
            vf = self._detect_gpu_filter(job)
        if vf:
            cmd.extend(["-vf", vf])

        # Arbitrary extra parameters from the preset.
        for key, value in preset.get("extra_params", {}).items():
            cmd.extend([f"-{key}", str(value)])

        return cmd

    def _build_audio_params(self, job: ConversionJob) -> list[str]:
        """
        Audio streams: filter by language, set codec/bitrate.

        IMPORTANT: the channel count is preserved (no downmix)!
        Surround (5.1/7.1) and stereo (2.0/2.1) layouts stay intact.
        """
        audio_cfg = self.config.audio_config
        languages = audio_cfg.get("languages", ["ger", "eng", "und"])
        codec = audio_cfg.get("default_codec", "libopus")
        raw_map = audio_cfg.get("bitrate_map", {2: "128k", 6: "320k", 8: "450k"})
        default_bitrate = audio_cfg.get("default_bitrate", "192k")
        keep_channels = audio_cfg.get("keep_channels", True)

        # Normalize bitrate_map keys to int: depending on how the config
        # was loaded the channel-count keys may arrive as strings, which
        # would silently miss the lookup against the int channel count.
        bitrate_map = {}
        for k, v in raw_map.items():
            try:
                bitrate_map[int(k)] = v
            except (TypeError, ValueError):
                continue

        cmd = []
        audio_idx = 0  # index of the audio stream in the OUTPUT

        for stream in job.media.audio_streams:
            # Language filter: a tagged language must be in the allow-list;
            # untagged streams always pass.
            lang = stream.language
            if lang and lang not in languages:
                continue

            cmd.extend(["-map", f"0:{stream.index}"])

            if codec == "copy":
                cmd.extend([f"-c:a:{audio_idx}", "copy"])
            else:
                cmd.extend([f"-c:a:{audio_idx}", codec])

                # Bitrate by channel count (surround gets more).
                channels = stream.channels
                bitrate = str(bitrate_map.get(channels, default_bitrate))
                cmd.extend([f"-b:a:{audio_idx}", bitrate])

                # Preserve the channel count. NOTE: the stream specifier
                # must be "ac:a:<n>"; a bare "ac:<n>" addresses output
                # stream <n> overall (i.e. the video stream for n == 0).
                if keep_channels:
                    cmd.extend([f"-ac:a:{audio_idx}", str(channels)])

            audio_idx += 1

        return cmd

    def _build_subtitle_params(self, job: ConversionJob) -> list[str]:
        """Subtitle streams: filter by language and codec blacklist."""
        sub_cfg = self.config.subtitle_config
        languages = sub_cfg.get("languages", ["ger", "eng"])
        blacklist = sub_cfg.get("codec_blacklist", [])

        cmd = []
        for stream in job.media.subtitle_streams:
            # Codec blacklist (image-based subtitles cannot be kept).
            if stream.codec_name in blacklist:
                continue

            # Language filter (untagged streams always pass).
            lang = stream.language
            if lang and lang not in languages:
                continue

            cmd.extend(["-map", f"0:{stream.index}"])

        # Subtitle codec: WebM only supports webvtt; otherwise copy.
        # Only emitted when at least one subtitle stream was mapped.
        if job.target_container == "webm" and cmd:
            cmd.extend(["-c:s", "webvtt"])
        elif cmd:
            cmd.extend(["-c:s", "copy"])

        return cmd

    def _detect_gpu_filter(self, job: ConversionJob) -> str:
        """Pick the hwupload pixel format for GPU encoding (10-bit vs 8-bit)."""
        if job.media.is_10bit:
            return "format=p010,hwupload"
        return "format=nv12,hwupload"

    @staticmethod
    def detect_gpu_available() -> bool:
        """Check whether GPU/VAAPI device nodes are present on this host."""
        # /dev/dri must exist at all.
        if not os.path.exists("/dev/dri"):
            return False

        # At least one renderD* device must be present.
        devices = EncoderService.get_available_render_devices()
        if not devices:
            return False

        return True

    @staticmethod
    def get_available_render_devices() -> list[str]:
        """List available /dev/dri/renderD* devices (sorted)."""
        dri_path = "/dev/dri"
        if not os.path.exists(dri_path):
            return []

        devices = []
        try:
            for entry in os.listdir(dri_path):
                if entry.startswith("renderD"):
                    devices.append(f"{dri_path}/{entry}")
        except PermissionError:
            logging.warning("Kein Zugriff auf /dev/dri")

        return sorted(devices)

    @staticmethod
    async def test_gpu_encoding(device: str = "/dev/dri/renderD128") -> bool:
        """Verify that GPU encoding actually works by encoding one test frame.

        Runs a minimal h264_vaapi encode of a 64x64 null source with a
        10-second timeout; returns True only on a clean ffmpeg exit.
        """
        cmd = [
            "ffmpeg", "-y",
            "-init_hw_device", f"vaapi=test:{device}",
            "-f", "lavfi", "-i", "nullsrc=s=64x64:d=0.1",
            "-vf", "format=nv12,hwupload",
            "-c:v", "h264_vaapi",
            "-frames:v", "1",
            "-f", "null", "-",
        ]
        try:
            process = await asyncio.create_subprocess_exec(
                *cmd,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
            )
            await asyncio.wait_for(process.communicate(), timeout=10)
            return process.returncode == 0
        except Exception as e:
            logging.warning(f"GPU-Test fehlgeschlagen: {e}")
            return False
||||||
734
app/services/importer.py
Normal file
734
app/services/importer.py
Normal file
|
|
@ -0,0 +1,734 @@
|
||||||
|
"""Import-Service: Videos erkennen, TVDB-Match, umbenennen, einsortieren"""
|
||||||
|
import asyncio
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import shutil
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
import aiomysql
|
||||||
|
|
||||||
|
from app.config import Config
|
||||||
|
from app.services.library import (
|
||||||
|
LibraryService, VIDEO_EXTENSIONS, RE_SXXEXX, RE_XXxXX
|
||||||
|
)
|
||||||
|
from app.services.tvdb import TVDBService
|
||||||
|
from app.services.probe import ProbeService
|
||||||
|
|
||||||
|
# Extract the series name from a file name (everything before SxxExx).
RE_SERIES_FROM_NAME = re.compile(
    r'^(.+?)[\s._-]+[Ss]\d{1,2}[Ee]\d{1,3}', re.IGNORECASE
)
# Same, but for the "1x02"-style episode numbering.
RE_SERIES_FROM_XXx = re.compile(
    r'^(.+?)[\s._-]+\d{1,2}x\d{2,3}', re.IGNORECASE
)
|
||||||
|
|
||||||
|
class ImporterService:
|
||||||
|
"""Video-Import: Erkennung, TVDB-Matching, Umbenennung, Kopieren/Verschieben"""
|
||||||
|
|
||||||
|
    def __init__(self, config: Config, library_service: LibraryService,
                 tvdb_service: TVDBService):
        self.config = config
        self.library = library_service
        self.tvdb = tvdb_service
        # Shared aiomysql pool; injected later via set_db_pool().
        self._db_pool: Optional[aiomysql.Pool] = None
|
|
||||||
|
    def set_db_pool(self, pool: aiomysql.Pool) -> None:
        """Inject the shared aiomysql connection pool."""
        self._db_pool = pool
||||||
|
|
||||||
|
    @property
    def _naming_pattern(self) -> str:
        # Filename pattern for imported episodes; configurable via
        # library.import_naming_pattern in the settings.
        return self.config.settings.get("library", {}).get(
            "import_naming_pattern",
            "{series} - S{season:02d}E{episode:02d} - {title}.{ext}"
        )
|
|
||||||
|
    @property
    def _season_pattern(self) -> str:
        # Season folder name pattern; configurable via
        # library.import_season_pattern in the settings.
        return self.config.settings.get("library", {}).get(
            "import_season_pattern", "Season {season:02d}"
        )
||||||
|
|
||||||
|
# === DB-Tabellen erstellen ===
|
||||||
|
|
||||||
|
    async def init_db(self) -> None:
        """Create the import tables (import_jobs, import_items) if missing.

        No-op when no DB pool has been injected yet. Errors are logged,
        not raised (best-effort initialisation at startup).
        """
        if not self._db_pool:
            return
        try:
            async with self._db_pool.acquire() as conn:
                async with conn.cursor() as cur:
                    await cur.execute("""
                        CREATE TABLE IF NOT EXISTS import_jobs (
                            id INT AUTO_INCREMENT PRIMARY KEY,
                            source_path VARCHAR(1024) NOT NULL,
                            target_library_id INT NOT NULL,
                            status ENUM('pending','analyzing','ready',
                                        'importing','done','error')
                                DEFAULT 'pending',
                            mode ENUM('copy','move') DEFAULT 'copy',
                            naming_pattern VARCHAR(256),
                            season_pattern VARCHAR(256),
                            total_files INT DEFAULT 0,
                            processed_files INT DEFAULT 0,
                            created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                            FOREIGN KEY (target_library_id)
                                REFERENCES library_paths(id)
                        ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4
                    """)

                    await cur.execute("""
                        CREATE TABLE IF NOT EXISTS import_items (
                            id INT AUTO_INCREMENT PRIMARY KEY,
                            import_job_id INT NOT NULL,
                            source_file VARCHAR(1024) NOT NULL,
                            source_size BIGINT NOT NULL DEFAULT 0,
                            source_duration DOUBLE NULL,
                            detected_series VARCHAR(256),
                            detected_season INT,
                            detected_episode INT,
                            tvdb_series_id INT NULL,
                            tvdb_series_name VARCHAR(256),
                            tvdb_episode_title VARCHAR(512),
                            target_path VARCHAR(1024),
                            target_filename VARCHAR(512),
                            status ENUM('pending','matched','conflict',
                                        'skipped','done','error')
                                DEFAULT 'pending',
                            conflict_reason VARCHAR(512) NULL,
                            existing_file_path VARCHAR(1024) NULL,
                            existing_file_size BIGINT NULL,
                            user_action ENUM('overwrite','skip','rename') NULL,
                            FOREIGN KEY (import_job_id)
                                REFERENCES import_jobs(id) ON DELETE CASCADE,
                            INDEX idx_job (import_job_id)
                        ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4
                    """)
                    logging.info("Import-Tabellen initialisiert")
        except Exception as e:
            logging.error(f"Import-Tabellen erstellen fehlgeschlagen: {e}")
|
|
||||||
|
# === Job-Verwaltung ===
|
||||||
|
|
||||||
|
    async def create_job(self, source_path: str,
                         target_library_id: int,
                         mode: str = 'copy') -> Optional[int]:
        """Create an import job and collect video files from the source folder.

        Args:
            source_path: directory to scan recursively for video files.
            target_library_id: id of the destination library path.
            mode: 'copy' or 'move'; anything else falls back to 'copy'.

        Returns:
            the new job id, or None when there is no DB pool, the source
            directory does not exist, no videos were found, or the DB
            insert failed.
        """
        if not self._db_pool:
            return None
        if not os.path.isdir(source_path):
            return None
        if mode not in ('copy', 'move'):
            # Unknown mode — fall back to the non-destructive default.
            mode = 'copy'

        # Find video files in the source folder (hidden dirs skipped).
        videos = []
        for root, dirs, files in os.walk(source_path):
            dirs[:] = [d for d in dirs if not d.startswith(".")]
            for f in sorted(files):
                ext = os.path.splitext(f)[1].lower()
                if ext in VIDEO_EXTENSIONS:
                    videos.append(os.path.join(root, f))

        if not videos:
            return None

        try:
            async with self._db_pool.acquire() as conn:
                async with conn.cursor() as cur:
                    await cur.execute(
                        "INSERT INTO import_jobs "
                        "(source_path, target_library_id, status, mode, "
                        "naming_pattern, season_pattern, total_files) "
                        "VALUES (%s, %s, 'pending', %s, %s, %s, %s)",
                        (source_path, target_library_id, mode,
                         self._naming_pattern, self._season_pattern,
                         len(videos))
                    )
                    job_id = cur.lastrowid

                    # Insert one item row per found video.
                    for vf in videos:
                        try:
                            size = os.path.getsize(vf)
                        except OSError:
                            size = 0
                        await cur.execute(
                            "INSERT INTO import_items "
                            "(import_job_id, source_file, source_size) "
                            "VALUES (%s, %s, %s)",
                            (job_id, vf, size)
                        )

                    logging.info(
                        f"Import-Job erstellt: {job_id} "
                        f"({len(videos)} Videos aus {source_path})"
                    )
                    return job_id
        except Exception as e:
            logging.error(f"Import-Job erstellen fehlgeschlagen: {e}")
            return None
||||||
|
|
||||||
|
    async def analyze_job(self, job_id: int) -> dict:
        """Analyse all files of a job: detection + TVDB lookup + conflict check.

        Loads the job, its target library and its items, marks the job
        'analyzing', runs _analyze_item for every file, then marks it
        'ready' and returns the job status.

        Returns:
            the job status dict, or {"error": ...} on failure.
        """
        if not self._db_pool:
            return {"error": "Keine DB-Verbindung"}

        try:
            async with self._db_pool.acquire() as conn:
                async with conn.cursor(aiomysql.DictCursor) as cur:
                    # Load the job.
                    await cur.execute(
                        "SELECT * FROM import_jobs WHERE id = %s", (job_id,)
                    )
                    job = await cur.fetchone()
                    if not job:
                        return {"error": "Job nicht gefunden"}

                    # Load the target library.
                    await cur.execute(
                        "SELECT * FROM library_paths WHERE id = %s",
                        (job["target_library_id"],)
                    )
                    lib_path = await cur.fetchone()
                    if not lib_path:
                        return {"error": "Ziel-Library nicht gefunden"}

                    # Mark the job as analyzing.
                    await cur.execute(
                        "UPDATE import_jobs SET status = 'analyzing' "
                        "WHERE id = %s", (job_id,)
                    )

                    # Load the items.
                    await cur.execute(
                        "SELECT * FROM import_items "
                        "WHERE import_job_id = %s ORDER BY source_file",
                        (job_id,)
                    )
                    items = await cur.fetchall()

            # Analyse each item. NOTE(review): _analyze_item acquires its
            # own pool connections, hence outside the context above.
            tvdb_cache = {}  # series name (lowercase) -> TVDB info
            for item in items:
                await self._analyze_item(
                    item, lib_path, job, tvdb_cache
                )

            # Mark the job as ready.
            async with self._db_pool.acquire() as conn:
                async with conn.cursor() as cur:
                    await cur.execute(
                        "UPDATE import_jobs SET status = 'ready' "
                        "WHERE id = %s", (job_id,)
                    )

            return await self.get_job_status(job_id)

        except Exception as e:
            logging.error(f"Import-Analyse fehlgeschlagen: {e}")
            return {"error": str(e)}
||||||
|
|
||||||
|
    # Minimum size for a "real" episode (anything below = sample/trailer).
    MIN_EPISODE_SIZE = 100 * 1024 * 1024  # 100 MiB

    async def _analyze_item(self, item: dict, lib_path: dict,
                            job: dict, tvdb_cache: dict) -> None:
        """Analyse a single item: detection + TVDB lookup + conflict check.

        Steps: size gate (samples/trailers skipped), series/season/episode
        detection, ffprobe duration, cached TVDB lookup, target-path
        construction, conflict detection against an existing target file,
        and finally a DB update of the item row.
        """
        filename = os.path.basename(item["source_file"])
        ext = os.path.splitext(filename)[1].lstrip(".")

        # 0. Size gate: mark files that are too small as sample/trailer.
        if item["source_size"] < self.MIN_EPISODE_SIZE:
            try:
                async with self._db_pool.acquire() as conn:
                    async with conn.cursor() as cur:
                        await cur.execute("""
                            UPDATE import_items SET
                                status = 'skipped',
                                conflict_reason = %s
                            WHERE id = %s
                        """, (
                            f"Vermutlich Sample/Trailer "
                            f"({self._fmt_size(item['source_size'])})",
                            item["id"],
                        ))
            except Exception:
                # Best effort — the item simply stays 'pending' on failure.
                pass
            return

        # 1. Detect series/season/episode (file name + folder name).
        info = self._detect_series_info(item["source_file"])
        series_name = info.get("series", "")
        season = info.get("season")
        episode = info.get("episode")

        # 2. Duration via ffprobe (used for the conflict check).
        duration = None
        try:
            media = await ProbeService.analyze(item["source_file"])
            if media:
                duration = media.source_duration_sec
        except Exception:
            # Probe failure is non-fatal; duration stays None.
            pass

        # 3. TVDB lookup (cached per series name).
        tvdb_id = None
        tvdb_name = series_name
        tvdb_ep_title = ""
        if series_name and self.tvdb.is_configured:
            if series_name.lower() not in tvdb_cache:
                results = await self.tvdb.search_series(series_name)
                if results:
                    tvdb_cache[series_name.lower()] = results[0]
                else:
                    # Cache the miss too, to avoid repeated lookups.
                    tvdb_cache[series_name.lower()] = None

            cached = tvdb_cache.get(series_name.lower())
            if cached:
                tvdb_id = cached.get("tvdb_id")
                tvdb_name = cached.get("name", series_name)

            # Episode title from TVDB.
            if tvdb_id and season and episode:
                tvdb_ep_title = await self._get_episode_title(
                    int(tvdb_id), season, episode
                )

        # 4. Compute the target path.
        pattern = job.get("naming_pattern") or self._naming_pattern
        season_pattern = job.get("season_pattern") or self._season_pattern
        target_dir, target_file = self._build_target(
            tvdb_name or series_name or "Unbekannt",
            season, episode,
            tvdb_ep_title or "",
            ext,
            lib_path["path"],
            pattern, season_pattern
        )
        target_path = os.path.join(target_dir, target_file)

        # 5. Conflict check against an already existing target file.
        status = "matched" if series_name and season and episode else "pending"
        conflict = None
        existing_path = None
        existing_size = None

        if os.path.exists(target_path):
            existing_path = target_path
            existing_size = os.path.getsize(target_path)
            source_size = item["source_size"]

            # Size comparison (>20% deviation is called out explicitly).
            if source_size and existing_size:
                diff_pct = abs(source_size - existing_size) / max(
                    existing_size, 1
                ) * 100
                if diff_pct > 20:
                    conflict = (
                        f"Datei existiert bereits "
                        f"(Quelle: {self._fmt_size(source_size)}, "
                        f"Ziel: {self._fmt_size(existing_size)}, "
                        f"Abweichung: {diff_pct:.0f}%)"
                    )
                else:
                    conflict = "Datei existiert bereits (aehnliche Groesse)"
            else:
                conflict = "Datei existiert bereits"
            status = "conflict"

        # 6. Persist the analysis result on the item row.
        try:
            async with self._db_pool.acquire() as conn:
                async with conn.cursor() as cur:
                    await cur.execute("""
                        UPDATE import_items SET
                            source_duration = %s,
                            detected_series = %s,
                            detected_season = %s,
                            detected_episode = %s,
                            tvdb_series_id = %s,
                            tvdb_series_name = %s,
                            tvdb_episode_title = %s,
                            target_path = %s,
                            target_filename = %s,
                            status = %s,
                            conflict_reason = %s,
                            existing_file_path = %s,
                            existing_file_size = %s
                        WHERE id = %s
                    """, (
                        duration, series_name, season, episode,
                        tvdb_id, tvdb_name, tvdb_ep_title,
                        target_dir, target_file, status,
                        conflict, existing_path, existing_size,
                        item["id"],
                    ))
        except Exception as e:
            logging.error(f"Import-Item analysieren fehlgeschlagen: {e}")
||||||
|
|
||||||
|
    def _detect_series_info(self, file_path: str) -> dict:
        """Extract series name, season and episode for a video file.

        Tries the file name first, then the parent folder name as a
        fallback. The folder name is often more reliable with
        release-group prefixes (e.g. 'tlr-24.s07e01.mkv' vs. folder
        '24.S07E01.German.DL.1080p.Bluray.x264-TLR').

        Returns:
            dict with keys ``series`` (str), ``season`` and ``episode``
            (ints or None).
        """
        filename = os.path.basename(file_path)
        parent_dir = os.path.basename(os.path.dirname(file_path))

        # Try both sources.
        info_file = self._parse_name(filename)
        info_dir = self._parse_name(parent_dir)

        # Strategy: prefer the folder name for scene releases.
        # Scene folder: "24.S07E01.German.DL.1080p-TLR" -> series="24"
        # Scene file:   "tlr-24.s07e01.1080p.mkv"       -> series="tlr-24"
        # The folder has the series name up front; the file often has the
        # release tag up front.

        # Folder name has S/E -> prefer it (usually has the correct series
        # name).
        if info_dir.get("season") and info_dir.get("episode"):
            if info_dir.get("series"):
                return info_dir
            # Folder has S/E but no name -> take the name from the file.
            if info_file.get("series"):
                info_dir["series"] = info_file["series"]
            return info_dir

        # File name has S/E.
        if info_file.get("season") and info_file.get("episode"):
            # Folder series name as fallback when the file has none.
            if not info_file.get("series") and info_dir.get("series"):
                info_file["series"] = info_dir["series"]
            return info_file

        return info_file
||||||
|
|
||||||
|
def _parse_name(self, name: str) -> dict:
    """Extract series name, season and episode from one name.

    Supports the S01E02 style first and the 1x02 style second; the
    series portion is matched against the extension-less name and
    normalised via _clean_name().
    """
    parsed = {"series": "", "season": None, "episode": None}
    stem = os.path.splitext(name)[0]

    # Each episode-number pattern is paired with the regex that captures
    # the series part preceding it; the first pattern that hits wins.
    pattern_pairs = (
        (RE_SXXEXX, RE_SERIES_FROM_NAME),   # S01E02 style
        (RE_XXxXX, RE_SERIES_FROM_XXx),     # 1x02 style
    )
    for episode_re, series_re in pattern_pairs:
        hit = episode_re.search(name)
        if not hit:
            continue
        parsed["season"] = int(hit.group(1))
        parsed["episode"] = int(hit.group(2))
        series_hit = series_re.match(stem)
        if series_hit:
            parsed["series"] = self._clean_name(series_hit.group(1))
        return parsed

    return parsed
@staticmethod
|
||||||
|
def _clean_name(name: str) -> str:
|
||||||
|
"""Bereinigt Seriennamen: Punkte/Underscores durch Leerzeichen"""
|
||||||
|
name = name.replace(".", " ").replace("_", " ")
|
||||||
|
# Mehrfach-Leerzeichen reduzieren
|
||||||
|
name = re.sub(r'\s+', ' ', name).strip()
|
||||||
|
# Trailing Bindestriche entfernen
|
||||||
|
name = name.rstrip(" -")
|
||||||
|
return name
|
||||||
|
|
||||||
|
def _build_target(self, series: str, season: Optional[int],
|
||||||
|
episode: Optional[int], title: str, ext: str,
|
||||||
|
lib_path: str, pattern: str,
|
||||||
|
season_pattern: str) -> tuple[str, str]:
|
||||||
|
"""Baut Ziel-Ordner und Dateiname nach Pattern"""
|
||||||
|
s = season or 1
|
||||||
|
e = episode or 0
|
||||||
|
|
||||||
|
# Season-Ordner
|
||||||
|
season_dir = season_pattern.format(season=s)
|
||||||
|
|
||||||
|
# Dateiname
|
||||||
|
try:
|
||||||
|
filename = pattern.format(
|
||||||
|
series=series, season=s, episode=e,
|
||||||
|
title=title or "Unbekannt", ext=ext
|
||||||
|
)
|
||||||
|
except (KeyError, ValueError):
|
||||||
|
filename = f"{series} - S{s:02d}E{e:02d} - {title or 'Unbekannt'}.{ext}"
|
||||||
|
|
||||||
|
# Ungueltige Zeichen entfernen
|
||||||
|
for ch in ['<', '>', ':', '"', '|', '?', '*']:
|
||||||
|
filename = filename.replace(ch, '')
|
||||||
|
series = series.replace(ch, '')
|
||||||
|
|
||||||
|
target_dir = os.path.join(lib_path, series, season_dir)
|
||||||
|
return target_dir, filename
|
||||||
|
|
||||||
|
async def _get_episode_title(self, tvdb_id: int,
                             season: int, episode: int) -> str:
    """Fetch an episode title from the TVDB cache, or from the API.

    Checks the local tvdb_episode_cache table first; on a cache miss
    (or any DB error) it falls back to fetching the full episode list
    from TVDB. Returns "" when no title can be determined.
    """
    # Without a DB pool we cannot cache and deliberately skip the API too.
    if not self._db_pool:
        return ""
    try:
        async with self._db_pool.acquire() as conn:
            async with conn.cursor() as cur:
                # Check the cache first
                await cur.execute(
                    "SELECT episode_name FROM tvdb_episode_cache "
                    "WHERE series_tvdb_id = %s "
                    "AND season_number = %s "
                    "AND episode_number = %s",
                    (tvdb_id, season, episode)
                )
                row = await cur.fetchone()
                if row and row[0]:
                    return row[0]
    except Exception:
        # Best-effort cache lookup: any DB error simply falls through
        # to the API path below.
        pass

    # Cache empty -> load episodes from TVDB and search linearly.
    episodes = await self.tvdb.fetch_episodes(tvdb_id)
    for ep in episodes:
        if ep["season_number"] == season and ep["episode_number"] == episode:
            return ep.get("episode_name", "")
    return ""
async def execute_import(self, job_id: int) -> dict:
    """Execute the import (copy/move) for one import job.

    Processes only items whose status is 'matched', or 'conflict' items
    the user chose to overwrite. Updates processed_files after every
    item and sets the job status to 'done' or 'error' at the end.

    Returns {"done": n, "errors": n} on success, {"error": msg} otherwise.
    """
    if not self._db_pool:
        return {"error": "Keine DB-Verbindung"}

    try:
        async with self._db_pool.acquire() as conn:
            async with conn.cursor(aiomysql.DictCursor) as cur:
                await cur.execute(
                    "SELECT * FROM import_jobs WHERE id = %s", (job_id,)
                )
                job = await cur.fetchone()
                if not job:
                    return {"error": "Job nicht gefunden"}

                # Mark the job as running before touching any files.
                await cur.execute(
                    "UPDATE import_jobs SET status = 'importing' "
                    "WHERE id = %s", (job_id,)
                )

                # Only items with status matched, or conflict+overwrite
                await cur.execute(
                    "SELECT * FROM import_items "
                    "WHERE import_job_id = %s "
                    "AND (status = 'matched' "
                    " OR (status = 'conflict' "
                    " AND user_action = 'overwrite'))",
                    (job_id,)
                )
                items = await cur.fetchall()

        done = 0
        errors = 0
        mode = job.get("mode", "copy")

        for item in items:
            ok = await self._process_item(item, mode)
            if ok:
                done += 1
            else:
                errors += 1

            # Update progress after every item so the UI can poll it.
            async with self._db_pool.acquire() as conn:
                async with conn.cursor() as cur:
                    await cur.execute(
                        "UPDATE import_jobs SET processed_files = %s "
                        "WHERE id = %s", (done + errors, job_id)
                    )

        # Finalise the job: any item error marks the whole job 'error'.
        status = "done" if errors == 0 else "error"
        async with self._db_pool.acquire() as conn:
            async with conn.cursor() as cur:
                await cur.execute(
                    "UPDATE import_jobs SET status = %s "
                    "WHERE id = %s", (status, job_id)
                )

        return {"done": done, "errors": errors}

    except Exception as e:
        logging.error(f"Import ausfuehren fehlgeschlagen: {e}")
        return {"error": str(e)}
async def _process_item(self, item: dict, mode: str) -> bool:
|
||||||
|
"""Einzelnes Item importieren (kopieren/verschieben)"""
|
||||||
|
src = item["source_file"]
|
||||||
|
target_dir = item["target_path"]
|
||||||
|
target_file = item["target_filename"]
|
||||||
|
|
||||||
|
if not target_dir or not target_file:
|
||||||
|
await self._update_item_status(item["id"], "error")
|
||||||
|
return False
|
||||||
|
|
||||||
|
target = os.path.join(target_dir, target_file)
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Zielordner erstellen
|
||||||
|
os.makedirs(target_dir, exist_ok=True)
|
||||||
|
|
||||||
|
if mode == "move":
|
||||||
|
shutil.move(src, target)
|
||||||
|
else:
|
||||||
|
shutil.copy2(src, target)
|
||||||
|
|
||||||
|
logging.info(
|
||||||
|
f"Import: {os.path.basename(src)} -> {target}"
|
||||||
|
)
|
||||||
|
await self._update_item_status(item["id"], "done")
|
||||||
|
return True
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logging.error(f"Import fehlgeschlagen: {src}: {e}")
|
||||||
|
await self._update_item_status(item["id"], "error")
|
||||||
|
return False
|
||||||
|
|
||||||
|
async def _update_item_status(self, item_id: int,
                              status: str) -> None:
    """Set the status column of one import item (best-effort).

    Silently does nothing when there is no DB pool, and swallows all
    DB errors so a failed status write never aborts the import loop.
    """
    if not self._db_pool:
        return
    try:
        async with self._db_pool.acquire() as conn:
            async with conn.cursor() as cur:
                await cur.execute(
                    "UPDATE import_items SET status = %s "
                    "WHERE id = %s", (status, item_id)
                )
    except Exception:
        # Deliberate best-effort: callers rely on this never raising.
        pass
async def resolve_conflict(self, item_id: int,
                           action: str) -> bool:
    """Resolve an import conflict: 'overwrite', 'skip' or 'rename'.

    - skip:      item is marked 'skipped' and excluded from the import
    - rename:    target filename gets a '_neu' suffix and the item is
                 set back to 'matched' so it will be imported
    - overwrite: only user_action is recorded; execute_import picks the
                 item up via its conflict+overwrite query

    Returns True on success, False for invalid input or DB errors.
    """
    if not self._db_pool or action not in ('overwrite', 'skip', 'rename'):
        return False
    try:
        async with self._db_pool.acquire() as conn:
            async with conn.cursor() as cur:
                if action == 'skip':
                    await cur.execute(
                        "UPDATE import_items SET status = 'skipped', "
                        "user_action = 'skip' WHERE id = %s",
                        (item_id,)
                    )
                elif action == 'rename':
                    # Give the file name a suffix to avoid the clash.
                    await cur.execute(
                        "SELECT target_filename FROM import_items "
                        "WHERE id = %s", (item_id,)
                    )
                    row = await cur.fetchone()
                    if row and row[0]:
                        name, ext = os.path.splitext(row[0])
                        new_name = f"{name}_neu{ext}"
                        await cur.execute(
                            "UPDATE import_items SET "
                            "target_filename = %s, "
                            "status = 'matched', "
                            "user_action = 'rename' "
                            "WHERE id = %s",
                            (new_name, item_id)
                        )
                else:  # overwrite
                    await cur.execute(
                        "UPDATE import_items SET user_action = 'overwrite' "
                        "WHERE id = %s", (item_id,)
                    )
                return True
    except Exception as e:
        logging.error(f"Konflikt loesen fehlgeschlagen: {e}")
        return False
async def update_item(self, item_id: int, **kwargs) -> bool:
    """Manually correct fields of an import item.

    Only whitelisted columns are accepted; unknown kwargs are ignored.
    Returns False when there is no DB pool, when nothing remains to
    update, or on a DB error.
    """
    if not self._db_pool:
        return False

    # Whitelist of columns a caller may touch. This also guards the
    # f-string SQL below against injection via kwarg names.
    allowed = {
        'detected_series', 'detected_season', 'detected_episode',
        'tvdb_series_id', 'tvdb_series_name', 'tvdb_episode_title',
        'target_path', 'target_filename', 'status'
    }
    accepted = [(k, v) for k, v in kwargs.items() if k in allowed]
    if not accepted:
        return False

    updates = [f"{k} = %s" for k, _ in accepted]
    params = [v for _, v in accepted] + [item_id]

    try:
        async with self._db_pool.acquire() as conn:
            async with conn.cursor() as cur:
                await cur.execute(
                    f"UPDATE import_items SET {', '.join(updates)} "
                    f"WHERE id = %s", params
                )
                return True
    except Exception as e:
        logging.error(f"Import-Item aktualisieren fehlgeschlagen: {e}")
        return False
async def get_job_status(self, job_id: int) -> dict:
    """Return the status of an import job including all of its items.

    Shape on success: {"job": {...}, "items": [{...}, ...]} with rows
    made JSON-compatible via _serialize(). On failure: {"error": msg}.
    """
    if not self._db_pool:
        return {"error": "Keine DB-Verbindung"}
    try:
        async with self._db_pool.acquire() as conn:
            async with conn.cursor(aiomysql.DictCursor) as cur:
                await cur.execute(
                    "SELECT * FROM import_jobs WHERE id = %s", (job_id,)
                )
                job = await cur.fetchone()
                if not job:
                    return {"error": "Job nicht gefunden"}

                # Stable ordering so the UI renders deterministically.
                await cur.execute(
                    "SELECT * FROM import_items "
                    "WHERE import_job_id = %s ORDER BY source_file",
                    (job_id,)
                )
                items = await cur.fetchall()

                return {
                    "job": self._serialize(job),
                    "items": [self._serialize(i) for i in items],
                }
    except Exception as e:
        return {"error": str(e)}
@staticmethod
|
||||||
|
def _serialize(row: dict) -> dict:
|
||||||
|
"""Dict JSON-kompatibel machen"""
|
||||||
|
result = {}
|
||||||
|
for k, v in row.items():
|
||||||
|
if hasattr(v, "isoformat"):
|
||||||
|
result[k] = str(v)
|
||||||
|
else:
|
||||||
|
result[k] = v
|
||||||
|
return result
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _fmt_size(b: int) -> str:
|
||||||
|
"""Bytes menschenlesbar"""
|
||||||
|
for u in ("B", "KiB", "MiB", "GiB"):
|
||||||
|
if b < 1024:
|
||||||
|
return f"{b:.1f} {u}"
|
||||||
|
b /= 1024
|
||||||
|
return f"{b:.1f} TiB"
|
||||||
1747
app/services/library.py
Normal file
1747
app/services/library.py
Normal file
File diff suppressed because it is too large
Load diff
177
app/services/probe.py
Normal file
177
app/services/probe.py
Normal file
|
|
@ -0,0 +1,177 @@
|
||||||
|
"""Asynchrone ffprobe Media-Analyse"""
|
||||||
|
import asyncio
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
from typing import Optional
|
||||||
|
from app.models.media import MediaFile, VideoStream, AudioStream, SubtitleStream
|
||||||
|
|
||||||
|
|
||||||
|
class ProbeService:
    """ffprobe-based media analysis - fully asynchronous."""

    @staticmethod
    async def analyze(file_path: str) -> Optional[MediaFile]:
        """
        Analyse a media file with ffprobe.
        Runs 3 probe calls in parallel (video/audio/subtitle).
        Returns None on any error.
        """
        if not os.path.exists(file_path):
            logging.error(f"Datei nicht gefunden: {file_path}")
            return None

        try:
            # Query all three stream types in parallel.
            video_task = ProbeService._probe_streams(file_path, "v")
            audio_task = ProbeService._probe_streams(file_path, "a")
            subtitle_task = ProbeService._probe_streams(file_path, "s")

            video_data, audio_data, subtitle_data = await asyncio.gather(
                video_task, audio_task, subtitle_task
            )

            # Parse the streams of each type into model objects.
            video_streams = ProbeService._parse_video_streams(video_data)
            audio_streams = ProbeService._parse_audio_streams(audio_data)
            subtitle_streams = ProbeService._parse_subtitle_streams(subtitle_data)

            # Container/format info is taken from the video probe's
            # output (each probe call requests the same format entries).
            size_bytes, duration_sec, bitrate = ProbeService._parse_format(video_data)

            media = MediaFile(
                source_path=file_path,
                source_size_bytes=size_bytes,
                source_duration_sec=duration_sec,
                source_bitrate=bitrate,
                video_streams=video_streams,
                audio_streams=audio_streams,
                subtitle_streams=subtitle_streams,
            )

            logging.info(
                f"Analysiert: {media.source_filename} "
                f"({media.source_size_human[0]} {media.source_size_human[1]}, "
                f"{MediaFile.format_time(duration_sec)}, "
                f"{len(video_streams)}V/{len(audio_streams)}A/{len(subtitle_streams)}S)"
            )
            return media

        except Exception as e:
            logging.error(f"ffprobe Analyse fehlgeschlagen fuer {file_path}: {e}")
            return None

    @staticmethod
    async def _probe_streams(file_path: str, stream_type: str) -> dict:
        """
        Single ffprobe call (async).
        stream_type: 'v' (video), 'a' (audio), 's' (subtitle)

        Returns the parsed JSON dict, or {"streams": [], "format": {}}
        on an ffprobe error or unparseable output.
        """
        command = [
            "ffprobe", "-v", "error",
            "-select_streams", stream_type,
            "-show_entries",
            # Adjacent string literals concatenate into one argument.
            "stream=index,channels,codec_name,codec_type,pix_fmt,level,"
            "r_frame_rate,bit_rate,sample_rate,width,height"
            ":stream_tags=language",
            "-show_entries",
            "format=size,bit_rate,nb_streams,duration",
            "-of", "json",
            file_path,
        ]

        process = await asyncio.create_subprocess_exec(
            *command,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )
        stdout, stderr = await process.communicate()

        if process.returncode != 0:
            logging.warning(
                f"ffprobe Fehler (stream={stream_type}): "
                f"{stderr.decode(errors='replace').strip()}"
            )
            return {"streams": [], "format": {}}

        try:
            return json.loads(stdout.decode())
        except json.JSONDecodeError:
            logging.error(f"ffprobe JSON-Parsing fehlgeschlagen (stream={stream_type})")
            return {"streams": [], "format": {}}

    @staticmethod
    def _parse_video_streams(data: dict) -> list[VideoStream]:
        """Parse ffprobe JSON into VideoStream objects."""
        streams = []
        for s in data.get("streams", []):
            # Compute frame rate from the fraction (e.g. "24000/1001" -> 23.976).
            fr_str = s.get("r_frame_rate", "0/1")
            parts = fr_str.split("/")
            if len(parts) == 2 and int(parts[1]) > 0:
                frame_rate = round(int(parts[0]) / int(parts[1]), 3)
            else:
                frame_rate = 0.0

            streams.append(VideoStream(
                index=s.get("index", 0),
                codec_name=s.get("codec_name", "unknown"),
                width=s.get("width", 0),
                height=s.get("height", 0),
                pix_fmt=s.get("pix_fmt", ""),
                frame_rate=frame_rate,
                level=s.get("level"),
                # ffprobe reports bit_rate as a string; None when absent.
                bit_rate=int(s["bit_rate"]) if s.get("bit_rate") else None,
            ))
        return streams

    @staticmethod
    def _parse_audio_streams(data: dict) -> list[AudioStream]:
        """Parse ffprobe JSON into AudioStream objects."""
        streams = []
        for s in data.get("streams", []):
            language = s.get("tags", {}).get("language")
            streams.append(AudioStream(
                index=s.get("index", 0),
                codec_name=s.get("codec_name", "unknown"),
                channels=s.get("channels", 2),
                sample_rate=int(s.get("sample_rate", 48000)),
                language=language,
                bit_rate=int(s["bit_rate"]) if s.get("bit_rate") else None,
            ))
        return streams

    @staticmethod
    def _parse_subtitle_streams(data: dict) -> list[SubtitleStream]:
        """Parse ffprobe JSON into SubtitleStream objects."""
        streams = []
        for s in data.get("streams", []):
            language = s.get("tags", {}).get("language")
            streams.append(SubtitleStream(
                index=s.get("index", 0),
                codec_name=s.get("codec_name", "unknown"),
                language=language,
            ))
        return streams

    @staticmethod
    def _parse_format(data: dict) -> tuple[int, float, int]:
        """Parse format information: (size_bytes, duration_sec, bitrate)."""
        fmt = data.get("format", {})
        # Defensive: some outputs wrap the format section in a list.
        if isinstance(fmt, list):
            fmt = fmt[0] if fmt else {}

        size_bytes = int(fmt.get("size", 0))
        bitrate = int(fmt.get("bit_rate", 0))

        # Duration may be "HH:MM:SS" or plain seconds.
        duration_raw = fmt.get("duration", "0")
        try:
            if ":" in str(duration_raw):
                duration_sec = MediaFile.time_to_seconds(str(duration_raw))
            else:
                duration_sec = float(duration_raw)
        except (ValueError, TypeError):
            duration_sec = 0.0

        return size_bytes, duration_sec, bitrate
132
app/services/progress.py
Normal file
132
app/services/progress.py
Normal file
|
|
@ -0,0 +1,132 @@
|
||||||
|
"""Echtzeit-Parsing der ffmpeg stderr-Ausgabe"""
|
||||||
|
import re
|
||||||
|
import asyncio
|
||||||
|
import logging
|
||||||
|
from typing import Callable, Awaitable
|
||||||
|
from app.models.job import ConversionJob
|
||||||
|
from app.models.media import MediaFile
|
||||||
|
|
||||||
|
|
||||||
|
class ProgressParser:
    """
    Reads ffmpeg stderr and extracts progress information.
    Invokes a callback for WebSocket updates.
    Stores the most recent stderr lines for error diagnosis.
    """

    def __init__(self, on_progress: Callable[[ConversionJob], Awaitable[None]]):
        # Async callback invoked once per processed stderr chunk.
        self.on_progress = on_progress
        # Ring buffer of recent stderr lines (see get_error_output()).
        self.last_lines: list[str] = []
        self._max_lines = 50

    async def monitor(self, job: ConversionJob) -> None:
        """
        Main loop: reads the ffmpeg process's stderr, parses progress
        and invokes the callback. Returns when the stream stays empty
        for ~15s (30 reads x 0.5s) or reading fails.

        NOTE(review): values are regex-matched per 1024-byte chunk; a
        token split across a chunk boundary is missed until the next
        full occurrence — acceptable for progress display, confirm if
        exact counts ever matter.
        """
        if not job.process or not job.process.stderr:
            return

        empty_reads = 0
        max_empty_reads = 30
        update_counter = 0

        while True:
            try:
                data = await job.process.stderr.read(1024)
            except Exception:
                break

            if not data:
                # EOF or quiet stream: back off, give up after the limit.
                empty_reads += 1
                if empty_reads > max_empty_reads:
                    break
                await asyncio.sleep(0.5)
                continue

            empty_reads = 0
            line = data.decode(errors="replace")

            # Keep the latest stderr lines for error diagnosis.
            for part in line.splitlines():
                part = part.strip()
                if part:
                    self.last_lines.append(part)
                    if len(self.last_lines) > self._max_lines:
                        self.last_lines.pop(0)

            self._extract_values(job, line)
            self._calculate_progress(job)
            job.update_stats(job.progress_fps, job.progress_speed, job.progress_bitrate)

            # Push a WebSocket update for every chunk.
            await self.on_progress(job)

            # Verbose log line every 100 reads to avoid log spam.
            update_counter += 1
            if update_counter % 100 == 0:
                logging.info(
                    f"[{job.media.source_filename}] "
                    f"{job.progress_percent:.1f}% | "
                    f"FPS: {job.progress_fps} | "
                    f"Speed: {job.progress_speed}x | "
                    f"ETA: {MediaFile.format_time(job.progress_eta_sec)}"
                )

    def get_error_output(self) -> str:
        """Return the last (up to 10) captured stderr lines as one string."""
        return "\n".join(self.last_lines[-10:])

    @staticmethod
    def _extract_values(job: ConversionJob, line: str) -> None:
        """Regex extraction of progress fields from an ffmpeg stderr chunk.

        Uses the LAST match in the chunk for each field; frame, size and
        time only move forward (monotonic guards) so stale repeats in
        the same chunk cannot regress the progress.
        """
        # Frame counter
        match = re.findall(r"frame=\s*(\d+)", line)
        if match:
            frames = int(match[-1])
            if frames > job.progress_frames:
                job.progress_frames = frames

        # Current encoder FPS
        match = re.findall(r"fps=\s*(\d+\.?\d*)", line)
        if match:
            job.progress_fps = float(match[-1])

        # Encoding speed relative to realtime
        match = re.findall(r"speed=\s*(\d+\.?\d*)", line)
        if match:
            job.progress_speed = float(match[-1])

        # Output bitrate
        match = re.findall(r"bitrate=\s*(\d+)", line)
        if match:
            job.progress_bitrate = int(match[-1])

        # Output size (ffmpeg reports KiB)
        match = re.findall(r"size=\s*(\d+)", line)
        if match:
            size_kib = int(match[-1])
            size_bytes = size_kib * 1024
            if size_bytes > job.progress_size_bytes:
                job.progress_size_bytes = size_bytes

        # Processed timestamp (HH:MM:SS[.ms])
        match = re.findall(r"time=\s*(\d+:\d+:\d+\.?\d*)", line)
        if match:
            seconds = MediaFile.time_to_seconds(match[-1])
            if seconds > job.progress_time_sec:
                job.progress_time_sec = seconds

    @staticmethod
    def _calculate_progress(job: ConversionJob) -> None:
        """Compute progress percentage and ETA from the extracted values."""
        total_frames = job.media.total_frames
        if total_frames > 0:
            # Clamp to 100% in case the frame estimate was low.
            job.progress_percent = min(
                (job.progress_frames / total_frames) * 100, 100.0
            )

        # ETA based on the running average FPS.
        if job.avg_fps > 0 and total_frames > 0:
            remaining_frames = total_frames - job.progress_frames
            job.progress_eta_sec = max(0, remaining_frames / job.avg_fps)
541
app/services/queue.py
Normal file
541
app/services/queue.py
Normal file
|
|
@ -0,0 +1,541 @@
|
||||||
|
"""Job-Queue mit Persistierung und paralleler Ausfuehrung"""
|
||||||
|
import asyncio
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import time
|
||||||
|
from collections import OrderedDict
|
||||||
|
from decimal import Decimal
|
||||||
|
from typing import Optional, TYPE_CHECKING
|
||||||
|
|
||||||
|
import aiomysql
|
||||||
|
|
||||||
|
from app.config import Config
|
||||||
|
from app.models.job import ConversionJob, JobStatus
|
||||||
|
from app.models.media import MediaFile
|
||||||
|
from app.services.encoder import EncoderService
|
||||||
|
from app.services.probe import ProbeService
|
||||||
|
from app.services.progress import ProgressParser
|
||||||
|
from app.services.scanner import ScannerService
|
||||||
|
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
from app.routes.ws import WebSocketManager
|
||||||
|
|
||||||
|
|
||||||
|
class QueueService:
|
||||||
|
"""Verwaltet die Konvertierungs-Queue mit Persistierung"""
|
||||||
|
|
||||||
|
def __init__(self, config: Config, ws_manager: 'WebSocketManager'):
    """Set up queue state; no I/O happens until start() is called."""
    self.config = config
    self.ws_manager = ws_manager
    self.encoder = EncoderService(config)
    self.scanner = ScannerService(config)
    # Insertion-ordered job registry, keyed by job id (FIFO scheduling).
    self.jobs: OrderedDict[int, ConversionJob] = OrderedDict()
    # Number of currently running conversions.
    self._active_count: int = 0
    # Worker-loop flag, toggled by start()/stop().
    self._running: bool = False
    self._queue_task: Optional[asyncio.Task] = None
    # Persistence file for pending jobs across restarts.
    self._queue_file = str(config.data_path / "queue.json")
    self._db_pool: Optional[aiomysql.Pool] = None
async def start(self) -> None:
    """Start the queue worker and initialise the database.

    Loads persisted pending paths and re-queues them asynchronously so
    startup is not blocked by probing.
    """
    await self._init_db()
    pending = self._load_queue()
    self._running = True
    self._queue_task = asyncio.create_task(self._process_loop())
    logging.info(
        f"Queue gestartet ({len(self.jobs)} Jobs geladen, "
        f"max {self.config.max_parallel_jobs} parallel)"
    )
    # Re-queue persisted jobs asynchronously (fire-and-forget task).
    if pending:
        asyncio.create_task(self.add_paths(pending))
async def stop(self) -> None:
    """Stop the queue worker and close the DB pool."""
    self._running = False
    if self._queue_task:
        self._queue_task.cancel()
        try:
            # Await the cancelled task so it unwinds cleanly.
            await self._queue_task
        except asyncio.CancelledError:
            pass
    if self._db_pool is not None:
        self._db_pool.close()
        await self._db_pool.wait_closed()
    logging.info("Queue gestoppt")
async def add_job(self, media: MediaFile,
                  preset_name: Optional[str] = None) -> Optional[ConversionJob]:
    """Add a new conversion job to the queue.

    Skips duplicates (by source path) and falls back to the configured
    default preset when none is given. Persists the queue and pushes a
    WebSocket update. Returns the new job, or None for duplicates.
    """
    if self._is_duplicate(media.source_path):
        logging.info(f"Duplikat uebersprungen: {media.source_filename}")
        return None

    if not preset_name:
        preset_name = self.config.default_preset_name

    job = ConversionJob(media=media, preset_name=preset_name)
    job.build_target_path(self.config)
    self.jobs[job.id] = job
    self._save_queue()

    logging.info(
        f"Job hinzugefuegt: {media.source_filename} "
        f"-> {job.target_filename} (Preset: {preset_name})"
    )

    await self.ws_manager.broadcast_queue_update()
    return job
async def add_paths(self, paths: list[str],
                    preset_name: Optional[str] = None,
                    recursive: Optional[bool] = None) -> list[ConversionJob]:
    """Queue every media file found under the given paths.

    Paths may be files or directories; blank entries are ignored.
    Each discovered file is probed and, if analysable and not a
    duplicate, added as a job. Returns the newly created jobs.
    """
    # Expand all paths to concrete files first, so the log line below
    # reports the full total before probing starts.
    discovered: list[str] = []
    for raw in paths:
        candidate = raw.strip()
        if candidate:
            discovered.extend(self.scanner.scan_path(candidate, recursive))

    logging.info(f"{len(discovered)} Dateien aus {len(paths)} Pfaden gefunden")

    queued: list[ConversionJob] = []
    for file_path in discovered:
        media = await ProbeService.analyze(file_path)
        if not media:
            continue
        job = await self.add_job(media, preset_name)
        if job:
            queued.append(job)

    return queued
async def remove_job(self, job_id: int) -> bool:
    """Remove a job from the queue, aborting a running conversion.

    For active jobs: terminate first, wait briefly, then kill if the
    process has not exited. Returns False for unknown job ids.
    """
    job = self.jobs.get(job_id)
    if not job:
        return False

    if job.status == JobStatus.ACTIVE and job.process:
        try:
            job.process.terminate()
            # Give ffmpeg a moment to exit gracefully before killing.
            await asyncio.sleep(0.5)
            if job.process.returncode is None:
                job.process.kill()
        except ProcessLookupError:
            # Process already gone — nothing to do.
            pass

    if job.task and not job.task.done():
        job.task.cancel()

    del self.jobs[job_id]
    self._save_queue()
    await self.ws_manager.broadcast_queue_update()
    logging.info(f"Job entfernt: {job.media.source_filename}")
    return True
async def cancel_job(self, job_id: int) -> bool:
    """Abort a running job (job stays in the list as CANCELLED).

    Returns False when the job is unknown or not currently active.
    """
    job = self.jobs.get(job_id)
    if not job or job.status != JobStatus.ACTIVE:
        return False

    if job.process:
        try:
            job.process.terminate()
        except ProcessLookupError:
            # Process already exited — proceed with the state change.
            pass

    job.status = JobStatus.CANCELLED
    job.finished_at = time.time()
    # Guard against going negative if the worker already decremented.
    self._active_count = max(0, self._active_count - 1)
    self._save_queue()
    await self.ws_manager.broadcast_queue_update()
    logging.info(f"Job abgebrochen: {job.media.source_filename}")
    return True
async def retry_job(self, job_id: int) -> bool:
    """Reset a failed or cancelled job back to QUEUED.

    Clears all progress fields and the running-average accumulators so
    the next run starts from a clean state. Returns False when the job
    is unknown or not in a retryable state.
    """
    job = self.jobs.get(job_id)
    if not job or job.status not in (JobStatus.FAILED, JobStatus.CANCELLED):
        return False

    job.status = JobStatus.QUEUED
    job.progress_percent = 0.0
    job.progress_frames = 0
    job.progress_fps = 0.0
    job.progress_speed = 0.0
    # Fix: progress_bitrate was the only progress field not reset,
    # leaving a stale bitrate displayed until the retry produced output.
    job.progress_bitrate = 0
    job.progress_size_bytes = 0
    job.progress_time_sec = 0.0
    job.progress_eta_sec = 0.0
    job.started_at = None
    job.finished_at = None
    # Reset the (sum, count) running-average accumulators.
    job._stat_fps = [0.0, 0]
    job._stat_speed = [0.0, 0]
    job._stat_bitrate = [0, 0]
    self._save_queue()
    await self.ws_manager.broadcast_queue_update()
    logging.info(f"Job wiederholt: {job.media.source_filename}")
    return True
def get_queue_state(self) -> dict:
    """Snapshot of all non-finished jobs for the WebSocket.

    Returns {"data_queue": {job_id: queue_dict, ...}} covering queued,
    active, failed and cancelled jobs.
    """
    return {
        "data_queue": {
            jid: job.to_dict_queue()
            for jid, job in self.jobs.items()
            if job.status in (JobStatus.QUEUED, JobStatus.ACTIVE,
                              JobStatus.FAILED, JobStatus.CANCELLED)
        }
    }
def get_active_jobs(self) -> dict:
    """Currently converting jobs for the WebSocket.

    Returns {"data_convert": {job_id: active_dict, ...}}.
    """
    return {
        "data_convert": {
            jid: job.to_dict_active()
            for jid, job in self.jobs.items()
            if job.status == JobStatus.ACTIVE
        }
    }
def get_all_jobs(self) -> list[dict]:
    """Every known job as a flat list of dicts for the REST API.

    Each entry merges the job id and status name into the active-view
    dict of the job.
    """
    entries: list[dict] = []
    for jid, job in self.jobs.items():
        entry = {"id": jid}
        entry.update(job.to_dict_active())
        entry["status_name"] = job.status.name
        entries.append(entry)
    return entries
# --- Interner Queue-Worker ---
|
||||||
|
|
||||||
|
async def _process_loop(self) -> None:
    """Main worker loop: start new jobs while capacity is free.

    Polls every 0.5s; at most one new job is launched per iteration,
    each as its own fire-and-forget task. Errors in scheduling are
    logged and the loop keeps running.
    """
    while self._running:
        try:
            if self._active_count < self.config.max_parallel_jobs:
                next_job = self._get_next_queued()
                if next_job:
                    asyncio.create_task(self._execute_job(next_job))
        except Exception as e:
            logging.error(f"Queue-Worker Fehler: {e}")

        await asyncio.sleep(0.5)
async def _execute_job(self, job: ConversionJob) -> None:
    """Fuehrt einzelnen Konvertierungs-Job aus"""
    # Reserve a slot and announce the state change before ffmpeg starts.
    self._active_count += 1
    job.status = JobStatus.ACTIVE
    job.started_at = time.time()

    await self.ws_manager.broadcast_queue_update()

    command = self.encoder.build_command(job)
    logging.info(
        f"Starte Konvertierung: {job.media.source_filename}\n"
        f" Befehl: {' '.join(command)}"
    )

    try:
        # ffmpeg runs as a subprocess; stdout/stderr are piped so the
        # ProgressParser can follow the encode live.
        job.process = await asyncio.create_subprocess_exec(
            *command,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )

        progress = ProgressParser(self.ws_manager.broadcast_progress)
        await progress.monitor(job)
        await job.process.wait()

        if job.process.returncode == 0:
            job.status = JobStatus.FINISHED
            # Tatsaechliche Dateigroesse von Disk lesen
            if os.path.exists(job.target_path):
                job.progress_size_bytes = os.path.getsize(job.target_path)
            logging.info(
                f"Konvertierung abgeschlossen: {job.media.source_filename} "
                f"({MediaFile.format_time(time.time() - job.started_at)})"
            )
            await self._post_conversion_cleanup(job)
        else:
            # Non-zero exit: keep ffmpeg's stderr for diagnostics.
            job.status = JobStatus.FAILED
            error_output = progress.get_error_output()
            logging.error(
                f"Konvertierung fehlgeschlagen (Code {job.process.returncode}): "
                f"{job.media.source_filename}\n"
                f" ffmpeg stderr:\n{error_output}"
            )

    except asyncio.CancelledError:
        # NOTE(review): CancelledError is swallowed here (job marked
        # CANCELLED, coroutine returns normally) -- confirm this is intended
        # rather than re-raising.
        job.status = JobStatus.CANCELLED
        logging.info(f"Konvertierung abgebrochen: {job.media.source_filename}")
    except Exception as e:
        job.status = JobStatus.FAILED
        logging.error(f"Fehler bei Konvertierung: {e}")
    finally:
        # Always release the slot, persist and broadcast the final state.
        job.finished_at = time.time()
        self._active_count = max(0, self._active_count - 1)
        self._save_queue()
        await self._save_stats(job)
        await self.ws_manager.broadcast_queue_update()
|
||||||
|
|
||||||
|
async def _post_conversion_cleanup(self, job: ConversionJob) -> None:
    """Cleanup nach erfolgreicher Konvertierung"""
    files_cfg = self.config.files_config

    # Delete the source only when the target exists and is non-empty,
    # so a failed/empty encode never destroys the original.
    if files_cfg.get("delete_source", False):
        try:
            target_ok = os.path.getsize(job.target_path) > 0
        except OSError:
            target_ok = False
        if target_ok:
            try:
                os.remove(job.media.source_path)
                logging.info(f"Quelldatei geloescht: {job.media.source_path}")
            except OSError as e:
                logging.error(f"Quelldatei loeschen fehlgeschlagen: {e}")

    # Optional directory cleanup (samples, nfo files, ...) via scanner.
    cleanup_cfg = self.config.cleanup_config
    if cleanup_cfg.get("enabled", False):
        deleted = self.scanner.cleanup_directory(job.media.source_dir)
        if deleted:
            logging.info(
                f"{len(deleted)} Dateien bereinigt in {job.media.source_dir}"
            )
|
||||||
|
|
||||||
|
def _get_next_queued(self) -> Optional[ConversionJob]:
    """Naechster Job mit Status QUEUED (FIFO).

    Insertion order of ``self.jobs`` determines FIFO order.
    """
    return next(
        (job for job in self.jobs.values()
         if job.status == JobStatus.QUEUED),
        None,
    )
|
||||||
|
|
||||||
|
def _is_duplicate(self, source_path: str) -> bool:
    """Prueft ob Pfad bereits in Queue (nur aktive/wartende)"""
    pending_states = (JobStatus.QUEUED, JobStatus.ACTIVE)
    return any(
        job.media.source_path == source_path and job.status in pending_states
        for job in self.jobs.values()
    )
|
||||||
|
|
||||||
|
def _save_queue(self) -> None:
    """Persistiert Queue nach queue.json.

    Only QUEUED and FAILED jobs are written; finished/active jobs are not
    persisted across restarts.
    """
    persist_states = (JobStatus.QUEUED, JobStatus.FAILED)
    payload = [
        job.to_json()
        for job in self.jobs.values()
        if job.status in persist_states
    ]
    try:
        with open(self._queue_file, "w", encoding="utf-8") as f:
            json.dump(payload, f, indent=2)
    except Exception as e:
        logging.error(f"Queue speichern fehlgeschlagen: {e}")
|
||||||
|
|
||||||
|
def _load_queue(self) -> list[str]:
    """Laedt Queue aus queue.json, gibt Pfade zurueck.

    Returns the source paths of all persisted jobs whose stored status is
    QUEUED; an unreadable or missing file yields an empty list.
    """
    if not os.path.exists(self._queue_file):
        return []
    try:
        with open(self._queue_file, "r", encoding="utf-8") as f:
            entries = json.load(f)
        # NOTE(review): this comparison assumes JobStatus is an IntEnum so
        # the persisted int equals the enum member -- confirm.
        pending = [
            entry["source_path"]
            for entry in entries
            if entry.get("status", 0) == JobStatus.QUEUED
        ]
        if pending:
            logging.info(f"{len(pending)} Jobs aus Queue geladen")
        return pending
    except Exception as e:
        logging.error(f"Queue laden fehlgeschlagen: {e}")
        return []
|
||||||
|
|
||||||
|
# --- MariaDB Statistik-Datenbank ---
|
||||||
|
|
||||||
|
def _get_db_config(self) -> dict:
|
||||||
|
"""DB-Konfiguration aus Settings"""
|
||||||
|
db_cfg = self.config.settings.get("database", {})
|
||||||
|
return {
|
||||||
|
"host": db_cfg.get("host", "192.168.155.11"),
|
||||||
|
"port": db_cfg.get("port", 3306),
|
||||||
|
"user": db_cfg.get("user", "video"),
|
||||||
|
"password": db_cfg.get("password", "8715"),
|
||||||
|
"db": db_cfg.get("database", "video_converter"),
|
||||||
|
}
|
||||||
|
|
||||||
|
async def _get_pool(self) -> Optional[aiomysql.Pool]:
    """Gibt den Connection-Pool zurueck, erstellt ihn bei Bedarf.

    NOTE(review): first-call creation is not guarded against concurrent
    coroutines -- two racing callers could each create a pool; confirm
    callers serialize initialization.
    """
    if self._db_pool is None:
        cfg = self._get_db_config()
        self._db_pool = await aiomysql.create_pool(
            host=cfg["host"],
            port=cfg["port"],
            user=cfg["user"],
            password=cfg["password"],
            db=cfg["db"],
            charset="utf8mb4",
            autocommit=True,   # each statement commits immediately
            minsize=1,
            maxsize=5,
            connect_timeout=10,
        )
    return self._db_pool
|
||||||
|
|
||||||
|
async def _init_db(self) -> None:
    """Erstellt MariaDB-Tabelle falls nicht vorhanden"""
    db_cfg = self._get_db_config()
    try:
        pool = await self._get_pool()
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                # Idempotent schema creation for the statistics table.
                await cur.execute("""
                    CREATE TABLE IF NOT EXISTS conversions (
                        id INT AUTO_INCREMENT PRIMARY KEY,
                        source_path VARCHAR(1024) NOT NULL,
                        source_filename VARCHAR(512) NOT NULL,
                        source_size_bytes BIGINT,
                        source_duration_sec DOUBLE,
                        source_frame_rate DOUBLE,
                        source_frames_total INT,
                        target_path VARCHAR(1024),
                        target_filename VARCHAR(512),
                        target_size_bytes BIGINT,
                        target_container VARCHAR(10),
                        preset_name VARCHAR(64),
                        status INT,
                        started_at DOUBLE,
                        finished_at DOUBLE,
                        duration_sec DOUBLE,
                        avg_fps DOUBLE,
                        avg_speed DOUBLE,
                        avg_bitrate INT,
                        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                        INDEX idx_created_at (created_at),
                        INDEX idx_status (status)
                    ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4
                """)
        logging.info(
            f"MariaDB verbunden: {db_cfg['host']}:{db_cfg['port']}/"
            f"{db_cfg['db']}"
        )
    except Exception as e:
        logging.error(
            f"MariaDB Initialisierung fehlgeschlagen "
            f"({db_cfg['host']}:{db_cfg['port']}): {e}"
        )
        # Degrade gracefully: conversions keep running, stats are skipped
        # (see the _db_pool guard in _save_stats / get_statistics).
        logging.warning("Statistiken werden ohne Datenbank ausgefuehrt")
        self._db_pool = None
|
||||||
|
|
||||||
|
async def _save_stats(self, job: ConversionJob) -> None:
    """Speichert Konvertierungs-Ergebnis in MariaDB"""
    # No DB connection -> statistics are silently skipped (see _init_db).
    if self._db_pool is None:
        return
    stats = job.to_dict_stats()
    try:
        pool = await self._get_pool()
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                # Parameterized insert -- one row per completed job attempt.
                await cur.execute("""
                    INSERT INTO conversions (
                        source_path, source_filename, source_size_bytes,
                        source_duration_sec, source_frame_rate, source_frames_total,
                        target_path, target_filename, target_size_bytes,
                        target_container, preset_name, status,
                        started_at, finished_at, duration_sec,
                        avg_fps, avg_speed, avg_bitrate
                    ) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s,
                              %s, %s, %s, %s, %s, %s, %s, %s, %s)
                """, (
                    stats["source_path"], stats["source_filename"],
                    stats["source_size_bytes"], stats["source_duration_sec"],
                    stats["source_frame_rate"], stats["source_frames_total"],
                    stats["target_path"], stats["target_filename"],
                    stats["target_size_bytes"], stats["target_container"],
                    stats["preset_name"], stats["status"],
                    stats["started_at"], stats["finished_at"],
                    stats["duration_sec"], stats["avg_fps"],
                    stats["avg_speed"], stats["avg_bitrate"],
                ))

                # Max-Eintraege bereinigen
                # Keep only the newest max_entries rows (default 5000);
                # the derived-table wrapper works around MySQL's
                # "can't reuse target table in subquery" limitation.
                max_entries = self.config.settings.get(
                    "statistics", {}
                ).get("max_entries", 5000)
                await cur.execute(
                    "DELETE FROM conversions WHERE id NOT IN ("
                    "SELECT id FROM (SELECT id FROM conversions "
                    "ORDER BY created_at DESC LIMIT %s) AS tmp)",
                    (max_entries,)
                )
    except Exception as e:
        logging.error(f"Statistik speichern fehlgeschlagen: {e}")
|
||||||
|
|
||||||
|
async def get_statistics(self, limit: int = 50,
                         offset: int = 0) -> list[dict]:
    """Liest Statistiken aus MariaDB.

    Returns the newest rows first, paged via limit/offset; DB errors or a
    missing connection yield an empty list.
    """
    if self._db_pool is None:
        return []

    def _jsonable(value):
        # MariaDB-Typen JSON-kompatibel machen:
        # Decimal -> float, date/datetime (has isoformat) -> str.
        if isinstance(value, Decimal):
            return float(value)
        if hasattr(value, "isoformat"):
            return str(value)
        return value

    try:
        pool = await self._get_pool()
        async with pool.acquire() as conn:
            async with conn.cursor(aiomysql.DictCursor) as cur:
                await cur.execute(
                    "SELECT * FROM conversions ORDER BY created_at DESC "
                    "LIMIT %s OFFSET %s",
                    (limit, offset),
                )
                rows = await cur.fetchall()
                return [
                    {key: _jsonable(value) for key, value in row.items()}
                    for row in rows
                ]
    except Exception as e:
        logging.error(f"Statistik lesen fehlgeschlagen: {e}")
        return []
|
||||||
|
|
||||||
|
async def get_statistics_summary(self) -> dict:
    """Zusammenfassung der Statistiken"""
    if self._db_pool is None:
        return {}
    try:
        pool = await self._get_pool()
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                # Single aggregate query; columns are read positionally
                # below as row[0] .. row[7].
                await cur.execute("""
                    SELECT
                        COUNT(*) as total,
                        SUM(CASE WHEN status = 2 THEN 1 ELSE 0 END),
                        SUM(CASE WHEN status = 3 THEN 1 ELSE 0 END),
                        SUM(source_size_bytes),
                        SUM(target_size_bytes),
                        SUM(duration_sec),
                        AVG(avg_fps),
                        AVG(avg_speed)
                    FROM conversions
                """)
                row = await cur.fetchone()
                if row:
                    # Decimal -> float/int fuer JSON
                    def _n(v, as_int=False):
                        # SUM/AVG over an empty table is NULL -> treat as 0.
                        v = v or 0
                        return int(v) if as_int else float(v)

                    # NOTE(review): status codes 2/3 are mapped to
                    # finished/failed here -- confirm against JobStatus.
                    return {
                        "total": _n(row[0], True),
                        "finished": _n(row[1], True),
                        "failed": _n(row[2], True),
                        "total_source_size": _n(row[3], True),
                        "total_target_size": _n(row[4], True),
                        "space_saved": _n(row[3], True) - _n(row[4], True),
                        "total_duration": _n(row[5]),
                        "avg_fps": round(float(row[6] or 0), 1),
                        "avg_speed": round(float(row[7] or 0), 2),
                    }
                return {}
    except Exception as e:
        logging.error(f"Statistik-Zusammenfassung fehlgeschlagen: {e}")
        return {}
|
||||||
149
app/services/scanner.py
Normal file
149
app/services/scanner.py
Normal file
|
|
@ -0,0 +1,149 @@
|
||||||
|
"""Rekursives Ordner-Scanning und Cleanup-Service"""
|
||||||
|
import os
|
||||||
|
import logging
|
||||||
|
import fnmatch
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Optional
|
||||||
|
from app.config import Config
|
||||||
|
|
||||||
|
|
||||||
|
# Sicherheits-Blacklist: Diese Pfade duerfen NIE bereinigt werden
|
||||||
|
_PROTECTED_PATHS = {"/", "/home", "/root", "/mnt", "/media", "/tmp", "/var", "/etc"}
|
||||||
|
|
||||||
|
|
||||||
|
class ScannerService:
    """Scannt Ordner nach Videodateien und fuehrt optionalen Cleanup durch"""

    def __init__(self, config: Config):
        # Central app configuration (provides files_config / cleanup_config).
        self.config = config

    def scan_path(self, path: str, recursive: Optional[bool] = None) -> list[str]:
        """
        Scannt einen Pfad nach Videodateien.
        - Einzelne Datei: Gibt [path] zurueck wenn gueltige Extension
        - Ordner: Scannt nach konfigurierten Extensions
        """
        path = path.strip()
        if not path or not os.path.exists(path):
            logging.warning(f"Pfad nicht gefunden: {path}")
            return []

        files_cfg = self.config.files_config
        extensions = set(files_cfg.get("scan_extensions", []))

        # Single file: accept only when its extension is configured.
        if os.path.isfile(path):
            ext = os.path.splitext(path)[1].lower()
            if ext not in extensions:
                logging.warning(f"Dateiendung {ext} nicht in scan_extensions: {path}")
                return []
            return [path]

        # Directory: explicit argument wins over configured default.
        if os.path.isdir(path):
            if recursive is None:
                recursive = files_cfg.get("recursive_scan", True)
            return self._scan_directory(path, recursive, extensions)

        return []

    def _scan_directory(self, directory: str, recursive: bool,
                        extensions: set) -> list[str]:
        """Scannt Ordner nach Video-Extensions"""
        results: list[str] = []

        if recursive:
            for root, _dirs, names in os.walk(directory):
                results.extend(
                    os.path.join(root, name)
                    for name in names
                    if Path(name).suffix.lower() in extensions
                )
        else:
            try:
                for name in os.listdir(directory):
                    candidate = os.path.join(directory, name)
                    if (os.path.isfile(candidate)
                            and Path(name).suffix.lower() in extensions):
                        results.append(candidate)
            except PermissionError:
                logging.error(f"Keine Leseberechtigung: {directory}")

        logging.info(f"Scan: {len(results)} Dateien in {directory} "
                     f"(rekursiv={recursive})")
        return sorted(results)

    def cleanup_directory(self, directory: str) -> list[str]:
        """
        Loescht konfigurierte Datei-Typen aus einem Verzeichnis.
        Ausfuehrliche Sicherheits-Checks!
        """
        cleanup_cfg = self.config.cleanup_config

        # Check 1: Cleanup aktiviert?
        if not cleanup_cfg.get("enabled", False):
            return []

        # Check 2: Gueltiger Pfad?
        if not os.path.isdir(directory):
            return []

        # Check 3: Geschuetzter Pfad? (exact match against blacklist)
        abs_path = os.path.abspath(directory)
        if abs_path in _PROTECTED_PATHS:
            logging.warning(f"Geschuetzter Pfad, Cleanup uebersprungen: {abs_path}")
            return []

        # Check 4: Ordner muss Videodateien enthalten (oder enthalten haben)
        if not self._has_video_files(directory):
            logging.debug(f"Keine Videodateien im Ordner, Cleanup uebersprungen: "
                          f"{directory}")
            return []

        delete_extensions = set(cleanup_cfg.get("delete_extensions", []))
        deleted: list[str] = []

        # NUR im angegebenen Ordner, NICHT rekursiv
        try:
            for name in os.listdir(directory):
                full_path = os.path.join(directory, name)
                if not os.path.isfile(full_path):
                    continue
                if os.path.splitext(name)[1].lower() not in delete_extensions:
                    continue
                if self._is_excluded(name):
                    continue
                try:
                    os.remove(full_path)
                    deleted.append(full_path)
                    logging.info(f"Cleanup: Geloescht {full_path}")
                except OSError as e:
                    logging.error(f"Cleanup: Loeschen fehlgeschlagen {full_path}: {e}")
        except PermissionError:
            logging.error(f"Keine Berechtigung fuer Cleanup: {directory}")

        if deleted:
            logging.info(f"Cleanup: {len(deleted)} Dateien in {directory} geloescht")

        return deleted

    def _is_excluded(self, filename: str) -> bool:
        """Prueft ob Dateiname auf exclude_patterns matcht"""
        lowered = filename.lower()
        patterns = self.config.cleanup_config.get("exclude_patterns", [])
        for pattern in patterns:
            if fnmatch.fnmatch(lowered, pattern.lower()):
                return True
        return False

    def _has_video_files(self, directory: str) -> bool:
        """Prueft ob Ordner mindestens eine Videodatei enthaelt"""
        extensions = set(self.config.files_config.get("scan_extensions", []))
        try:
            return any(
                os.path.splitext(name)[1].lower() in extensions
                for name in os.listdir(directory)
            )
        except PermissionError:
            # Unreadable directory counts as "no videos" -> no cleanup.
            return False
|
||||||
1005
app/services/tvdb.py
Normal file
1005
app/services/tvdb.py
Normal file
File diff suppressed because it is too large
Load diff
1554
app/static/css/style.css
Normal file
1554
app/static/css/style.css
Normal file
File diff suppressed because it is too large
Load diff
BIN
app/static/icons/favicon.ico
Normal file
BIN
app/static/icons/favicon.ico
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 17 KiB |
301
app/static/js/filebrowser.js
Normal file
301
app/static/js/filebrowser.js
Normal file
|
|
@ -0,0 +1,301 @@
|
||||||
|
/**
|
||||||
|
* Filebrowser + Upload fuer VideoKonverter
|
||||||
|
*/
|
||||||
|
|
||||||
|
// === Filebrowser ===
|
||||||
|
|
||||||
|
let fbCurrentPath = "/mnt";
|
||||||
|
let fbSelectedFiles = new Set();
|
||||||
|
let fbSelectedDirs = new Set();
|
||||||
|
|
||||||
|
function openFileBrowser() {
    // Show the overlay and start from a clean selection at /mnt.
    const overlay = document.getElementById("filebrowser-overlay");
    overlay.style.display = "flex";
    fbSelectedFiles.clear();
    fbSelectedDirs.clear();
    fbNavigate("/mnt");
}
|
||||||
|
|
||||||
|
function closeFileBrowser() {
    // Hide the browser overlay; selection is re-initialized on next open.
    document.getElementById("filebrowser-overlay").style.display = "none";
}
|
||||||
|
|
||||||
|
function closeBrowserOnOverlay(e) {
    // Only close when the backdrop itself (not a child element) was clicked.
    if (e.target !== e.currentTarget) return;
    closeFileBrowser();
}
|
||||||
|
|
||||||
|
async function fbNavigate(path) {
    // Remember the new location and drop any stale selection first.
    fbCurrentPath = path;
    fbSelectedFiles.clear();
    fbSelectedDirs.clear();
    updateFbSelection();

    const pane = document.getElementById("fb-content");
    pane.innerHTML = '<div class="fb-loading">Lade...</div>';

    try {
        const resp = await fetch("/api/browse?path=" + encodeURIComponent(path));
        const data = await resp.json();

        // Server-side errors carry an "error" message in the JSON body.
        if (!resp.ok) {
            pane.innerHTML = `<div class="fb-error">${data.error}</div>`;
            return;
        }

        renderBreadcrumb(data.path);
        renderBrowser(data);
    } catch (e) {
        pane.innerHTML = '<div class="fb-error">Verbindungsfehler</div>';
    }
}
|
||||||
|
|
||||||
|
function renderBreadcrumb(path) {
    // Build clickable path segments; "/mnt" is always the root crumb.
    const bc = document.getElementById("fb-breadcrumb");
    const pieces = ['<span class="bc-item" onclick="fbNavigate(\'/mnt\')">/mnt</span>'];

    let acc = "";
    for (const segment of path.split("/").filter(Boolean)) {
        acc += "/" + segment;
        if (acc === "/mnt") continue;  // root crumb already emitted
        pieces.push(` <span class="bc-sep">/</span> `);
        pieces.push(`<span class="bc-item" onclick="fbNavigate('${acc}')">${segment}</span>`);
    }
    bc.innerHTML = pieces.join("");
}
|
||||||
|
|
||||||
|
function renderBrowser(data) {
    // Render the directory/file listing into the browser pane.
    // data: {parent, dirs: [{path, name, video_count}], files: [{path, name, size_human}]}
    const content = document.getElementById("fb-content");
    let html = "";

    // "Nach oben" Link
    if (data.parent) {
        html += `<div class="fb-item fb-dir fb-parent" onclick="fbNavigate('${data.parent}')">
            <span class="fb-icon">↩</span>
            <span class="fb-name">..</span>
        </div>`;
    }

    // Ordner
    // Double-click navigates; the checkbox selects the whole directory.
    for (const dir of data.dirs) {
        const badge = dir.video_count > 0 ? `<span class="fb-badge">${dir.video_count} Videos</span>` : "";
        html += `<div class="fb-item fb-dir" ondblclick="fbNavigate('${dir.path}')">
            <label class="fb-check" onclick="event.stopPropagation()">
                <input type="checkbox" onchange="fbToggleDir('${dir.path}', this.checked)">
            </label>
            <span class="fb-icon" onclick="fbNavigate('${dir.path}')">📁</span>
            <span class="fb-name" onclick="fbNavigate('${dir.path}')">${dir.name}</span>
            ${badge}
        </div>`;
    }

    // Dateien
    for (const file of data.files) {
        html += `<div class="fb-item fb-file">
            <label class="fb-check">
                <input type="checkbox" onchange="fbToggleFile('${file.path}', this.checked)">
            </label>
            <span class="fb-icon">🎥</span>
            <span class="fb-name">${file.name}</span>
            <span class="fb-size">${file.size_human}</span>
        </div>`;
    }

    if (data.dirs.length === 0 && data.files.length === 0) {
        html = '<div class="fb-empty">Keine Videodateien in diesem Ordner</div>';
    }

    content.innerHTML = html;
}
|
||||||
|
|
||||||
|
function fbToggleFile(path, checked) {
    // Keep the selected-file set in sync with its checkbox state.
    if (checked) {
        fbSelectedFiles.add(path);
    } else {
        fbSelectedFiles.delete(path);
    }
    updateFbSelection();
}
|
||||||
|
|
||||||
|
function fbToggleDir(path, checked) {
    // Keep the selected-directory set in sync with its checkbox state.
    if (checked) {
        fbSelectedDirs.add(path);
    } else {
        fbSelectedDirs.delete(path);
    }
    updateFbSelection();
}
|
||||||
|
|
||||||
|
function fbSelectAll() {
    // Toggle semantics: if everything is checked, uncheck all; else check all.
    const boxes = Array.from(
        document.querySelectorAll("#fb-content input[type=checkbox]")
    );
    const target = !boxes.every((box) => box.checked);
    for (const box of boxes) {
        box.checked = target;
        // Fire "change" so fbToggleFile/fbToggleDir update the sets.
        box.dispatchEvent(new Event("change"));
    }
}
|
||||||
|
|
||||||
|
function updateFbSelection() {
    // Reflect the selection count in the footer; disable Convert when empty.
    const count = fbSelectedFiles.size + fbSelectedDirs.size;
    document.getElementById("fb-selected-count").textContent = `${count} ausgewaehlt`;
    document.getElementById("fb-convert").disabled = count === 0;
}
|
||||||
|
|
||||||
|
async function fbConvertSelected() {
    // Files and directories are submitted together as one path list.
    const paths = [...fbSelectedFiles, ...fbSelectedDirs];
    if (paths.length === 0) return;

    const btn = document.getElementById("fb-convert");
    btn.disabled = true;
    btn.textContent = "Wird gesendet...";

    try {
        const resp = await fetch("/api/convert", {
            method: "POST",
            headers: {"Content-Type": "application/json"},
            body: JSON.stringify({files: paths}),
        });
        const data = await resp.json();

        showToast(data.message, "success");
        closeFileBrowser();
    } catch (e) {
        showToast("Fehler beim Senden", "error");
    } finally {
        btn.disabled = false;
        btn.textContent = "Konvertieren";
    }
}
|
||||||
|
|
||||||
|
// === Upload ===
|
||||||
|
|
||||||
|
let uploadFiles = [];
|
||||||
|
|
||||||
|
function openUpload() {
    // Reset the upload dialog to a pristine state each time it opens.
    document.getElementById("upload-overlay").style.display = "flex";
    uploadFiles = [];
    document.getElementById("upload-list").innerHTML = "";
    document.getElementById("upload-progress").style.display = "none";
    document.getElementById("upload-start").disabled = true;
    // Clearing the input lets the same file be re-selected later.
    document.getElementById("upload-input").value = "";
}
|
||||||
|
|
||||||
|
function closeUpload() {
    // Hide the upload overlay; state is reset on next open.
    document.getElementById("upload-overlay").style.display = "none";
}
|
||||||
|
|
||||||
|
function closeUploadOnOverlay(e) {
    // Close only when the backdrop itself (not a child) was clicked.
    if (e.target === e.currentTarget) closeUpload();
}
|
||||||
|
|
||||||
|
function handleDragOver(e) {
    // preventDefault is required so the browser allows dropping here.
    e.preventDefault();
    e.currentTarget.classList.add("drag-over");
}
|
||||||
|
|
||||||
|
function handleDragLeave(e) {
    // Remove the visual drop highlight when the drag leaves the zone.
    e.currentTarget.classList.remove("drag-over");
}
|
||||||
|
|
||||||
|
function handleDrop(e) {
    // Take the dropped files into the pending-upload list.
    e.preventDefault();
    e.currentTarget.classList.remove("drag-over");
    addFiles(e.dataTransfer.files);
}
|
||||||
|
|
||||||
|
function handleFileSelect(e) {
    // File-picker path: same intake as drag & drop.
    addFiles(e.target.files);
}
|
||||||
|
|
||||||
|
function addFiles(fileList) {
    // De-duplicate by (name, size) before appending to the pending list.
    for (const candidate of fileList) {
        const known = uploadFiles.some(
            (f) => f.name === candidate.name && f.size === candidate.size
        );
        if (!known) uploadFiles.push(candidate);
    }
    renderUploadList();
}
|
||||||
|
|
||||||
|
function removeUploadFile(index) {
    // Drop one entry and re-render (remaining button indices shift).
    uploadFiles.splice(index, 1);
    renderUploadList();
}
|
||||||
|
|
||||||
|
function renderUploadList() {
    // Rebuild the pending-upload list; Start is enabled only when non-empty.
    const list = document.getElementById("upload-list");
    if (uploadFiles.length === 0) {
        list.innerHTML = "";
        document.getElementById("upload-start").disabled = true;
        return;
    }

    let html = "";
    uploadFiles.forEach((file, i) => {
        // Human-readable size: KiB below 1 MiB, MiB below 1 GiB, else GiB.
        const size = file.size < 1024 * 1024
            ? (file.size / 1024).toFixed(0) + " KiB"
            : file.size < 1024 * 1024 * 1024
                ? (file.size / (1024 * 1024)).toFixed(1) + " MiB"
                : (file.size / (1024 * 1024 * 1024)).toFixed(2) + " GiB";
        html += `<div class="upload-item">
            <span class="upload-item-name">${file.name}</span>
            <span class="upload-item-size">${size}</span>
            <button class="btn-danger btn-small" onclick="removeUploadFile(${i})">×</button>
        </div>`;
    });
    list.innerHTML = html;
    document.getElementById("upload-start").disabled = false;
}
|
||||||
|
|
||||||
|
async function startUpload() {
    if (uploadFiles.length === 0) return;

    const btn = document.getElementById("upload-start");
    btn.disabled = true;
    btn.textContent = "Wird hochgeladen...";

    const progress = document.getElementById("upload-progress");
    const bar = document.getElementById("upload-bar");
    const status = document.getElementById("upload-status");
    progress.style.display = "block";

    // All pending files go into one multipart request under "files".
    const formData = new FormData();
    for (const file of uploadFiles) formData.append("files", file);

    try {
        // XMLHttpRequest is used (instead of fetch) for upload progress events.
        const xhr = new XMLHttpRequest();
        xhr.open("POST", "/api/upload");

        xhr.upload.onprogress = (e) => {
            if (!e.lengthComputable) return;
            const pct = (e.loaded / e.total) * 100;
            bar.style.width = pct + "%";
            const loaded = (e.loaded / (1024 * 1024)).toFixed(1);
            const total = (e.total / (1024 * 1024)).toFixed(1);
            status.textContent = `${loaded} / ${total} MiB (${pct.toFixed(0)}%)`;
        };

        const result = await new Promise((resolve, reject) => {
            xhr.onload = () => {
                if (xhr.status === 200) resolve(JSON.parse(xhr.responseText));
                else reject(JSON.parse(xhr.responseText));
            };
            xhr.onerror = () => reject({error: "Netzwerkfehler"});
            xhr.send(formData);
        });

        showToast(result.message, "success");
        closeUpload();
    } catch (e) {
        // Leave the dialog open so the user can retry.
        showToast(e.error || "Upload fehlgeschlagen", "error");
        btn.disabled = false;
        btn.textContent = "Hochladen & Konvertieren";
    }
}
|
||||||
|
|
||||||
|
// === Toast ===
|
||||||
|
|
||||||
|
function showToast(message, type) {
    // Silently no-op on pages without a toast container.
    const container = document.getElementById("toast-container");
    if (!container) return;

    const toast = document.createElement("div");
    toast.className = `toast ${type}`;
    toast.textContent = message;
    container.appendChild(toast);

    // Auto-dismiss after 3 seconds.
    setTimeout(() => toast.remove(), 3000);
}
|
||||||
1912
app/static/js/library.js
Normal file
1912
app/static/js/library.js
Normal file
File diff suppressed because it is too large
Load diff
181
app/static/js/websocket.js
Normal file
181
app/static/js/websocket.js
Normal file
|
|
@ -0,0 +1,181 @@
|
||||||
|
/**
|
||||||
|
* WebSocket-Client fuer Echtzeit-Updates
|
||||||
|
* Verbindet sich mit dem Server und aktualisiert Dashboard dynamisch
|
||||||
|
*/
|
||||||
|
|
||||||
|
let ws = null;
|
||||||
|
let videoActive = {};
|
||||||
|
let videoQueue = {};
|
||||||
|
let reconnectTimer = null;
|
||||||
|
|
||||||
|
// WebSocket verbinden
|
||||||
|
function connectWebSocket() {
    // WS_URL is injected by the page; without it there is nothing to do.
    if (!window.WS_URL) return;

    ws = new WebSocket(WS_URL);

    ws.onopen = () => {
        console.log("WebSocket verbunden:", WS_URL);
        // A successful connect cancels any pending reconnect attempt.
        if (reconnectTimer) {
            clearTimeout(reconnectTimer);
            reconnectTimer = null;
        }
    };

    ws.onmessage = (event) => {
        try {
            const packet = JSON.parse(event.data);
            // Exactly one payload kind is dispatched per packet, in
            // priority order: flow, convert, queue.
            if (packet.data_flow !== undefined) {
                updateProgress(packet.data_flow);
            } else if (packet.data_convert !== undefined) {
                updateActiveConversions(packet.data_convert);
            } else if (packet.data_queue !== undefined) {
                updateQueue(packet.data_queue);
            }
        } catch (e) {
            console.error("WebSocket Nachricht parsen fehlgeschlagen:", e);
        }
    };

    ws.onclose = () => {
        // Endless reconnect loop with a fixed 3 s backoff.
        console.log("WebSocket getrennt, Reconnect in 3s...");
        reconnectTimer = setTimeout(connectWebSocket, 3000);
    };

    ws.onerror = (err) => {
        console.error("WebSocket Fehler:", err);
    };
}
|
||||||
|
|
||||||
|
// === Aktive Konvertierungen ===
|
||||||
|
|
||||||
|
function updateActiveConversions(data) {
|
||||||
|
const container = document.getElementById("active-conversions");
|
||||||
|
if (!container) return;
|
||||||
|
|
||||||
|
// Entfernte Jobs loeschen
|
||||||
|
for (const key in videoActive) {
|
||||||
|
if (!(key in data)) {
|
||||||
|
const elem = document.getElementById("convert_" + key);
|
||||||
|
if (elem) elem.remove();
|
||||||
|
delete videoActive[key];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Neue Jobs hinzufuegen
|
||||||
|
for (const [key, video] of Object.entries(data)) {
|
||||||
|
if (!videoActive[key]) {
|
||||||
|
const card = document.createElement("div");
|
||||||
|
card.className = "video-card";
|
||||||
|
card.id = "convert_" + key;
|
||||||
|
card.innerHTML = `
|
||||||
|
<h3 title="${video.source_path}">${video.source_file_name} → ${video.target_file_name}</h3>
|
||||||
|
<div class="progress-container">
|
||||||
|
<div class="progress-bar"></div>
|
||||||
|
</div>
|
||||||
|
<div class="progress-text">
|
||||||
|
<span class="loading-pct">0</span>%
|
||||||
|
</div>
|
||||||
|
<div class="video-card-values">
|
||||||
|
<div class="video-card-values-items"><b>Frames</b><br><span class="frames">0</span></div>
|
||||||
|
<div class="video-card-values-items"><b>FPS</b><br><span class="fps">0</span></div>
|
||||||
|
<div class="video-card-values-items"><b>Speed</b><br><span class="speed">0</span>x</div>
|
||||||
|
<div class="video-card-values-items"><b>Groesse</b><br><span class="size">0</span> <span class="size_unit">KiB</span></div>
|
||||||
|
<div class="video-card-values-items"><b>Bitrate</b><br><span class="bitrate">0</span> <span class="bitrate_unit">kbits/s</span></div>
|
||||||
|
<div class="video-card-values-items"><b>Zeit</b><br><span class="time">0 Min</span></div>
|
||||||
|
<div class="video-card-values-items"><b>Verbleibend</b><br><span class="eta">-</span></div>
|
||||||
|
<div class="video-card-values-items">
|
||||||
|
<button class="btn-danger" onclick="sendCommand('cancel', ${key})">Abbrechen</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
`;
|
||||||
|
container.appendChild(card);
|
||||||
|
videoActive[key] = video;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function updateProgress(flow) {
|
||||||
|
const container = document.getElementById("convert_" + flow.id);
|
||||||
|
if (!container) return;
|
||||||
|
|
||||||
|
container.querySelector(".frames").textContent = flow.frames || 0;
|
||||||
|
container.querySelector(".fps").textContent = flow.fps || 0;
|
||||||
|
container.querySelector(".speed").textContent = flow.speed || 0;
|
||||||
|
container.querySelector(".size").textContent = flow.size ? flow.size[0] : 0;
|
||||||
|
container.querySelector(".size_unit").textContent = flow.size ? flow.size[1] : "KiB";
|
||||||
|
container.querySelector(".bitrate").textContent = flow.bitrate ? flow.bitrate[0] : 0;
|
||||||
|
container.querySelector(".bitrate_unit").textContent = flow.bitrate ? flow.bitrate[1] : "kbits/s";
|
||||||
|
container.querySelector(".time").textContent = flow.time || "0 Min";
|
||||||
|
container.querySelector(".eta").textContent = flow.time_remaining || "-";
|
||||||
|
container.querySelector(".loading-pct").textContent = (flow.loading || 0).toFixed(1);
|
||||||
|
|
||||||
|
const bar = container.querySelector(".progress-bar");
|
||||||
|
bar.style.width = (flow.loading || 0) + "%";
|
||||||
|
}
|
||||||
|
|
||||||
|
// === Warteschlange ===
|
||||||
|
|
||||||
|
function updateQueue(data) {
|
||||||
|
const container = document.getElementById("queue");
|
||||||
|
if (!container) return;
|
||||||
|
|
||||||
|
// Entfernte/geaenderte Jobs loeschen
|
||||||
|
for (const key in videoQueue) {
|
||||||
|
if (!(key in data) || videoQueue[key]?.status !== data[key]?.status) {
|
||||||
|
const elem = document.getElementById("queue_" + key);
|
||||||
|
if (elem) elem.remove();
|
||||||
|
delete videoQueue[key];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Neue Jobs hinzufuegen
|
||||||
|
for (const [key, video] of Object.entries(data)) {
|
||||||
|
if (!videoQueue[key]) {
|
||||||
|
const card = document.createElement("div");
|
||||||
|
card.className = "queue-card";
|
||||||
|
card.id = "queue_" + key;
|
||||||
|
|
||||||
|
let statusHtml;
|
||||||
|
if (video.status === 1) {
|
||||||
|
statusHtml = '<span class="status-badge active">Aktiv</span>';
|
||||||
|
} else if (video.status === 3) {
|
||||||
|
statusHtml = '<span class="status-badge error">Fehler</span>';
|
||||||
|
} else if (video.status === 4) {
|
||||||
|
statusHtml = '<span class="status-badge warn">Abgebrochen</span>';
|
||||||
|
} else {
|
||||||
|
statusHtml = '<span class="status-badge queued">Wartend</span>';
|
||||||
|
}
|
||||||
|
|
||||||
|
card.innerHTML = `
|
||||||
|
<h4 title="${video.source_path}">${video.source_file_name}</h4>
|
||||||
|
<div class="queue-card-footer">
|
||||||
|
${statusHtml}
|
||||||
|
<div>
|
||||||
|
${video.status === 3 || video.status === 4 ?
|
||||||
|
`<button class="btn-secondary btn-small" onclick="sendCommand('retry', ${key})">Wiederholen</button>` : ""}
|
||||||
|
<button class="btn-danger btn-small" onclick="sendCommand('delete', ${key})">Loeschen</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
`;
|
||||||
|
container.appendChild(card);
|
||||||
|
videoQueue[key] = video;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// === Befehle senden ===
|
||||||
|
|
||||||
|
function sendCommand(command, id) {
|
||||||
|
if (ws && ws.readyState === WebSocket.OPEN) {
|
||||||
|
ws.send(JSON.stringify({
|
||||||
|
data_command: { cmd: command, id: id }
|
||||||
|
}));
|
||||||
|
} else {
|
||||||
|
console.warn("WebSocket nicht verbunden");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verbindung herstellen
|
||||||
|
connectWebSocket();
|
||||||
342
app/templates/admin.html
Normal file
342
app/templates/admin.html
Normal file
|
|
@ -0,0 +1,342 @@
|
||||||
|
{% extends "base.html" %}
|
||||||
|
|
||||||
|
{% block title %}Einstellungen - VideoKonverter{% endblock %}
|
||||||
|
|
||||||
|
{% block content %}
|
||||||
|
<section class="admin-section">
|
||||||
|
<h2>Einstellungen</h2>
|
||||||
|
|
||||||
|
<form hx-post="/htmx/settings" hx-target="#save-result" hx-swap="innerHTML">
|
||||||
|
|
||||||
|
<!-- Encoding -->
|
||||||
|
<fieldset>
|
||||||
|
<legend>Encoding</legend>
|
||||||
|
<div class="form-grid">
|
||||||
|
<div class="form-group">
|
||||||
|
<label for="encoding_mode">Modus</label>
|
||||||
|
<select name="encoding_mode" id="encoding_mode">
|
||||||
|
<option value="cpu" {% if settings.encoding.mode == 'cpu' %}selected{% endif %}>CPU</option>
|
||||||
|
<option value="gpu" {% if settings.encoding.mode == 'gpu' %}selected{% endif %}>GPU (Intel VAAPI)</option>
|
||||||
|
<option value="auto" {% if settings.encoding.mode == 'auto' %}selected{% endif %}>Auto-Erkennung</option>
|
||||||
|
</select>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="form-group">
|
||||||
|
<label for="gpu_device">GPU Device</label>
|
||||||
|
<select name="gpu_device" id="gpu_device">
|
||||||
|
{% for device in gpu_devices %}
|
||||||
|
<option value="{{ device }}" {% if device == settings.encoding.gpu_device %}selected{% endif %}>{{ device }}</option>
|
||||||
|
{% endfor %}
|
||||||
|
{% if not gpu_devices %}
|
||||||
|
<option value="/dev/dri/renderD128">Keine GPU erkannt</option>
|
||||||
|
{% endif %}
|
||||||
|
</select>
|
||||||
|
{% if gpu_available %}
|
||||||
|
<span class="status-badge ok">GPU verfuegbar</span>
|
||||||
|
{% else %}
|
||||||
|
<span class="status-badge warn">Keine GPU</span>
|
||||||
|
{% endif %}
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="form-group">
|
||||||
|
<label for="default_preset">Standard-Preset</label>
|
||||||
|
<select name="default_preset" id="default_preset">
|
||||||
|
{% for key, preset in presets.items() %}
|
||||||
|
<option value="{{ key }}" {% if key == settings.encoding.default_preset %}selected{% endif %}>{{ preset.name }}</option>
|
||||||
|
{% endfor %}
|
||||||
|
</select>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="form-group">
|
||||||
|
<label for="max_parallel_jobs">Max. parallele Jobs</label>
|
||||||
|
<input type="number" name="max_parallel_jobs" id="max_parallel_jobs"
|
||||||
|
value="{{ settings.encoding.max_parallel_jobs }}" min="1" max="8">
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</fieldset>
|
||||||
|
|
||||||
|
<!-- Dateien -->
|
||||||
|
<fieldset>
|
||||||
|
<legend>Dateien</legend>
|
||||||
|
<div class="form-grid">
|
||||||
|
<div class="form-group">
|
||||||
|
<label for="target_container">Ziel-Container</label>
|
||||||
|
<select name="target_container" id="target_container">
|
||||||
|
<option value="webm" {% if settings.files.target_container == 'webm' %}selected{% endif %}>WebM (AV1/Opus)</option>
|
||||||
|
<option value="mkv" {% if settings.files.target_container == 'mkv' %}selected{% endif %}>MKV (Matroska)</option>
|
||||||
|
<option value="mp4" {% if settings.files.target_container == 'mp4' %}selected{% endif %}>MP4</option>
|
||||||
|
</select>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="form-group">
|
||||||
|
<label for="target_folder">Ziel-Ordner</label>
|
||||||
|
<input type="text" name="target_folder" id="target_folder"
|
||||||
|
value="{{ settings.files.target_folder }}"
|
||||||
|
placeholder="same = gleicher Ordner">
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="form-group checkbox-group">
|
||||||
|
<label>
|
||||||
|
<input type="checkbox" name="delete_source"
|
||||||
|
{% if settings.files.delete_source %}checked{% endif %}>
|
||||||
|
Quelldatei nach Konvertierung loeschen
|
||||||
|
</label>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="form-group checkbox-group">
|
||||||
|
<label>
|
||||||
|
<input type="checkbox" name="recursive_scan"
|
||||||
|
{% if settings.files.recursive_scan %}checked{% endif %}>
|
||||||
|
Unterordner rekursiv scannen
|
||||||
|
</label>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</fieldset>
|
||||||
|
|
||||||
|
<!-- Cleanup -->
|
||||||
|
<fieldset>
|
||||||
|
<legend>Cleanup</legend>
|
||||||
|
<div class="form-grid">
|
||||||
|
<div class="form-group checkbox-group">
|
||||||
|
<label>
|
||||||
|
<input type="checkbox" name="cleanup_enabled"
|
||||||
|
{% if settings.cleanup.enabled %}checked{% endif %}>
|
||||||
|
Auto-Cleanup aktivieren
|
||||||
|
</label>
|
||||||
|
</div>
|
||||||
|
<div class="form-group">
|
||||||
|
<label>Zu loeschende Extensions</label>
|
||||||
|
<input type="text" name="cleanup_extensions"
|
||||||
|
value="{{ settings.cleanup.delete_extensions | join(', ') }}"
|
||||||
|
placeholder=".avi, .wmv, .nfo, .txt, .jpg">
|
||||||
|
</div>
|
||||||
|
<div class="form-group">
|
||||||
|
<label>Ausnahmen (Muster)</label>
|
||||||
|
<input type="text" name="cleanup_exclude"
|
||||||
|
value="{{ settings.cleanup.exclude_patterns | join(', ') }}"
|
||||||
|
placeholder="readme*, *.md">
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</fieldset>
|
||||||
|
|
||||||
|
<!-- Audio -->
|
||||||
|
<fieldset>
|
||||||
|
<legend>Audio</legend>
|
||||||
|
<div class="form-grid">
|
||||||
|
<div class="form-group">
|
||||||
|
<label for="audio_languages">Sprachen</label>
|
||||||
|
<input type="text" name="audio_languages" id="audio_languages"
|
||||||
|
value="{{ settings.audio.languages | join(', ') }}"
|
||||||
|
placeholder="ger, eng, und">
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="form-group">
|
||||||
|
<label for="audio_codec">Codec</label>
|
||||||
|
<select name="audio_codec" id="audio_codec">
|
||||||
|
<option value="libopus" {% if settings.audio.default_codec == 'libopus' %}selected{% endif %}>Opus</option>
|
||||||
|
<option value="aac" {% if settings.audio.default_codec == 'aac' %}selected{% endif %}>AAC</option>
|
||||||
|
<option value="copy" {% if settings.audio.default_codec == 'copy' %}selected{% endif %}>Stream Copy</option>
|
||||||
|
</select>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="form-group checkbox-group">
|
||||||
|
<label>
|
||||||
|
<input type="checkbox" name="keep_channels"
|
||||||
|
{% if settings.audio.keep_channels %}checked{% endif %}>
|
||||||
|
Kanalanzahl beibehalten (kein Downmix)
|
||||||
|
</label>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</fieldset>
|
||||||
|
|
||||||
|
<!-- Untertitel -->
|
||||||
|
<fieldset>
|
||||||
|
<legend>Untertitel</legend>
|
||||||
|
<div class="form-grid">
|
||||||
|
<div class="form-group">
|
||||||
|
<label for="subtitle_languages">Sprachen</label>
|
||||||
|
<input type="text" name="subtitle_languages" id="subtitle_languages"
|
||||||
|
value="{{ settings.subtitle.languages | join(', ') }}"
|
||||||
|
placeholder="ger, eng">
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</fieldset>
|
||||||
|
|
||||||
|
<!-- TVDB / Bibliothek -->
|
||||||
|
<fieldset>
|
||||||
|
<legend>Bibliothek / TVDB</legend>
|
||||||
|
<div class="form-grid">
|
||||||
|
<div class="form-group">
|
||||||
|
<label for="tvdb_api_key">TVDB API Key</label>
|
||||||
|
<input type="text" name="tvdb_api_key" id="tvdb_api_key"
|
||||||
|
value="{{ settings.library.tvdb_api_key if settings.library else '' }}"
|
||||||
|
placeholder="API Key von thetvdb.com">
|
||||||
|
</div>
|
||||||
|
<div class="form-group">
|
||||||
|
<label for="tvdb_pin">TVDB PIN</label>
|
||||||
|
<input type="text" name="tvdb_pin" id="tvdb_pin"
|
||||||
|
value="{{ settings.library.tvdb_pin if settings.library else '' }}"
|
||||||
|
placeholder="Subscriber PIN (optional)">
|
||||||
|
</div>
|
||||||
|
<div class="form-group">
|
||||||
|
<label for="tvdb_language">TVDB Sprache</label>
|
||||||
|
<select name="tvdb_language" id="tvdb_language">
|
||||||
|
{% set lang = settings.library.tvdb_language if settings.library and settings.library.tvdb_language else 'deu' %}
|
||||||
|
<option value="deu" {% if lang == 'deu' %}selected{% endif %}>Deutsch</option>
|
||||||
|
<option value="eng" {% if lang == 'eng' %}selected{% endif %}>English</option>
|
||||||
|
<option value="fra" {% if lang == 'fra' %}selected{% endif %}>Francais</option>
|
||||||
|
<option value="spa" {% if lang == 'spa' %}selected{% endif %}>Espanol</option>
|
||||||
|
<option value="ita" {% if lang == 'ita' %}selected{% endif %}>Italiano</option>
|
||||||
|
<option value="jpn" {% if lang == 'jpn' %}selected{% endif %}>Japanese</option>
|
||||||
|
</select>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</fieldset>
|
||||||
|
|
||||||
|
<!-- Logging -->
|
||||||
|
<fieldset>
|
||||||
|
<legend>Logging</legend>
|
||||||
|
<div class="form-grid">
|
||||||
|
<div class="form-group">
|
||||||
|
<label for="log_level">Log-Level</label>
|
||||||
|
<select name="log_level" id="log_level">
|
||||||
|
{% for level in ['DEBUG', 'INFO', 'WARNING', 'ERROR'] %}
|
||||||
|
<option value="{{ level }}" {% if level == settings.logging.level %}selected{% endif %}>{{ level }}</option>
|
||||||
|
{% endfor %}
|
||||||
|
</select>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</fieldset>
|
||||||
|
|
||||||
|
<div class="form-actions">
|
||||||
|
<button type="submit" class="btn-primary">Speichern</button>
|
||||||
|
</div>
|
||||||
|
<div id="save-result"></div>
|
||||||
|
</form>
|
||||||
|
</section>
|
||||||
|
|
||||||
|
<!-- Scan-Pfade -->
|
||||||
|
<section class="admin-section">
|
||||||
|
<h2>Bibliothek - Scan-Pfade</h2>
|
||||||
|
<div id="library-paths">
|
||||||
|
<div class="loading-msg">Lade Pfade...</div>
|
||||||
|
</div>
|
||||||
|
<div class="form-grid" style="margin-top:1rem">
|
||||||
|
<div class="form-group">
|
||||||
|
<label>Name</label>
|
||||||
|
<input type="text" id="new-path-name" placeholder="z.B. Serien">
|
||||||
|
</div>
|
||||||
|
<div class="form-group">
|
||||||
|
<label>Pfad</label>
|
||||||
|
<input type="text" id="new-path-path" placeholder="/mnt/30 - Media/Serien">
|
||||||
|
</div>
|
||||||
|
<div class="form-group">
|
||||||
|
<label>Typ</label>
|
||||||
|
<select id="new-path-type">
|
||||||
|
<option value="series">Serien</option>
|
||||||
|
<option value="movie">Filme</option>
|
||||||
|
</select>
|
||||||
|
</div>
|
||||||
|
<div class="form-group" style="justify-content:flex-end">
|
||||||
|
<button class="btn-primary" onclick="addLibraryPath()">Pfad hinzufuegen</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</section>
|
||||||
|
|
||||||
|
<!-- Presets -->
|
||||||
|
<section class="admin-section">
|
||||||
|
<h2>Encoding-Presets</h2>
|
||||||
|
<div class="presets-grid">
|
||||||
|
{% for key, preset in presets.items() %}
|
||||||
|
<div class="preset-card">
|
||||||
|
<h3>{{ preset.name }}</h3>
|
||||||
|
<div class="preset-details">
|
||||||
|
<span class="tag">{{ preset.video_codec }}</span>
|
||||||
|
<span class="tag">{{ preset.container }}</span>
|
||||||
|
<span class="tag">{{ preset.quality_param }}={{ preset.quality_value }}</span>
|
||||||
|
{% if preset.hw_init %}<span class="tag gpu">GPU</span>{% else %}<span class="tag cpu">CPU</span>{% endif %}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
{% endfor %}
|
||||||
|
</div>
|
||||||
|
</section>
|
||||||
|
{% endblock %}
|
||||||
|
|
||||||
|
{% block scripts %}
|
||||||
|
<script>
|
||||||
|
// Scan-Pfade Verwaltung
|
||||||
|
function loadLibraryPaths() {
|
||||||
|
fetch("/api/library/paths")
|
||||||
|
.then(r => r.json())
|
||||||
|
.then(data => {
|
||||||
|
const container = document.getElementById("library-paths");
|
||||||
|
const paths = data.paths || [];
|
||||||
|
if (!paths.length) {
|
||||||
|
container.innerHTML = '<div class="loading-msg">Keine Scan-Pfade konfiguriert</div>';
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
container.innerHTML = paths.map(p => `
|
||||||
|
<div class="preset-card" style="display:flex;justify-content:space-between;align-items:center;margin-bottom:0.5rem">
|
||||||
|
<div>
|
||||||
|
<strong>${p.name}</strong>
|
||||||
|
<span class="tag">${p.media_type === 'series' ? 'Serien' : 'Filme'}</span>
|
||||||
|
<br><span style="font-size:0.8rem;color:#888">${p.path}</span>
|
||||||
|
${p.last_scan ? '<br><span style="font-size:0.75rem;color:#666">Letzter Scan: ' + p.last_scan + '</span>' : ''}
|
||||||
|
</div>
|
||||||
|
<div style="display:flex;gap:0.3rem">
|
||||||
|
<button class="btn-small btn-secondary" onclick="scanPath(${p.id})">Scannen</button>
|
||||||
|
<button class="btn-small btn-danger" onclick="deletePath(${p.id})">Loeschen</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
`).join("");
|
||||||
|
})
|
||||||
|
.catch(() => {
|
||||||
|
document.getElementById("library-paths").innerHTML =
|
||||||
|
'<div style="text-align:center;color:#666;padding:1rem">Fehler beim Laden</div>';
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
function addLibraryPath() {
|
||||||
|
const name = document.getElementById("new-path-name").value.trim();
|
||||||
|
const path = document.getElementById("new-path-path").value.trim();
|
||||||
|
const mediaType = document.getElementById("new-path-type").value;
|
||||||
|
if (!name || !path) {
|
||||||
|
alert("Name und Pfad erforderlich");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
fetch("/api/library/paths", {
|
||||||
|
method: "POST",
|
||||||
|
headers: {"Content-Type": "application/json"},
|
||||||
|
body: JSON.stringify({name: name, path: path, media_type: mediaType}),
|
||||||
|
})
|
||||||
|
.then(r => r.json())
|
||||||
|
.then(data => {
|
||||||
|
if (data.error) {
|
||||||
|
alert("Fehler: " + data.error);
|
||||||
|
} else {
|
||||||
|
document.getElementById("new-path-name").value = "";
|
||||||
|
document.getElementById("new-path-path").value = "";
|
||||||
|
loadLibraryPaths();
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.catch(e => alert("Fehler: " + e));
|
||||||
|
}
|
||||||
|
|
||||||
|
function deletePath(pathId) {
|
||||||
|
if (!confirm("Scan-Pfad und alle zugehoerigen Daten loeschen?")) return;
|
||||||
|
fetch("/api/library/paths/" + pathId, {method: "DELETE"})
|
||||||
|
.then(r => r.json())
|
||||||
|
.then(() => loadLibraryPaths())
|
||||||
|
.catch(e => alert("Fehler: " + e));
|
||||||
|
}
|
||||||
|
|
||||||
|
function scanPath(pathId) {
|
||||||
|
fetch("/api/library/scan/" + pathId, {method: "POST"})
|
||||||
|
.then(r => r.json())
|
||||||
|
.then(data => alert(data.message || "Scan gestartet"))
|
||||||
|
.catch(e => alert("Fehler: " + e));
|
||||||
|
}
|
||||||
|
|
||||||
|
document.addEventListener("DOMContentLoaded", loadLibraryPaths);
|
||||||
|
</script>
|
||||||
|
{% endblock %}
|
||||||
32
app/templates/base.html
Normal file
32
app/templates/base.html
Normal file
|
|
@ -0,0 +1,32 @@
|
||||||
|
<!DOCTYPE html>
|
||||||
|
<html lang="de">
|
||||||
|
<head>
|
||||||
|
<meta charset="UTF-8">
|
||||||
|
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||||
|
<title>{% block title %}VideoKonverter{% endblock %}</title>
|
||||||
|
<link rel="stylesheet" href="/static/css/style.css">
|
||||||
|
<script src="https://unpkg.com/htmx.org@2.0.4"></script>
|
||||||
|
{% block head %}{% endblock %}
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<header>
|
||||||
|
<div class="header-left">
|
||||||
|
<h1>VideoKonverter</h1>
|
||||||
|
</div>
|
||||||
|
<nav>
|
||||||
|
<a href="/" class="nav-link {% if request.path == '/' %}active{% endif %}">Dashboard</a>
|
||||||
|
<a href="/library" class="nav-link {% if request.path.startswith('/library') %}active{% endif %}">Bibliothek</a>
|
||||||
|
<a href="/admin" class="nav-link {% if request.path == '/admin' %}active{% endif %}">Einstellungen</a>
|
||||||
|
<a href="/statistics" class="nav-link {% if request.path == '/statistics' %}active{% endif %}">Statistik</a>
|
||||||
|
</nav>
|
||||||
|
</header>
|
||||||
|
|
||||||
|
<main>
|
||||||
|
{% block content %}{% endblock %}
|
||||||
|
</main>
|
||||||
|
|
||||||
|
<div id="toast-container"></div>
|
||||||
|
|
||||||
|
{% block scripts %}{% endblock %}
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
89
app/templates/dashboard.html
Normal file
89
app/templates/dashboard.html
Normal file
|
|
@ -0,0 +1,89 @@
|
||||||
|
{% extends "base.html" %}
|
||||||
|
|
||||||
|
{% block title %}Dashboard - VideoKonverter{% endblock %}
|
||||||
|
|
||||||
|
{% block content %}
|
||||||
|
<!-- Aktionen -->
|
||||||
|
<section id="actions-section">
|
||||||
|
<div class="action-bar">
|
||||||
|
<button class="btn-primary" onclick="openFileBrowser()">Dateien durchsuchen</button>
|
||||||
|
<button class="btn-secondary" onclick="openUpload()">Video hochladen</button>
|
||||||
|
</div>
|
||||||
|
</section>
|
||||||
|
|
||||||
|
<!-- Aktive Konvertierungen -->
|
||||||
|
<section id="active-section">
|
||||||
|
<h2>Aktive Konvertierungen</h2>
|
||||||
|
<div id="active-conversions">
|
||||||
|
<!-- Wird dynamisch via WebSocket gefuellt -->
|
||||||
|
</div>
|
||||||
|
</section>
|
||||||
|
|
||||||
|
<!-- Warteschlange -->
|
||||||
|
<section id="queue-section">
|
||||||
|
<h2>Warteschlange</h2>
|
||||||
|
<div id="queue">
|
||||||
|
<!-- Wird dynamisch via WebSocket gefuellt -->
|
||||||
|
</div>
|
||||||
|
</section>
|
||||||
|
|
||||||
|
<!-- Filebrowser Modal -->
|
||||||
|
<div id="filebrowser-overlay" class="modal-overlay" style="display:none" onclick="closeBrowserOnOverlay(event)">
|
||||||
|
<div class="modal">
|
||||||
|
<div class="modal-header">
|
||||||
|
<h2>Dateien durchsuchen</h2>
|
||||||
|
<button class="btn-close" onclick="closeFileBrowser()">×</button>
|
||||||
|
</div>
|
||||||
|
<div class="modal-breadcrumb" id="fb-breadcrumb"></div>
|
||||||
|
<div class="modal-body" id="fb-content">
|
||||||
|
Lade...
|
||||||
|
</div>
|
||||||
|
<div class="modal-footer">
|
||||||
|
<span id="fb-selected-count">0 ausgewaehlt</span>
|
||||||
|
<div>
|
||||||
|
<button class="btn-secondary" id="fb-select-all" onclick="fbSelectAll()">Alle auswaehlen</button>
|
||||||
|
<button class="btn-primary" id="fb-convert" onclick="fbConvertSelected()" disabled>Konvertieren</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Upload Modal -->
|
||||||
|
<div id="upload-overlay" class="modal-overlay" style="display:none" onclick="closeUploadOnOverlay(event)">
|
||||||
|
<div class="modal modal-small">
|
||||||
|
<div class="modal-header">
|
||||||
|
<h2>Video hochladen</h2>
|
||||||
|
<button class="btn-close" onclick="closeUpload()">×</button>
|
||||||
|
</div>
|
||||||
|
<div class="modal-body">
|
||||||
|
<div class="upload-zone" id="upload-zone"
|
||||||
|
ondrop="handleDrop(event)" ondragover="handleDragOver(event)" ondragleave="handleDragLeave(event)">
|
||||||
|
<p>Videodateien hierher ziehen</p>
|
||||||
|
<p class="upload-hint">oder</p>
|
||||||
|
<label class="btn-secondary upload-btn">
|
||||||
|
Dateien waehlen
|
||||||
|
<input type="file" id="upload-input" multiple accept="video/*" onchange="handleFileSelect(event)" style="display:none">
|
||||||
|
</label>
|
||||||
|
</div>
|
||||||
|
<div id="upload-list" class="upload-list"></div>
|
||||||
|
<div id="upload-progress" class="upload-progress" style="display:none">
|
||||||
|
<div class="progress-container">
|
||||||
|
<div class="progress-bar" id="upload-bar"></div>
|
||||||
|
</div>
|
||||||
|
<span id="upload-status">Wird hochgeladen...</span>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
<div class="modal-footer">
|
||||||
|
<button class="btn-primary" id="upload-start" onclick="startUpload()" disabled>Hochladen & Konvertieren</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
{% endblock %}
|
||||||
|
|
||||||
|
{% block scripts %}
|
||||||
|
<script>
|
||||||
|
var WS_URL = "{{ ws_url }}";
|
||||||
|
</script>
|
||||||
|
<script src="/static/js/websocket.js"></script>
|
||||||
|
<script src="/static/js/filebrowser.js"></script>
|
||||||
|
{% endblock %}
|
||||||
392
app/templates/library.html
Normal file
392
app/templates/library.html
Normal file
|
|
@ -0,0 +1,392 @@
|
||||||
|
{% extends "base.html" %}
|
||||||
|
|
||||||
|
{% block title %}Bibliothek - VideoKonverter{% endblock %}
|
||||||
|
|
||||||
|
{% block content %}
|
||||||
|
<section class="library-section">
|
||||||
|
<div class="library-header">
|
||||||
|
<h2>Video-Bibliothek</h2>
|
||||||
|
<div class="library-actions">
|
||||||
|
<button class="btn-primary" onclick="startScan()">Scan starten</button>
|
||||||
|
<button class="btn-secondary" onclick="openPathsModal()">Pfade verwalten</button>
|
||||||
|
<button class="btn-secondary" onclick="openCleanModal()">Aufraeumen</button>
|
||||||
|
<button class="btn-secondary" onclick="openImportModal()">Importieren</button>
|
||||||
|
<button class="btn-secondary" onclick="showDuplicates()">Duplikate</button>
|
||||||
|
<button class="btn-secondary" onclick="startAutoMatch()">TVDB Auto-Match</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Scan-Progress -->
|
||||||
|
<div id="scan-progress" class="scan-progress" style="display:none">
|
||||||
|
<div class="progress-container">
|
||||||
|
<div class="progress-bar" id="scan-bar"></div>
|
||||||
|
</div>
|
||||||
|
<span class="scan-status" id="scan-status">Scanne...</span>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Auto-Match Progress -->
|
||||||
|
<div id="auto-match-progress" class="scan-progress" style="display:none">
|
||||||
|
<div class="progress-container">
|
||||||
|
<div class="progress-bar" id="auto-match-bar"></div>
|
||||||
|
</div>
|
||||||
|
<span class="scan-status" id="auto-match-status">TVDB Auto-Match...</span>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Statistik-Leiste -->
|
||||||
|
<div class="library-stats" id="library-stats">
|
||||||
|
<div class="lib-stat"><span class="lib-stat-value" id="stat-videos">-</span><span class="lib-stat-label">Videos</span></div>
|
||||||
|
<div class="lib-stat"><span class="lib-stat-value" id="stat-series">-</span><span class="lib-stat-label">Serien</span></div>
|
||||||
|
<div class="lib-stat"><span class="lib-stat-value" id="stat-size">-</span><span class="lib-stat-label">Gesamt</span></div>
|
||||||
|
<div class="lib-stat"><span class="lib-stat-value" id="stat-duration">-</span><span class="lib-stat-label">Spielzeit</span></div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="library-layout">
|
||||||
|
<!-- Pfad-Navigation -->
|
||||||
|
<nav class="library-nav" id="library-nav">
|
||||||
|
<h3>Bibliotheken</h3>
|
||||||
|
<div id="nav-paths-list">
|
||||||
|
<div class="loading-msg" style="padding:0.5rem;font-size:0.75rem">Lade...</div>
|
||||||
|
</div>
|
||||||
|
</nav>
|
||||||
|
|
||||||
|
<!-- Filter-Sidebar -->
|
||||||
|
<aside class="library-filters" id="filters">
|
||||||
|
<h3>Filter</h3>
|
||||||
|
|
||||||
|
<div class="filter-group">
|
||||||
|
<label>Suche</label>
|
||||||
|
<input type="text" id="filter-search" placeholder="Dateiname..." oninput="debounceFilter()">
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="filter-group">
|
||||||
|
<label>Aufloesung</label>
|
||||||
|
<select id="filter-resolution" onchange="applyFilters()">
|
||||||
|
<option value="">Alle</option>
|
||||||
|
<option value="3840">4K (3840+)</option>
|
||||||
|
<option value="1920">1080p (1920+)</option>
|
||||||
|
<option value="1280">720p (1280+)</option>
|
||||||
|
<option value="720">SD (720+)</option>
|
||||||
|
</select>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="filter-group">
|
||||||
|
<label>Video-Codec</label>
|
||||||
|
<select id="filter-codec" onchange="applyFilters()">
|
||||||
|
<option value="">Alle</option>
|
||||||
|
<option value="hevc">HEVC/H.265</option>
|
||||||
|
<option value="h264">H.264</option>
|
||||||
|
<option value="av1">AV1</option>
|
||||||
|
<option value="mpeg4">MPEG-4</option>
|
||||||
|
<option value="mpeg2video">MPEG-2</option>
|
||||||
|
</select>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="filter-group">
|
||||||
|
<label>Container</label>
|
||||||
|
<select id="filter-container" onchange="applyFilters()">
|
||||||
|
<option value="">Alle</option>
|
||||||
|
<option value="mkv">MKV</option>
|
||||||
|
<option value="mp4">MP4</option>
|
||||||
|
<option value="avi">AVI</option>
|
||||||
|
<option value="webm">WebM</option>
|
||||||
|
<option value="ts">TS</option>
|
||||||
|
<option value="wmv">WMV</option>
|
||||||
|
</select>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="filter-group">
|
||||||
|
<label>Audio-Sprache</label>
|
||||||
|
<select id="filter-audio-lang" onchange="applyFilters()">
|
||||||
|
<option value="">Alle</option>
|
||||||
|
<option value="ger">Deutsch</option>
|
||||||
|
<option value="eng">Englisch</option>
|
||||||
|
</select>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="filter-group">
|
||||||
|
<label>Audio-Kanaele</label>
|
||||||
|
<select id="filter-audio-ch" onchange="applyFilters()">
|
||||||
|
<option value="">Alle</option>
|
||||||
|
<option value="2">Stereo (2.0)</option>
|
||||||
|
<option value="6">5.1 Surround</option>
|
||||||
|
<option value="8">7.1 Surround</option>
|
||||||
|
</select>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="filter-group">
|
||||||
|
<label><input type="checkbox" id="filter-10bit" onchange="applyFilters()"> Nur 10-Bit</label>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="filter-group">
|
||||||
|
<label>Sortierung</label>
|
||||||
|
<select id="filter-sort" onchange="applyFilters()">
|
||||||
|
<option value="file_name">Name</option>
|
||||||
|
<option value="file_size">Groesse</option>
|
||||||
|
<option value="width">Aufloesung</option>
|
||||||
|
<option value="duration_sec">Dauer</option>
|
||||||
|
<option value="video_codec">Codec</option>
|
||||||
|
<option value="scanned_at">Scan-Datum</option>
|
||||||
|
</select>
|
||||||
|
<select id="filter-order" onchange="applyFilters()">
|
||||||
|
<option value="asc">Aufsteigend</option>
|
||||||
|
<option value="desc">Absteigend</option>
|
||||||
|
</select>
|
||||||
|
</div>
|
||||||
|
</aside>
|
||||||
|
|
||||||
|
<!-- Hauptbereich: Dynamische Bereiche pro Library-Pfad -->
|
||||||
|
<div class="library-content" id="library-content">
|
||||||
|
<div class="loading-msg">Lade Bibliothek...</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</section>
|
||||||
|
|
||||||
|
<!-- === MODALS === -->
|
||||||
|
|
||||||
|
<!-- Pfade-Verwaltung Modal -->
|
||||||
|
<div id="paths-modal" class="modal-overlay" style="display:none">
|
||||||
|
<div class="modal">
|
||||||
|
<div class="modal-header">
|
||||||
|
<h2>Scan-Pfade verwalten</h2>
|
||||||
|
<button class="btn-close" onclick="closePathsModal()">×</button>
|
||||||
|
</div>
|
||||||
|
<div class="modal-body" style="padding:1rem">
|
||||||
|
<div id="paths-list"></div>
|
||||||
|
<hr style="border-color:#333; margin:1rem 0">
|
||||||
|
<h3 style="font-size:0.9rem; margin-bottom:0.5rem">Neuen Pfad hinzufuegen</h3>
|
||||||
|
<div class="form-grid">
|
||||||
|
<div class="form-group">
|
||||||
|
<label>Name</label>
|
||||||
|
<input type="text" id="new-path-name" placeholder="z.B. Serien">
|
||||||
|
</div>
|
||||||
|
<div class="form-group">
|
||||||
|
<label>Pfad</label>
|
||||||
|
<input type="text" id="new-path-path" placeholder="/mnt/30 - Media/10 - Serien">
|
||||||
|
</div>
|
||||||
|
<div class="form-group">
|
||||||
|
<label>Typ</label>
|
||||||
|
<select id="new-path-type">
|
||||||
|
<option value="series">Serien</option>
|
||||||
|
<option value="movie">Filme</option>
|
||||||
|
</select>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
<div class="form-actions">
|
||||||
|
<button class="btn-primary" onclick="addPath()">Hinzufuegen</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- TVDB Such-Modal -->
|
||||||
|
<div id="tvdb-modal" class="modal-overlay" style="display:none">
|
||||||
|
<div class="modal modal-small">
|
||||||
|
<div class="modal-header">
|
||||||
|
<h2>TVDB zuordnen</h2>
|
||||||
|
<button class="btn-close" onclick="closeTvdbModal()">×</button>
|
||||||
|
</div>
|
||||||
|
<div class="modal-body" style="padding:1rem">
|
||||||
|
<input type="hidden" id="tvdb-series-id">
|
||||||
|
<div class="form-group">
|
||||||
|
<label>Serie suchen</label>
|
||||||
|
<input type="text" id="tvdb-search-input" placeholder="Serienname..."
|
||||||
|
oninput="debounceTvdbSearch()">
|
||||||
|
</div>
|
||||||
|
<div id="tvdb-results" class="tvdb-results"></div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Duplikate-Modal -->
|
||||||
|
<div id="duplicates-modal" class="modal-overlay" style="display:none">
|
||||||
|
<div class="modal">
|
||||||
|
<div class="modal-header">
|
||||||
|
<h2>Duplikate</h2>
|
||||||
|
<button class="btn-close" onclick="closeDuplicatesModal()">×</button>
|
||||||
|
</div>
|
||||||
|
<div class="modal-body" style="padding:1rem">
|
||||||
|
<div id="duplicates-list" class="duplicates-list">
|
||||||
|
<div class="loading-msg">Suche Duplikate...</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Serien-Detail-Modal -->
|
||||||
|
<div id="series-modal" class="modal-overlay" style="display:none">
|
||||||
|
<div class="modal" style="max-width:1000px">
|
||||||
|
<div class="modal-header">
|
||||||
|
<div style="flex:1">
|
||||||
|
<h2 id="series-modal-title">Serie</h2>
|
||||||
|
<span id="series-modal-genres" class="series-genres-line"></span>
|
||||||
|
</div>
|
||||||
|
<div class="modal-header-actions">
|
||||||
|
<button class="btn-small btn-secondary" id="btn-tvdb-refresh" onclick="tvdbRefresh()" style="display:none">TVDB aktualisieren</button>
|
||||||
|
<button class="btn-small btn-secondary" id="btn-tvdb-unlink" onclick="tvdbUnlink()" style="display:none">TVDB loesen</button>
|
||||||
|
<button class="btn-small btn-secondary" id="btn-metadata-dl" onclick="downloadMetadata()" style="display:none">Metadaten laden</button>
|
||||||
|
<button class="btn-small btn-secondary" id="btn-series-delete-db" onclick="deleteSeries(false)">Aus DB loeschen</button>
|
||||||
|
<button class="btn-small btn-danger" id="btn-series-delete-all" onclick="deleteSeries(true)">Komplett loeschen</button>
|
||||||
|
<button class="btn-close" onclick="closeSeriesModal()">×</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
<div class="modal-body" style="padding:0">
|
||||||
|
<!-- Detail-Tabs -->
|
||||||
|
<div class="detail-tabs">
|
||||||
|
<button class="detail-tab active" onclick="switchDetailTab('episodes')">Episoden</button>
|
||||||
|
<button class="detail-tab" onclick="switchDetailTab('cast')">Darsteller</button>
|
||||||
|
<button class="detail-tab" onclick="switchDetailTab('artworks')">Bilder</button>
|
||||||
|
</div>
|
||||||
|
<div id="series-modal-body" style="padding:1rem">
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Film-Detail-Modal -->
|
||||||
|
<div id="movie-modal" class="modal-overlay" style="display:none">
|
||||||
|
<div class="modal" style="max-width:900px">
|
||||||
|
<div class="modal-header">
|
||||||
|
<div style="flex:1">
|
||||||
|
<h2 id="movie-modal-title">Film</h2>
|
||||||
|
<span id="movie-modal-genres" class="series-genres-line"></span>
|
||||||
|
</div>
|
||||||
|
<div class="modal-header-actions">
|
||||||
|
<button class="btn-small btn-secondary" id="btn-movie-tvdb-unlink" onclick="movieTvdbUnlink()" style="display:none">TVDB loesen</button>
|
||||||
|
<button class="btn-small btn-secondary" onclick="deleteMovie(false)">Aus DB loeschen</button>
|
||||||
|
<button class="btn-small btn-danger" onclick="deleteMovie(true)">Komplett loeschen</button>
|
||||||
|
<button class="btn-close" onclick="closeMovieModal()">×</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
<div class="modal-body" style="padding:1rem">
|
||||||
|
<div id="movie-modal-body"></div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Film-TVDB Such-Modal -->
|
||||||
|
<div id="movie-tvdb-modal" class="modal-overlay" style="display:none">
|
||||||
|
<div class="modal modal-small">
|
||||||
|
<div class="modal-header">
|
||||||
|
<h2>Film TVDB zuordnen</h2>
|
||||||
|
<button class="btn-close" onclick="closeMovieTvdbModal()">×</button>
|
||||||
|
</div>
|
||||||
|
<div class="modal-body" style="padding:1rem">
|
||||||
|
<input type="hidden" id="movie-tvdb-id">
|
||||||
|
<div class="form-group">
|
||||||
|
<label>Film suchen</label>
|
||||||
|
<input type="text" id="movie-tvdb-search-input" placeholder="Filmname..."
|
||||||
|
oninput="debounceMovieTvdbSearch()">
|
||||||
|
</div>
|
||||||
|
<div id="movie-tvdb-results" class="tvdb-results"></div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Clean-Modal -->
|
||||||
|
<div id="clean-modal" class="modal-overlay" style="display:none">
|
||||||
|
<div class="modal" style="max-width:1000px">
|
||||||
|
<div class="modal-header">
|
||||||
|
<h2>Bibliothek aufraeumen</h2>
|
||||||
|
<button class="btn-close" onclick="closeCleanModal()">×</button>
|
||||||
|
</div>
|
||||||
|
<div class="modal-body" style="padding:1rem">
|
||||||
|
<div class="clean-actions" style="margin-bottom:1rem; display:flex; gap:0.5rem; align-items:center;">
|
||||||
|
<button class="btn-primary" onclick="scanForJunk()">Junk scannen</button>
|
||||||
|
<button class="btn-secondary" onclick="deleteSelectedJunk()">Ausgewaehlte loeschen</button>
|
||||||
|
<button class="btn-secondary" onclick="deleteEmptyDirs()">Leere Ordner loeschen</button>
|
||||||
|
<span id="clean-info" class="text-muted" style="margin-left:auto"></span>
|
||||||
|
</div>
|
||||||
|
<div class="clean-filter" style="margin-bottom:0.5rem">
|
||||||
|
<label style="font-size:0.8rem; color:#aaa;">Filter Extension:</label>
|
||||||
|
<select id="clean-ext-filter" onchange="filterCleanList()" style="background:#252525;color:#ddd;border:1px solid #333;border-radius:4px;padding:0.2rem;font-size:0.8rem;">
|
||||||
|
<option value="">Alle</option>
|
||||||
|
</select>
|
||||||
|
<label style="margin-left:0.5rem; font-size:0.8rem; cursor:pointer; color:#ccc;">
|
||||||
|
<input type="checkbox" id="clean-select-all" onchange="toggleCleanSelectAll()"> Alle auswaehlen
|
||||||
|
</label>
|
||||||
|
</div>
|
||||||
|
<div id="clean-list" class="clean-list">
|
||||||
|
<div class="loading-msg">Klicke "Junk scannen" um zu starten</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Import-Modal -->
|
||||||
|
<div id="import-modal" class="modal-overlay" style="display:none">
|
||||||
|
<div class="modal" style="max-width:1100px">
|
||||||
|
<div class="modal-header">
|
||||||
|
<h2>Videos importieren</h2>
|
||||||
|
<button class="btn-close" onclick="closeImportModal()">×</button>
|
||||||
|
</div>
|
||||||
|
<div class="modal-body" style="padding:0">
|
||||||
|
<!-- Schritt 1: Ordner waehlen -->
|
||||||
|
<div id="import-setup">
|
||||||
|
<!-- Filebrowser -->
|
||||||
|
<div class="import-browser-bar">
|
||||||
|
<input type="text" id="import-source" placeholder="/mnt/..." oninput="debounceImportPath()"
|
||||||
|
style="flex:1;background:#252525;color:#ddd;border:1px solid #333;border-radius:5px;padding:0.4rem 0.6rem;font-size:0.85rem">
|
||||||
|
<button class="btn-small btn-secondary" onclick="importBrowse(document.getElementById('import-source').value || '/mnt')">Oeffnen</button>
|
||||||
|
</div>
|
||||||
|
<div id="import-browser" class="import-browser"></div>
|
||||||
|
<!-- Einstellungen + Analysieren -->
|
||||||
|
<div class="import-setup-footer">
|
||||||
|
<div class="import-setup-opts">
|
||||||
|
<label>Ziel:</label>
|
||||||
|
<select id="import-target"></select>
|
||||||
|
<label>Modus:</label>
|
||||||
|
<select id="import-mode">
|
||||||
|
<option value="copy">Kopieren</option>
|
||||||
|
<option value="move">Verschieben</option>
|
||||||
|
</select>
|
||||||
|
</div>
|
||||||
|
<div>
|
||||||
|
<span id="import-folder-info" class="text-muted"></span>
|
||||||
|
<button class="btn-primary" id="btn-analyze-import" onclick="createImportJob()" disabled>Analysieren</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
<!-- Schritt 2: Vorschau -->
|
||||||
|
<div id="import-preview" style="display:none">
|
||||||
|
<div class="import-actions" style="padding:0.6rem 1rem; display:flex; gap:0.5rem; align-items:center; border-bottom:1px solid #2a2a2a;">
|
||||||
|
<button class="btn-primary" id="btn-start-import" onclick="executeImport()">Import starten</button>
|
||||||
|
<button class="btn-secondary" onclick="resetImport()">Zurueck</button>
|
||||||
|
<span id="import-info" class="text-muted" style="margin-left:auto"></span>
|
||||||
|
</div>
|
||||||
|
<div id="import-items-list" class="import-items-list"></div>
|
||||||
|
</div>
|
||||||
|
<!-- Schritt 3: Fortschritt -->
|
||||||
|
<div id="import-progress" style="display:none; padding:1rem;">
|
||||||
|
<div class="progress-container">
|
||||||
|
<div class="progress-bar" id="import-bar"></div>
|
||||||
|
</div>
|
||||||
|
<span class="text-muted" id="import-status-text">Importiere...</span>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
<!-- TVDB Review-Modal -->
|
||||||
|
<div id="tvdb-review-modal" class="modal-overlay" style="display:none">
|
||||||
|
<div class="modal" style="max-width:1100px">
|
||||||
|
<div class="modal-header">
|
||||||
|
<div style="flex:1">
|
||||||
|
<h2>TVDB Vorschlaege pruefen</h2>
|
||||||
|
<span id="tvdb-review-info" class="text-muted" style="font-size:0.8rem"></span>
|
||||||
|
</div>
|
||||||
|
<div class="modal-header-actions">
|
||||||
|
<button class="btn-small btn-secondary" id="btn-review-skip-all" onclick="skipAllReviewItems()">Alle ueberspringen</button>
|
||||||
|
<button class="btn-close" onclick="closeTvdbReviewModal()">×</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
<div class="modal-body" style="padding:0">
|
||||||
|
<div id="tvdb-review-list" class="tvdb-review-list">
|
||||||
|
<div class="loading-msg">Keine Vorschlaege</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
{% endblock %}
|
||||||
|
|
||||||
|
{% block scripts %}
|
||||||
|
<script src="/static/js/library.js"></script>
|
||||||
|
{% endblock %}
|
||||||
47
app/templates/partials/stats_table.html
Normal file
47
app/templates/partials/stats_table.html
Normal file
|
|
@ -0,0 +1,47 @@
|
||||||
|
<table class="data-table">
|
||||||
|
<thead>
|
||||||
|
<tr>
|
||||||
|
<th>Datei</th>
|
||||||
|
<th>Groesse (Quelle)</th>
|
||||||
|
<th>Groesse (Ziel)</th>
|
||||||
|
<th>Dauer</th>
|
||||||
|
<th>FPS</th>
|
||||||
|
<th>Speed</th>
|
||||||
|
<th>Status</th>
|
||||||
|
</tr>
|
||||||
|
</thead>
|
||||||
|
<tbody>
|
||||||
|
{% for entry in entries %}
|
||||||
|
<tr>
|
||||||
|
<td title="{{ entry.source_path }}">{{ entry.source_filename }}</td>
|
||||||
|
<td>{{ "%.1f"|format(entry.source_size_bytes / 1048576) }} MiB</td>
|
||||||
|
<td>{{ "%.1f"|format((entry.target_size_bytes or 0) / 1048576) }} MiB</td>
|
||||||
|
<td>{{ "%.0f"|format(entry.duration_sec or 0) }}s</td>
|
||||||
|
<td>{{ "%.1f"|format(entry.avg_fps or 0) }}</td>
|
||||||
|
<td>{{ "%.2f"|format(entry.avg_speed or 0) }}x</td>
|
||||||
|
<td>
|
||||||
|
{% if entry.status == 2 %}
|
||||||
|
<span class="status-badge ok">OK</span>
|
||||||
|
{% elif entry.status == 3 %}
|
||||||
|
<span class="status-badge error">Fehler</span>
|
||||||
|
{% elif entry.status == 4 %}
|
||||||
|
<span class="status-badge warn">Abgebrochen</span>
|
||||||
|
{% else %}
|
||||||
|
<span class="status-badge">{{ entry.status }}</span>
|
||||||
|
{% endif %}
|
||||||
|
</td>
|
||||||
|
</tr>
|
||||||
|
{% endfor %}
|
||||||
|
</tbody>
|
||||||
|
</table>
|
||||||
|
|
||||||
|
{% if entries | length >= 25 %}
|
||||||
|
<div class="pagination">
|
||||||
|
<button hx-get="/htmx/stats?page={{ page + 1 }}"
|
||||||
|
hx-target="#stats-table"
|
||||||
|
hx-swap="innerHTML"
|
||||||
|
class="btn-secondary">
|
||||||
|
Weitere laden...
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
{% endif %}
|
||||||
47
app/templates/statistics.html
Normal file
47
app/templates/statistics.html
Normal file
|
|
@ -0,0 +1,47 @@
|
||||||
|
{% extends "base.html" %}
|
||||||
|
|
||||||
|
{% block title %}Statistik - VideoKonverter{% endblock %}
|
||||||
|
|
||||||
|
{% block content %}
|
||||||
|
<section class="stats-section">
|
||||||
|
<h2>Statistik</h2>
|
||||||
|
|
||||||
|
<!-- Zusammenfassung -->
|
||||||
|
{% if summary %}
|
||||||
|
<div class="stats-summary">
|
||||||
|
<div class="stat-card">
|
||||||
|
<span class="stat-value">{{ summary.total }}</span>
|
||||||
|
<span class="stat-label">Gesamt</span>
|
||||||
|
</div>
|
||||||
|
<div class="stat-card">
|
||||||
|
<span class="stat-value">{{ summary.finished }}</span>
|
||||||
|
<span class="stat-label">Erfolgreich</span>
|
||||||
|
</div>
|
||||||
|
<div class="stat-card">
|
||||||
|
<span class="stat-value">{{ summary.failed }}</span>
|
||||||
|
<span class="stat-label">Fehlgeschlagen</span>
|
||||||
|
</div>
|
||||||
|
<div class="stat-card">
|
||||||
|
<span class="stat-value">{{ "%.1f"|format(summary.space_saved / 1073741824) }} GiB</span>
|
||||||
|
<span class="stat-label">Platz gespart</span>
|
||||||
|
</div>
|
||||||
|
<div class="stat-card">
|
||||||
|
<span class="stat-value">{{ "%.1f"|format(summary.avg_fps) }}</span>
|
||||||
|
<span class="stat-label">Avg FPS</span>
|
||||||
|
</div>
|
||||||
|
<div class="stat-card">
|
||||||
|
<span class="stat-value">{{ "%.2f"|format(summary.avg_speed) }}x</span>
|
||||||
|
<span class="stat-label">Avg Speed</span>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
<!-- Tabelle -->
|
||||||
|
<div id="stats-table"
|
||||||
|
hx-get="/htmx/stats?page=1"
|
||||||
|
hx-trigger="load"
|
||||||
|
hx-swap="innerHTML">
|
||||||
|
Lade Statistiken...
|
||||||
|
</div>
|
||||||
|
</section>
|
||||||
|
{% endblock %}
|
||||||
52
docker-compose.yml
Normal file
52
docker-compose.yml
Normal file
|
|
@ -0,0 +1,52 @@
|
||||||
|
services:
|
||||||
|
# === GPU-Modus (Produktion auf Unraid) ===
|
||||||
|
# Starten mit: docker compose --profile gpu up --build
|
||||||
|
# Unraid: nobody:users = 99:100
|
||||||
|
video-konverter:
|
||||||
|
build: .
|
||||||
|
container_name: video-konverter
|
||||||
|
restart: unless-stopped
|
||||||
|
user: "${PUID:-99}:${PGID:-100}"
|
||||||
|
ports:
|
||||||
|
- "8080:8080"
|
||||||
|
volumes:
|
||||||
|
# Konfiguration (persistent)
|
||||||
|
- ./app/cfg:/opt/video-konverter/app/cfg
|
||||||
|
# Daten (Queue-Persistierung)
|
||||||
|
- ./data:/opt/video-konverter/data
|
||||||
|
# Logs
|
||||||
|
- ./logs:/opt/video-konverter/logs
|
||||||
|
# /mnt 1:1 durchreichen - Pfade von Dolphin stimmen dann im Container
|
||||||
|
- /mnt:/mnt:rw
|
||||||
|
devices:
|
||||||
|
# Intel A380 GPU - beide Devices noetig!
|
||||||
|
- /dev/dri/renderD128:/dev/dri/renderD128
|
||||||
|
- /dev/dri/card0:/dev/dri/card0
|
||||||
|
group_add:
|
||||||
|
- "video"
|
||||||
|
environment:
|
||||||
|
- LIBVA_DRIVER_NAME=iHD
|
||||||
|
- LIBVA_DRIVERS_PATH=/usr/lib/x86_64-linux-gnu/dri
|
||||||
|
profiles:
|
||||||
|
- gpu
|
||||||
|
|
||||||
|
# === CPU-Modus (lokales Testen ohne GPU) ===
|
||||||
|
# Starten mit: docker compose --profile cpu up --build
|
||||||
|
# Lokal: CIFS-Mount nutzt UID 1000, daher PUID/PGID ueberschreiben:
|
||||||
|
# PUID=1000 PGID=1000 docker compose --profile cpu up --build
|
||||||
|
video-konverter-cpu:
|
||||||
|
build: .
|
||||||
|
container_name: video-konverter-cpu
|
||||||
|
user: "${PUID:-99}:${PGID:-100}"
|
||||||
|
ports:
|
||||||
|
- "8080:8080"
|
||||||
|
volumes:
|
||||||
|
- ./app/cfg:/opt/video-konverter/app/cfg
|
||||||
|
- ./data:/opt/video-konverter/data
|
||||||
|
- ./logs:/opt/video-konverter/logs
|
||||||
|
# /mnt 1:1 durchreichen - Pfade identisch zum Host
|
||||||
|
- /mnt:/mnt:rw
|
||||||
|
environment:
|
||||||
|
- VIDEO_KONVERTER_MODE=cpu
|
||||||
|
profiles:
|
||||||
|
- cpu
|
||||||
6
requirements.txt
Normal file
6
requirements.txt
Normal file
|
|
@ -0,0 +1,6 @@
|
||||||
|
aiohttp>=3.9.0
|
||||||
|
aiohttp-jinja2>=1.6
|
||||||
|
jinja2>=3.1.0
|
||||||
|
PyYAML>=6.0
|
||||||
|
aiomysql>=0.2.0
|
||||||
|
tvdb-v4-official>=1.1.0
|
||||||
Loading…
Reference in a new issue