# docker.videokonverter/video-konverter/app/services/library.py
# Extracted from Docker image videoconverter:2.9
# (commit 37dff4de69, "feat: VideoKonverter v2.9 - Projekt-Reset aus
# Docker-Image", 2026-02-27 11:41:48 +01:00).
# Contains the two-phase import workflow with series assignment.
# File stats at extraction: 2139 lines, 88 KiB, Python.
# Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
"""Video-Bibliothek Service: Scanning, DB-Verwaltung, Filter, Duplikat-Finder"""
import asyncio
import json
import logging
import os
import re
from pathlib import Path
from typing import Optional, TYPE_CHECKING
import aiomysql
from app.config import Config
from app.services.probe import ProbeService
if TYPE_CHECKING:
from app.routes.ws import WebSocketManager
# --- Filename parsing patterns for series/episode detection ---
# "S01E02", "s01e02", "S1E2"
RE_SXXEXX = re.compile(r'[Ss](\d{1,2})[Ee](\d{1,3})')
# Double episodes: "S01E01E02", "S01E01-E02", "S01E01+E02"
# (group 3 captures the optional end episode)
RE_SXXEXX_MULTI = re.compile(
    r'[Ss](\d{1,2})[Ee](\d{1,3})(?:[-+]?[Ee](\d{1,3}))?'
)
# "1x02", "01x02"
RE_XXxXX = re.compile(r'(\d{1,2})x(\d{2,3})')
# Double episodes: "1x01-02", "1x01+02"
RE_XXxXX_MULTI = re.compile(r'(\d{1,2})x(\d{2,3})(?:[-+](\d{2,3}))?')
# Season folder names: "Season 01", "Staffel 1", "S01"
RE_SEASON_DIR = re.compile(
    r'^(?:Season|Staffel|S)\s*(\d{1,2})$', re.IGNORECASE
)
# Leading episode number in the file name: "01 - Pilot.mkv", "01.mkv"
RE_LEADING_NUM = re.compile(r'^(\d{1,3})(?:\s*[-._]\s*|\.)(.+)')
# File extensions that count as library videos
VIDEO_EXTENSIONS = {
    '.mkv', '.mp4', '.avi', '.wmv', '.vob', '.ts',
    '.m4v', '.flv', '.mov', '.webm', '.mpg', '.mpeg',
}
class LibraryService:
"""Verwaltet die Video-Bibliothek mit MariaDB-Backend"""
def __init__(self, config: 'Config', ws_manager: 'WebSocketManager'):
    """Set up the service; no I/O happens here.

    The DB pool is created lazily on first use (see _get_pool).
    """
    self.config = config
    self.ws_manager = ws_manager
    # Lazily-created aiomysql pool
    self._db_pool: 'Optional[aiomysql.Pool]' = None
    # Guard flag so only one scan runs at a time
    self._scanning: bool = False
    # Progress snapshot broadcast to WebSocket clients during scans
    self._scan_progress: dict = {
        "status": "idle", "current": "", "total": 0, "done": 0
    }
@property
def library_config(self) -> dict:
    """Return the ``library`` section of the settings (empty dict if absent)."""
    settings = self.config.settings
    return settings.get("library", {})
async def start(self) -> None:
    """Bring the service up: make sure all library tables exist."""
    await self._init_db()
    logging.info("LibraryService gestartet")
async def stop(self) -> None:
    """Shut down the connection pool if one was ever created."""
    pool = self._db_pool
    if pool is None:
        return
    pool.close()
    await pool.wait_closed()
# === DB-Pool (geteilt mit QueueService ueber gleiche Config) ===
async def _get_pool(self) -> 'Optional[aiomysql.Pool]':
    """Return the shared aiomysql pool, creating it on first call.

    Returns None when the database is unreachable; callers treat that
    as "library features unavailable" rather than a hard error.
    """
    if self._db_pool is not None:
        return self._db_pool
    cfg = self.config.settings.get("database", {})
    try:
        pool = await aiomysql.create_pool(
            host=cfg.get("host", "192.168.155.11"),
            port=cfg.get("port", 3306),
            user=cfg.get("user", "video"),
            # NOTE(review): credentials fall back to hard-coded
            # defaults; consider requiring them from config instead.
            password=cfg.get("password", "8715"),
            db=cfg.get("database", "video_converter"),
            charset="utf8mb4",
            autocommit=True,
            minsize=1,
            maxsize=5,
            connect_timeout=10,
        )
    except Exception as e:
        logging.error(f"LibraryService DB-Verbindung fehlgeschlagen: {e}")
        return None
    self._db_pool = pool
    return pool
async def _init_db(self) -> None:
    """Create all library tables and apply in-place schema migrations.

    Idempotent: uses CREATE TABLE IF NOT EXISTS plus best-effort
    ALTER TABLE migrations whose failures are swallowed (normally the
    "duplicate column" case on repeat startups). Errors are logged,
    never raised.
    """
    pool = await self._get_pool()
    if not pool:
        # No DB connection -> library features stay disabled.
        return
    try:
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                # Configured scan roots (one row per library location)
                await cur.execute("""
                    CREATE TABLE IF NOT EXISTS library_paths (
                        id INT AUTO_INCREMENT PRIMARY KEY,
                        name VARCHAR(128) NOT NULL,
                        path VARCHAR(1024) NOT NULL,
                        media_type ENUM('series','movie') NOT NULL,
                        enabled TINYINT DEFAULT 1,
                        last_scan TIMESTAMP NULL,
                        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
                    ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4
                """)
                # Migration: last_video_count column (progress estimate
                # for the next scan).
                # NOTE(review): the bare except also hides real errors
                # (e.g. lost connection), not just "duplicate column" —
                # consider checking the MySQL error code instead.
                try:
                    await cur.execute(
                        "ALTER TABLE library_paths "
                        "ADD COLUMN last_video_count INT DEFAULT 0"
                    )
                except Exception:
                    pass  # column already exists
                # Series (one row per series folder)
                await cur.execute("""
                    CREATE TABLE IF NOT EXISTS library_series (
                        id INT AUTO_INCREMENT PRIMARY KEY,
                        library_path_id INT NOT NULL,
                        folder_name VARCHAR(512) NOT NULL,
                        folder_path VARCHAR(1024) NOT NULL,
                        tvdb_id INT NULL,
                        title VARCHAR(512) NULL,
                        overview TEXT NULL,
                        first_aired DATE NULL,
                        poster_url VARCHAR(512) NULL,
                        status VARCHAR(64) NULL,
                        total_seasons INT DEFAULT 0,
                        total_episodes INT DEFAULT 0,
                        local_episodes INT DEFAULT 0,
                        missing_episodes INT DEFAULT 0,
                        redundant_files INT DEFAULT 0,
                        last_updated TIMESTAMP NULL,
                        FOREIGN KEY (library_path_id)
                            REFERENCES library_paths(id) ON DELETE CASCADE,
                        INDEX idx_tvdb_id (tvdb_id),
                        INDEX idx_library (library_path_id)
                    ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4
                """)
                # Videos (one row per file; unique on file_path)
                await cur.execute("""
                    CREATE TABLE IF NOT EXISTS library_videos (
                        id INT AUTO_INCREMENT PRIMARY KEY,
                        library_path_id INT NOT NULL,
                        series_id INT NULL,
                        file_path VARCHAR(1024) NOT NULL,
                        file_name VARCHAR(512) NOT NULL,
                        file_size BIGINT NOT NULL,
                        season_number INT NULL,
                        episode_number INT NULL,
                        episode_title VARCHAR(512) NULL,
                        video_codec VARCHAR(64),
                        width INT,
                        height INT,
                        resolution VARCHAR(16),
                        frame_rate DOUBLE,
                        video_bitrate INT,
                        is_10bit TINYINT DEFAULT 0,
                        hdr VARCHAR(32) NULL,
                        audio_tracks JSON,
                        subtitle_tracks JSON,
                        container VARCHAR(16),
                        duration_sec DOUBLE,
                        scanned_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                        FOREIGN KEY (library_path_id)
                            REFERENCES library_paths(id) ON DELETE CASCADE,
                        FOREIGN KEY (series_id)
                            REFERENCES library_series(id) ON DELETE SET NULL,
                        INDEX idx_series (series_id),
                        INDEX idx_library (library_path_id),
                        INDEX idx_codec (video_codec),
                        INDEX idx_resolution (width, height),
                        UNIQUE INDEX idx_filepath (file_path(768))
                    ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4
                """)
                # Movies (analogous to series, but without seasons/episodes)
                await cur.execute("""
                    CREATE TABLE IF NOT EXISTS library_movies (
                        id INT AUTO_INCREMENT PRIMARY KEY,
                        library_path_id INT NOT NULL,
                        folder_name VARCHAR(512) NOT NULL,
                        folder_path VARCHAR(1024) NOT NULL,
                        tvdb_id INT NULL,
                        title VARCHAR(512) NULL,
                        overview TEXT NULL,
                        year INT NULL,
                        poster_url VARCHAR(512) NULL,
                        genres VARCHAR(512) NULL,
                        runtime INT NULL,
                        status VARCHAR(64) NULL,
                        video_count INT DEFAULT 0,
                        total_size BIGINT DEFAULT 0,
                        last_updated TIMESTAMP NULL,
                        FOREIGN KEY (library_path_id)
                            REFERENCES library_paths(id) ON DELETE CASCADE,
                        INDEX idx_tvdb_id (tvdb_id),
                        INDEX idx_library (library_path_id),
                        UNIQUE INDEX idx_folder (folder_path(768))
                    ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4
                """)
                # TVDB episode cache (avoids repeated API round-trips)
                await cur.execute("""
                    CREATE TABLE IF NOT EXISTS tvdb_episode_cache (
                        id INT AUTO_INCREMENT PRIMARY KEY,
                        series_tvdb_id INT NOT NULL,
                        season_number INT NOT NULL,
                        episode_number INT NOT NULL,
                        episode_name VARCHAR(512),
                        aired DATE NULL,
                        runtime INT NULL,
                        cached_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                        INDEX idx_series (series_tvdb_id),
                        UNIQUE INDEX idx_episode (
                            series_tvdb_id, season_number, episode_number
                        )
                    ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4
                """)
                # Migration: movie_id column in library_videos
                try:
                    await cur.execute(
                        "ALTER TABLE library_videos "
                        "ADD COLUMN movie_id INT NULL, "
                        "ADD INDEX idx_movie (movie_id), "
                        "ADD FOREIGN KEY (movie_id) "
                        "REFERENCES library_movies(id) ON DELETE SET NULL"
                    )
                except Exception:
                    pass  # column already exists
                # Migration: episode_end column for double episodes
                # (E01E02 packed into a single file)
                try:
                    await cur.execute(
                        "ALTER TABLE library_videos "
                        "ADD COLUMN episode_end INT NULL AFTER episode_number"
                    )
                    logging.info("episode_end Spalte hinzugefuegt")
                except Exception:
                    pass  # column already exists
                # Migration: redundant_files column for duplicate detection
                try:
                    await cur.execute(
                        "ALTER TABLE library_series "
                        "ADD COLUMN redundant_files INT DEFAULT 0"
                    )
                    logging.info("redundant_files Spalte hinzugefuegt")
                except Exception:
                    pass  # column already exists
                logging.info("Bibliotheks-Tabellen initialisiert")
    except Exception as e:
        logging.error(f"Bibliotheks-Tabellen erstellen fehlgeschlagen: {e}")
# === Scan-Pfade verwalten ===
async def get_paths(self) -> list[dict]:
    """Load every configured scan path, ordered by name.

    Returns an empty list when the DB is unavailable or the query fails.
    """
    pool = await self._get_pool()
    if not pool:
        return []
    try:
        async with pool.acquire() as conn:
            async with conn.cursor(aiomysql.DictCursor) as cur:
                await cur.execute(
                    "SELECT * FROM library_paths ORDER BY name"
                )
                return [
                    self._serialize_row(row)
                    for row in await cur.fetchall()
                ]
    except Exception as e:
        logging.error(f"Scan-Pfade laden fehlgeschlagen: {e}")
        return []
async def add_path(self, name: str, path: str,
                   media_type: str) -> Optional[int]:
    """Insert a new scan path and return its id, or None on failure.

    media_type must be 'series' or 'movie'; anything else is rejected.
    """
    if media_type not in ('series', 'movie'):
        return None
    pool = await self._get_pool()
    if not pool:
        return None
    try:
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                await cur.execute(
                    "INSERT INTO library_paths (name, path, media_type) "
                    "VALUES (%s, %s, %s)",
                    (name, path, media_type)
                )
                new_id = cur.lastrowid
        logging.info(f"Scan-Pfad hinzugefuegt: {name} ({path})")
        return new_id
    except Exception as e:
        logging.error(f"Scan-Pfad hinzufuegen fehlgeschlagen: {e}")
        return None
async def remove_path(self, path_id: int) -> bool:
    """Delete a scan path; child rows go away via ON DELETE CASCADE."""
    pool = await self._get_pool()
    if not pool:
        return False
    try:
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                await cur.execute(
                    "DELETE FROM library_paths WHERE id = %s", (path_id,)
                )
                removed = cur.rowcount > 0
        logging.info(f"Scan-Pfad entfernt: ID {path_id}")
        return removed
    except Exception as e:
        logging.error(f"Scan-Pfad entfernen fehlgeschlagen: {e}")
        return False
async def update_path(self, path_id: int, name: Optional[str] = None,
                      path: Optional[str] = None,
                      media_type: Optional[str] = None,
                      enabled: Optional[bool] = None) -> bool:
    """Update selected fields of a scan path.

    Only non-None arguments are written; an invalid media_type is
    silently ignored (as before). Returns True when a row was changed.

    Fixes: parameters were annotated `str = None` (implicit Optional,
    invalid per PEP 484); the no-op case (nothing to update) no longer
    opens a DB connection first.
    """
    updates: list[str] = []
    params: list = []
    if name is not None:
        updates.append("name = %s")
        params.append(name)
    if path is not None:
        updates.append("path = %s")
        params.append(path)
    # None is not in the tuple, so this also covers the old
    # `is not None and in (...)` check.
    if media_type in ('series', 'movie'):
        updates.append("media_type = %s")
        params.append(media_type)
    if enabled is not None:
        updates.append("enabled = %s")
        params.append(1 if enabled else 0)
    if not updates:
        # Nothing to change -> no DB round-trip needed
        return False
    params.append(path_id)
    pool = await self._get_pool()
    if not pool:
        return False
    try:
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                await cur.execute(
                    f"UPDATE library_paths SET {', '.join(updates)} "
                    f"WHERE id = %s", params
                )
                return cur.rowcount > 0
    except Exception as e:
        logging.error(f"Scan-Pfad aktualisieren fehlgeschlagen: {e}")
        return False
async def unlink_tvdb(self, series_id: int) -> bool:
    """Clear all TVDB metadata of a series, keeping the local files."""
    pool = await self._get_pool()
    if not pool:
        return False
    try:
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                await cur.execute(
                    "UPDATE library_series SET "
                    "tvdb_id = NULL, poster_url = NULL, "
                    "overview = NULL, first_aired = NULL, "
                    "status = NULL, total_seasons = 0, "
                    "total_episodes = 0, missing_episodes = 0, "
                    "last_updated = NOW() "
                    "WHERE id = %s", (series_id,)
                )
                changed = cur.rowcount > 0
        logging.info(
            f"TVDB-Zuordnung geloest: Serie {series_id}"
        )
        return changed
    except Exception as e:
        logging.error(f"TVDB loesen fehlgeschlagen: {e}")
        return False
async def delete_series(self, series_id: int,
                        delete_files: bool = False) -> dict:
    """Delete a series and its video rows; optionally wipe its folder.

    Returns {"success": True, "deleted_videos_db": n, ...} or
    {"error": msg}. With delete_files=True the series folder is removed
    via shutil.rmtree; read-only entries are chmod'ed and retried.
    Filesystem failures go into "folder_error" without undoing the DB
    deletion.
    """
    pool = await self._get_pool()
    if not pool:
        return {"error": "Keine DB-Verbindung"}
    try:
        # Fetch the folder path before the row disappears from the DB
        folder_path = None
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                await cur.execute(
                    "SELECT folder_path FROM library_series "
                    "WHERE id = %s", (series_id,)
                )
                row = await cur.fetchone()
                if not row:
                    return {"error": "Serie nicht gefunden"}
                folder_path = row[0]
                # Remove the series' video rows
                await cur.execute(
                    "DELETE FROM library_videos WHERE series_id = %s",
                    (series_id,)
                )
                vids = cur.rowcount
                # Remove the series row itself
                await cur.execute(
                    "DELETE FROM library_series WHERE id = %s",
                    (series_id,)
                )
                result = {"success": True, "deleted_videos_db": vids}
                # Optionally delete the folder tree on disk
                if delete_files and folder_path and os.path.isdir(folder_path):
                    import shutil
                    import stat

                    def _rm_error(func, path, exc_info):
                        # Retry once with owner rwx (read-only files on
                        # e.g. SMB shares); give up silently otherwise
                        try:
                            os.chmod(path, stat.S_IRWXU)
                            func(path)
                        except Exception:
                            pass
                    try:
                        shutil.rmtree(folder_path, onerror=_rm_error)
                        result["deleted_folder"] = folder_path
                        logging.info(
                            f"Serie {series_id} komplett geloescht "
                            f"inkl. Ordner: {folder_path}"
                        )
                    except Exception as e:
                        result["folder_error"] = str(e)
                        logging.error(
                            f"Ordner loeschen fehlgeschlagen: "
                            f"{folder_path}: {e}"
                        )
                else:
                    logging.info(
                        f"Serie {series_id} aus DB geloescht ({vids} Videos)"
                    )
                return result
    except Exception as e:
        logging.error(f"Serie loeschen fehlgeschlagen: {e}")
        return {"error": str(e)}
async def delete_video(self, video_id: int,
                       delete_file: bool = False) -> dict:
    """Remove one video row; optionally delete the file on disk.

    Returns a dict with 'success'/'error' and, when delete_file is
    set, 'file_deleted'/'file_error' details.
    """
    pool = await self._get_pool()
    if not pool:
        return {"error": "Keine DB-Verbindung"}
    try:
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                await cur.execute(
                    "SELECT file_path FROM library_videos WHERE id = %s",
                    (video_id,)
                )
                row = await cur.fetchone()
                if not row:
                    return {"error": "Video nicht gefunden"}
                file_path = row[0]
                # Drop the DB row first
                await cur.execute(
                    "DELETE FROM library_videos WHERE id = %s",
                    (video_id,)
                )
        result = {"success": True, "file_path": file_path}
        if not delete_file:
            return result
        # Caller also wants the file gone
        if file_path and os.path.isfile(file_path):
            try:
                os.remove(file_path)
            except Exception as e:
                result["file_error"] = str(e)
                logging.error(
                    f"Video-Datei loeschen fehlgeschlagen: "
                    f"{file_path}: {e}"
                )
            else:
                result["file_deleted"] = True
                logging.info(f"Video geloescht: {file_path}")
        else:
            result["file_deleted"] = False
            result["file_error"] = "Datei nicht gefunden"
        return result
    except Exception as e:
        logging.error(f"Video loeschen fehlgeschlagen: {e}")
        return {"error": str(e)}
async def get_movies(self, filters: Optional[dict] = None,
                     page: int = 1, limit: int = 50) -> dict:
    """Query movie entries only (delegates to get_videos).

    Fixes: the caller's filter dict is no longer mutated in place —
    a copy is taken before forcing media_type to 'movie'. Also corrects
    the implicit-Optional annotation (`dict = None`).
    """
    # Copy so the caller's dict stays untouched
    filters = dict(filters) if filters else {}
    filters["media_type"] = "movie"
    # series_id must be NULL (not an episode of a series)
    return await self.get_videos(filters, page, limit)
# === Scanning ===
async def scan_all(self) -> dict:
    """Scan every enabled library path; only one scan may run at a time."""
    if self._scanning:
        return {"error": "Scan laeuft bereits"}
    enabled = [p for p in await self.get_paths() if p.get("enabled")]
    if not enabled:
        return {"error": "Keine aktiven Scan-Pfade konfiguriert"}
    self._scanning = True
    found = 0
    try:
        for lib_path in enabled:
            found += await self._scan_path(lib_path)
        return {"success": True, "videos_found": found}
    finally:
        # Always reset state and tell clients the scan ended,
        # even when a path scan raised.
        self._scanning = False
        self._scan_progress = {
            "status": "idle", "current": "", "total": 0, "done": 0
        }
        if self.ws_manager:
            await self.ws_manager.broadcast({
                "data_library_scan": self._scan_progress
            })
async def scan_single_path(self, path_id: int) -> dict:
    """Scan exactly one configured library path by its id."""
    if self._scanning:
        return {"error": "Scan laeuft bereits"}
    pool = await self._get_pool()
    if not pool:
        return {"error": "Keine DB-Verbindung"}
    try:
        async with pool.acquire() as conn:
            async with conn.cursor(aiomysql.DictCursor) as cur:
                await cur.execute(
                    "SELECT * FROM library_paths WHERE id = %s",
                    (path_id,)
                )
                lib_path = await cur.fetchone()
    except Exception as e:
        return {"error": str(e)}
    if not lib_path:
        return {"error": "Pfad nicht gefunden"}
    self._scanning = True
    try:
        found = await self._scan_path(self._serialize_row(lib_path))
        return {"success": True, "videos_found": found}
    finally:
        # Reset state and notify clients that the scan ended
        self._scanning = False
        self._scan_progress = {
            "status": "idle", "current": "", "total": 0, "done": 0
        }
        if self.ws_manager:
            await self.ws_manager.broadcast({
                "data_library_scan": self._scan_progress
            })
async def _scan_path(self, lib_path: dict) -> int:
    """Scan one library path entry and refresh its bookkeeping columns.

    Returns the number of videos added/updated during this scan.
    """
    base_path = lib_path["path"]
    path_id = lib_path["id"]
    if not os.path.isdir(base_path):
        logging.warning(f"Scan-Pfad nicht gefunden: {base_path}")
        return 0
    logging.info(f"Scanne Bibliothek: {lib_path['name']} ({base_path})")
    # Seed the progress total from the previous scan's file count so
    # the progress bar is meaningful right from the start.
    cached_count = lib_path.get("last_video_count", 0) or 0
    if cached_count > 0:
        self._scan_progress["total"] = cached_count
    if lib_path["media_type"] == "series":
        count = await self._scan_series_path(base_path, path_id)
    else:
        count = await self._scan_movie_path(base_path, path_id)
    # Drop DB rows whose folders/files no longer exist
    await self._cleanup_stale_entries(path_id)
    # Persist scan timestamp and file count for the next run's estimate
    total_videos = self._scan_progress.get("done", count)
    pool = await self._get_pool()
    if pool:
        try:
            async with pool.acquire() as conn:
                async with conn.cursor() as cur:
                    await cur.execute(
                        "UPDATE library_paths "
                        "SET last_scan = NOW(), "
                        "last_video_count = %s "
                        "WHERE id = %s",
                        (total_videos, path_id)
                    )
        except Exception:
            pass
    logging.info(f"Scan abgeschlossen: {lib_path['name']} "
                 f"({count} Videos)")
    return count
async def _scan_series_path(self, base_path: str,
                            path_id: int) -> int:
    """Scan a series library root: one subfolder = one series.

    For each visible subfolder a library_series row is ensured, every
    video file below it is (re)probed into library_videos, and the
    per-series episode counters are refreshed. Progress updates are
    broadcast over WebSocket while scanning. Returns the number of
    videos added/updated.
    """
    count = 0
    try:
        entries = sorted(os.listdir(base_path))
    except OSError as e:
        logging.error(f"Ordner lesen fehlgeschlagen: {base_path}: {e}")
        return 0
    for entry in entries:
        # Skip hidden folders
        if entry.startswith("."):
            continue
        series_path = os.path.join(base_path, entry)
        if not os.path.isdir(series_path):
            continue
        # Find or create the series row for this folder
        series_id = await self._ensure_series(path_id, entry, series_path)
        if not series_id:
            continue
        # Collect all video files below the series folder (recursive)
        video_files = self._find_videos_recursive(series_path)
        # Grow the progress total only when new files push past the
        # cached estimate from the previous scan
        new_total = self._scan_progress["done"] + len(video_files)
        if new_total > self._scan_progress["total"]:
            self._scan_progress["total"] = new_total
        self._scan_progress.update({
            "status": "scanning",
            "current": entry,
        })
        # Broadcast scan progress.
        # NOTE(review): unlike scan_all(), ws_manager is used here
        # without a None check — confirm it is always set.
        await self.ws_manager.broadcast({
            "data_library_scan": self._scan_progress
        })
        for vf in video_files:
            added = await self._add_video_to_db(
                path_id, series_id, vf, series_path
            )
            if added:
                count += 1
            self._scan_progress["done"] += 1
            # Throttled WS updates (every 10 files)
            if self._scan_progress["done"] % 10 == 0:
                await self.ws_manager.broadcast({
                    "data_library_scan": self._scan_progress
                })
        # Refresh local episode counters for this series
        await self._update_series_counts(series_id)
    return count
async def _cleanup_stale_entries(self, path_id: int) -> None:
    """Remove orphaned library rows after scanning one path.

    Four passes: (1) video rows whose files vanished from disk,
    (2) series with hidden folder names plus their videos, (3) series
    without local episodes and without a TVDB link, (4) counter refresh
    for series that lost videos in pass 1. Best effort: any failure
    only logs a warning.
    """
    pool = await self._get_pool()
    if not pool:
        return
    try:
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                # 1. Videos whose files no longer exist on disk
                await cur.execute(
                    "SELECT id, file_path, series_id FROM library_videos "
                    "WHERE library_path_id = %s",
                    (path_id,)
                )
                all_videos = await cur.fetchall()
                missing_ids = []
                affected_series = set()
                for vid_id, file_path, series_id in all_videos:
                    if not os.path.exists(file_path):
                        missing_ids.append(vid_id)
                        if series_id:
                            affected_series.add(series_id)
                if missing_ids:
                    # Delete in batches (max 100 ids per statement)
                    for i in range(0, len(missing_ids), 100):
                        batch = missing_ids[i:i+100]
                        placeholders = ",".join(["%s"] * len(batch))
                        await cur.execute(
                            f"DELETE FROM library_videos WHERE id IN ({placeholders})",
                            batch
                        )
                    logging.info(
                        f"Bereinigung: {len(missing_ids)} nicht mehr "
                        f"vorhandene Video-Dateien entfernt"
                    )
                # 2. Series whose folder name is hidden (".foo"),
                #    including their video rows
                await cur.execute(
                    "DELETE FROM library_videos WHERE series_id IN "
                    "(SELECT id FROM library_series "
                    " WHERE library_path_id = %s AND folder_name LIKE '.%%')",
                    (path_id,)
                )
                removed_vids = cur.rowcount
                await cur.execute(
                    "DELETE FROM library_series "
                    "WHERE library_path_id = %s AND folder_name LIKE '.%%'",
                    (path_id,)
                )
                removed_series = cur.rowcount
                if removed_series > 0 or removed_vids > 0:
                    logging.info(
                        f"Bereinigung: {removed_series} versteckte Serien, "
                        f"{removed_vids} Videos entfernt"
                    )
                # 3. Series without videos and without TVDB link
                #    (empty folders)
                await cur.execute(
                    "DELETE FROM library_series "
                    "WHERE library_path_id = %s AND local_episodes = 0 "
                    "AND tvdb_id IS NULL",
                    (path_id,)
                )
                empty = cur.rowcount
                if empty > 0:
                    logging.info(
                        f"Bereinigung: {empty} leere Serien entfernt"
                    )
                # 4. Refresh counters for series touched in pass 1.
                #    NOTE(review): ids deleted by pass 2/3 may still be
                #    in this set; the counter update then simply
                #    matches no rows.
                for series_id in affected_series:
                    await self._update_series_counts(series_id)
    except Exception as e:
        logging.warning(f"Bereinigung fehlgeschlagen: {e}")
async def _scan_movie_path(self, base_path: str,
                           path_id: int) -> int:
    """Scan a movie library root recursively.

    Every folder that directly contains video files becomes one movie;
    folders holding only subfolders are treated as collections and
    descended into. Loose video files directly in the root each become
    their own movie. Returns the number of videos added/updated.
    """
    count = 0
    movie_folders = self._find_movie_folders(base_path)
    logging.info(f"Film-Scan: {len(movie_folders)} Film-Ordner gefunden")
    self._scan_progress.update({
        "status": "scanning",
        "current": os.path.basename(base_path),
    })
    for folder_path, direct_videos in movie_folders:
        folder_name = os.path.basename(folder_path)
        # Find or create the movie row for this folder
        movie_id = await self._ensure_movie(
            path_id, folder_name, folder_path
        )
        if not movie_id:
            logging.warning(f"Film-Eintrag fehlgeschlagen: {folder_name}")
            continue
        # Grow the progress total only when new files push past the
        # cached estimate from the previous scan
        new_total = self._scan_progress["done"] + len(direct_videos)
        if new_total > self._scan_progress["total"]:
            self._scan_progress["total"] = new_total
        self._scan_progress.update({
            "status": "scanning",
            "current": folder_name,
        })
        await self.ws_manager.broadcast({
            "data_library_scan": self._scan_progress
        })
        for vf in direct_videos:
            added = await self._add_video_to_db(
                path_id, None, vf, base_path, movie_id=movie_id
            )
            if added:
                count += 1
            self._scan_progress["done"] += 1
            # Throttled WS updates (every 10 files)
            if self._scan_progress["done"] % 10 == 0:
                await self.ws_manager.broadcast({
                    "data_library_scan": self._scan_progress
                })
        await self._update_movie_counts(movie_id)
    # Loose video files directly in the root: one movie per file
    try:
        for entry in os.scandir(base_path):
            if entry.name.startswith(".") or not entry.is_file():
                continue
            ext = os.path.splitext(entry.name)[1].lower()
            if ext not in VIDEO_EXTENSIONS:
                continue
            name_no_ext = os.path.splitext(entry.name)[0]
            movie_id = await self._ensure_movie(
                path_id, name_no_ext, entry.path, is_file=True
            )
            if movie_id:
                added = await self._add_video_to_db(
                    path_id, None, entry.path, base_path,
                    movie_id=movie_id
                )
                if added:
                    count += 1
                await self._update_movie_counts(movie_id)
    except OSError:
        pass
    # Drop stale movie rows (hidden/empty folders)
    await self._cleanup_stale_movies(path_id)
    return count
def _find_movie_folders(self, base_path: str) -> list[tuple[str, list[str]]]:
    """Recursively collect folders that directly contain video files.

    Returns (folder_path, [video_file, ...]) tuples, sorted
    case-insensitively by folder name. Folders holding only subfolders
    (movie collections) are descended into.
    """
    found: list[tuple[str, list[str]]] = []
    try:
        for entry in sorted(os.scandir(base_path),
                            key=lambda e: e.name.lower()):
            if entry.name.startswith(".") or not entry.is_dir():
                continue
            videos: list[str] = []
            subdirs_present = False
            try:
                for child in os.scandir(entry.path):
                    if child.name.startswith("."):
                        continue
                    if child.is_file():
                        suffix = os.path.splitext(child.name)[1].lower()
                        if suffix in VIDEO_EXTENSIONS:
                            videos.append(child.path)
                    elif child.is_dir():
                        subdirs_present = True
            except OSError:
                continue
            if videos:
                # Folder holds videos directly -> it is a movie
                found.append((entry.path, videos))
            if subdirs_present:
                # Descend for nested movies (collections)
                found.extend(self._find_movie_folders(entry.path))
    except OSError:
        pass
    return found
def _find_videos_recursive(self, directory: str) -> list[str]:
    """Walk a directory tree and return every video file path found."""
    matches: list[str] = []
    try:
        for root, _subdirs, file_names in os.walk(directory):
            matches.extend(
                os.path.join(root, name)
                for name in sorted(file_names)
                if os.path.splitext(name)[1].lower() in VIDEO_EXTENSIONS
            )
    except OSError:
        pass
    return matches
async def _ensure_series(self, path_id: int, folder_name: str,
                         folder_path: str) -> Optional[int]:
    """Return the series row id for a folder, inserting it if new."""
    pool = await self._get_pool()
    if not pool:
        return None
    try:
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                await cur.execute(
                    "SELECT id FROM library_series "
                    "WHERE library_path_id = %s AND folder_path = %s",
                    (path_id, folder_path)
                )
                existing = await cur.fetchone()
                if existing:
                    return existing[0]
                # New series: the folder name doubles as initial title
                await cur.execute(
                    "INSERT INTO library_series "
                    "(library_path_id, folder_name, folder_path, title) "
                    "VALUES (%s, %s, %s, %s)",
                    (path_id, folder_name, folder_path, folder_name)
                )
                return cur.lastrowid
    except Exception as e:
        logging.error(f"Serie anlegen fehlgeschlagen: {folder_name}: {e}")
        return None
async def _ensure_movie(self, path_id: int, folder_name: str,
                        folder_path: str,
                        is_file: bool = False) -> Optional[int]:
    """Return the movie row id for a folder/file, inserting it if new.

    For new entries a display title and release year are derived from
    the name ("Film Name (2020) 720p" -> "Film Name", 2020).
    """
    pool = await self._get_pool()
    if not pool:
        return None
    try:
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                await cur.execute(
                    "SELECT id FROM library_movies "
                    "WHERE library_path_id = %s AND folder_path = %s",
                    (path_id, folder_path)
                )
                existing = await cur.fetchone()
                if existing:
                    return existing[0]
                # Derive title/year from the folder name
                year = None
                year_match = re.search(r'\((\d{4})\)', folder_name)
                if year_match:
                    year = int(year_match.group(1))
                    # Everything before the year is the title
                    title = (folder_name[:year_match.start()].strip()
                             or folder_name)
                else:
                    # Scene style: dots/underscores instead of spaces
                    title = re.sub(r'[._]', ' ', folder_name).strip()
                # Strip quality/release suffixes from the title
                title = re.sub(
                    r'\s*(720p|1080p|2160p|4k|bluray|bdrip|webrip|'
                    r'web-dl|hdtv|x264|x265|hevc|aac|dts)\s*',
                    '', title, flags=re.IGNORECASE
                ).strip()
                await cur.execute(
                    "INSERT INTO library_movies "
                    "(library_path_id, folder_name, folder_path, "
                    "title, year) "
                    "VALUES (%s, %s, %s, %s, %s)",
                    (path_id, folder_name, folder_path, title, year)
                )
                return cur.lastrowid
    except Exception as e:
        logging.error(f"Film anlegen fehlgeschlagen: {folder_name}: {e}")
        return None
async def _update_movie_counts(self, movie_id: int) -> None:
    """Recompute a movie's file count and total byte size from its videos."""
    pool = await self._get_pool()
    if not pool:
        return
    try:
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                await cur.execute(
                    "SELECT COUNT(*), COALESCE(SUM(file_size), 0) "
                    "FROM library_videos WHERE movie_id = %s",
                    (movie_id,)
                )
                row = await cur.fetchone()
                num_videos = row[0] if row else 0
                size_total = int(row[1]) if row else 0
                await cur.execute(
                    "UPDATE library_movies SET video_count = %s, "
                    "total_size = %s WHERE id = %s",
                    (num_videos, size_total, movie_id)
                )
    except Exception as e:
        logging.error(f"Film-Zaehler aktualisieren fehlgeschlagen: {e}")
async def _cleanup_stale_movies(self, path_id: int) -> None:
    """Purge movie rows for hidden folders and empty, unlinked movies."""
    pool = await self._get_pool()
    if not pool:
        return
    cleanup_statements = (
        # Hidden folder names (".foo") are never real movies
        ("DELETE FROM library_movies "
         "WHERE library_path_id = %s "
         "AND folder_name LIKE '.%%'"),
        # Entries without files and without a TVDB link carry no info
        ("DELETE FROM library_movies "
         "WHERE library_path_id = %s "
         "AND video_count = 0 AND tvdb_id IS NULL"),
    )
    try:
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                for stmt in cleanup_statements:
                    await cur.execute(stmt, (path_id,))
    except Exception as e:
        logging.warning(f"Film-Bereinigung fehlgeschlagen: {e}")
async def _add_video_to_db(self, path_id: int,
                           series_id: Optional[int],
                           file_path: str,
                           base_path: str,
                           movie_id: Optional[int] = None) -> bool:
    """Probe one video file and upsert it into library_videos.

    Returns True when the row was inserted/updated after an ffprobe
    run; False when the file was unchanged (size match), probing
    failed, or the DB write failed.
    """
    pool = await self._get_pool()
    if not pool:
        return False
    # Fast path: file already known and size unchanged -> skip the
    # expensive ffprobe run, only refresh the series/movie linkage.
    try:
        file_size = os.path.getsize(file_path)
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                await cur.execute(
                    "SELECT id, file_size FROM library_videos "
                    "WHERE file_path = %s",
                    (file_path,)
                )
                existing = await cur.fetchone()
                if existing and existing[1] == file_size:
                    # Unchanged - but keep series/movie assignment fresh
                    await cur.execute(
                        "UPDATE library_videos "
                        "SET series_id = %s, movie_id = %s "
                        "WHERE id = %s",
                        (series_id, movie_id, existing[0])
                    )
                    return False
    except Exception:
        # NOTE(review): swallows stat/DB errors and falls through to a
        # full probe. If getsize() failed here, file_size is unbound;
        # should probing still succeed, the INSERT below would raise —
        # presumably analyze() returns None for unreadable files, but
        # confirm.
        pass
    # Full ffprobe analysis
    media = await ProbeService.analyze(file_path)
    if not media:
        return False
    # Parse series info (season/episode/title) from path and file name
    season_num, episode_num, episode_end, episode_title = self._parse_episode_info(
        file_path, base_path
    )
    # Audio/subtitle stream metadata stored as JSON columns
    audio_tracks = json.dumps([
        {
            "codec": a.codec_name,
            "lang": a.language,
            "channels": a.channels,
            "bitrate": a.bit_rate,
        }
        for a in media.audio_streams
    ])
    subtitle_tracks = json.dumps([
        {"codec": s.codec_name, "lang": s.language}
        for s in media.subtitle_streams
    ])
    # Video properties come from the first video stream (if any)
    v = media.video_streams[0] if media.video_streams else None
    video_codec = v.codec_name if v else None
    width = v.width if v else 0
    height = v.height if v else 0
    resolution = v.resolution if v else ""
    frame_rate = v.frame_rate if v else 0.0
    video_bitrate = v.bit_rate if v else None
    is_10bit = 1 if (v and v.is_10bit) else 0
    container = media.source_extension.lstrip(".")
    file_name = media.source_filename
    try:
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                # UPSERT keyed on the unique file_path index
                await cur.execute("""
                    INSERT INTO library_videos (
                        library_path_id, series_id, movie_id,
                        file_path, file_name,
                        file_size, season_number, episode_number,
                        episode_end, episode_title,
                        video_codec, width, height, resolution,
                        frame_rate, video_bitrate, is_10bit,
                        audio_tracks, subtitle_tracks,
                        container, duration_sec
                    ) VALUES (
                        %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,
                        %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s
                    )
                    ON DUPLICATE KEY UPDATE
                        file_size = VALUES(file_size),
                        series_id = VALUES(series_id),
                        movie_id = VALUES(movie_id),
                        season_number = VALUES(season_number),
                        episode_number = VALUES(episode_number),
                        episode_end = VALUES(episode_end),
                        episode_title = VALUES(episode_title),
                        video_codec = VALUES(video_codec),
                        width = VALUES(width),
                        height = VALUES(height),
                        resolution = VALUES(resolution),
                        frame_rate = VALUES(frame_rate),
                        video_bitrate = VALUES(video_bitrate),
                        is_10bit = VALUES(is_10bit),
                        audio_tracks = VALUES(audio_tracks),
                        subtitle_tracks = VALUES(subtitle_tracks),
                        container = VALUES(container),
                        duration_sec = VALUES(duration_sec),
                        scanned_at = NOW()
                """, (
                    path_id, series_id, movie_id,
                    file_path, file_name,
                    file_size, season_num, episode_num,
                    episode_end, episode_title,
                    video_codec, width, height, resolution,
                    frame_rate, video_bitrate, is_10bit,
                    audio_tracks, subtitle_tracks,
                    container, media.source_duration_sec,
                ))
                return True
    except Exception as e:
        logging.error(f"Video in DB speichern fehlgeschlagen: "
                      f"{file_name}: {e}")
        return False
def _parse_episode_info(self, file_path: str,
                        base_path: str) -> tuple[Optional[int],
                                                 Optional[int],
                                                 Optional[int],
                                                 Optional[str]]:
    """Extract season/episode numbers and episode title from a path.

    Returns (season_number, episode_number, episode_end, episode_title);
    episode_end is the final episode of a multi-episode file
    (e.g. S01E01E02 -> episode_number=1, episode_end=2).

    Fix: the leading-number fallback previously matched against the
    full file name including extension, so "01.mkv" produced the bogus
    episode_title "mkv". It now parses the extension-less name (and
    still recognizes bare numeric names), never reporting an empty or
    extension-only title.
    """
    file_name = os.path.basename(file_path)
    rel_path = os.path.relpath(file_path, base_path)
    name_no_ext = os.path.splitext(file_name)[0]
    season_num = None
    episode_num = None
    episode_end = None
    episode_title = None
    # 1. S01E02 or double episode S01E01E02 in the file name
    m = RE_SXXEXX_MULTI.search(file_name)
    if m:
        season_num = int(m.group(1))
        episode_num = int(m.group(2))
        if m.group(3):
            episode_end = int(m.group(3))
        # Title is whatever follows "SxxExx(-Exx)" after separators
        episode_title = self._extract_episode_title(name_no_ext, m.end())
    # 2. 1x02 or 1x01-02 in the file name
    if season_num is None:
        m = RE_XXxXX_MULTI.search(file_name)
        if m:
            season_num = int(m.group(1))
            episode_num = int(m.group(2))
            if m.group(3):
                episode_end = int(m.group(3))
            episode_title = self._extract_episode_title(name_no_ext, m.end())
    # 3. Season from a "Season 01"/"Staffel 1"/"S01" folder plus a
    #    leading number in the file name ("01 - Pilot.mkv", "01.mkv")
    if season_num is None:
        for part in rel_path.replace("\\", "/").split("/")[:-1]:
            m = RE_SEASON_DIR.match(part)
            if m:
                season_num = int(m.group(1))
                break
        if season_num is not None:
            m = RE_LEADING_NUM.match(name_no_ext)
            if m:
                episode_num = int(m.group(1))
                # Rest after the number/separator is the title
                episode_title = m.group(2).strip() or None
            elif name_no_ext.isdigit() and len(name_no_ext) <= 3:
                # Bare numeric name like "01.mkv": episode, no title
                episode_num = int(name_no_ext)
    return season_num, episode_num, episode_end, episode_title
def _extract_episode_title(self, name_no_ext: str,
pos_after_episode: int) -> Optional[str]:
"""Extrahiert Episodentitel aus Dateinamen nach SxxExx.
Beispiele:
"Tulsa King - S01E01 - Nach Westen, alter Mann" -> "Nach Westen, alter Mann"
"Serie.S01E02.Titel.der.Episode.720p" -> "Titel der Episode"
"Serie - S01E03" -> None
"""
if pos_after_episode >= len(name_no_ext):
return None
rest = name_no_ext[pos_after_episode:]
# Fuehrende Trennzeichen entfernen (-, _, ., Leerzeichen)
rest = rest.lstrip(" -_.")
if not rest:
return None
# Qualitaets-/Release-Tags am Ende entfernen
# z.B. "720p", "1080p", "2160p", "x264", "HEVC", "WEB-DL" etc.
quality_pattern = re.compile(
r'[\s._-]*(720p|1080p|2160p|4k|hdtv|webrip|web-dl|bluray|'
r'bdrip|x264|x265|hevc|h264|h265|aac|dts|ac3|'
r'proper|repack|german|english|dubbed|dl|'
r'web|hdr|sdr|10bit|remux).*$',
re.IGNORECASE
)
rest = quality_pattern.sub('', rest)
# Punkte/Underscores durch Leerzeichen ersetzen (Scene-Releases)
# Aber nur wenn keine normalen Leerzeichen vorhanden
if ' ' not in rest and ('.' in rest or '_' in rest):
rest = rest.replace('.', ' ').replace('_', ' ')
# Mehrfach-Leerzeichen und Trailing entfernen
rest = re.sub(r'\s+', ' ', rest).strip(' -_.')
return rest if rest else None
    async def _update_series_counts(self, series_id: int) -> None:
        """Refresh the local episode counters of one series.

        Counts UNIQUE episodes (season_number, episode_number), not the
        number of files: if several files cover the same episode (e.g.
        .mkv and .webm), the surplus is counted as redundant.
        Double episodes (episode_end) are honoured: a file tagged E01E02
        counts as 2 episodes.
        """
        pool = await self._get_pool()
        if not pool:
            return
        try:
            async with pool.acquire() as conn:
                async with conn.cursor() as cur:
                    # Total number of files that carry episode info
                    await cur.execute("""
                        SELECT COUNT(*) FROM library_videos
                        WHERE series_id = %s AND episode_number IS NOT NULL
                    """, (series_id,))
                    row = await cur.fetchone()
                    total_files = row[0] if row and row[0] else 0
                    # Number of UNIQUE episodes, grouped by
                    # (season_number, episode_number); for double episodes
                    # a group spans MAX(episode_end) - episode_number + 1.
                    await cur.execute("""
                        SELECT SUM(episode_span) as unique_episodes FROM (
                            SELECT
                                season_number,
                                episode_number,
                                CASE
                                    WHEN MAX(episode_end) IS NOT NULL
                                    THEN MAX(episode_end) - episode_number + 1
                                    ELSE 1
                                END as episode_span
                            FROM library_videos
                            WHERE series_id = %s AND episode_number IS NOT NULL
                            GROUP BY season_number, episode_number
                        ) as unique_eps
                    """, (series_id,))
                    row = await cur.fetchone()
                    # NOTE(review): SUM() may come back as Decimal from
                    # MariaDB - the arithmetic below copes, but confirm.
                    unique_count = row[0] if row and row[0] else 0
                    # Redundant files = total files - unique episodes
                    redundant = max(0, total_files - unique_count)
                    await cur.execute(
                        "UPDATE library_series SET local_episodes = %s, "
                        "missing_episodes = GREATEST(0, total_episodes - %s), "
                        "redundant_files = %s "
                        "WHERE id = %s",
                        (unique_count, unique_count, redundant, series_id)
                    )
        except Exception as e:
            logging.error(f"Serien-Zaehler aktualisieren fehlgeschlagen: {e}")
# === Abfragen ===
    async def get_videos(self, filters: Optional[dict] = None,
                         page: int = 1, limit: int = 50) -> dict:
        """Query library videos with optional filters and pagination.

        Supported filter keys: library_path_id, media_type, series_id,
        video_codec, min_width, max_width, container, is_10bit,
        audio_lang, audio_channels, has_subtitle, search, not_converted,
        exclude_container, exclude_codec, sort (whitelisted column name)
        and order ("desc" for descending).

        Returns {"items": [...], "total": n, "page": p, "pages": n} and a
        safe empty result on DB errors.
        """
        pool = await self._get_pool()
        if not pool:
            return {"items": [], "total": 0, "page": page}
        filters = filters or {}
        where_clauses = []
        params = []
        if filters.get("library_path_id"):
            where_clauses.append("v.library_path_id = %s")
            params.append(int(filters["library_path_id"]))
        if filters.get("media_type"):
            where_clauses.append("lp.media_type = %s")
            params.append(filters["media_type"])
        if filters.get("series_id"):
            where_clauses.append("v.series_id = %s")
            params.append(int(filters["series_id"]))
        if filters.get("video_codec"):
            where_clauses.append("v.video_codec = %s")
            params.append(filters["video_codec"])
        if filters.get("min_width"):
            where_clauses.append("v.width >= %s")
            params.append(int(filters["min_width"]))
        if filters.get("max_width"):
            where_clauses.append("v.width <= %s")
            params.append(int(filters["max_width"]))
        if filters.get("container"):
            where_clauses.append("v.container = %s")
            params.append(filters["container"])
        if filters.get("is_10bit"):
            where_clauses.append("v.is_10bit = 1")
        if filters.get("audio_lang"):
            where_clauses.append(
                "JSON_CONTAINS(v.audio_tracks, JSON_OBJECT('lang', %s))"
            )
            params.append(filters["audio_lang"])
        if filters.get("audio_channels"):
            where_clauses.append(
                "JSON_CONTAINS(v.audio_tracks, "
                "JSON_OBJECT('channels', CAST(%s AS SIGNED)))"
            )
            params.append(int(filters["audio_channels"]))
        if filters.get("has_subtitle"):
            where_clauses.append(
                "JSON_CONTAINS(v.subtitle_tracks, JSON_OBJECT('lang', %s))"
            )
            params.append(filters["has_subtitle"])
        if filters.get("search"):
            where_clauses.append("v.file_name LIKE %s")
            params.append(f"%{filters['search']}%")
        # Filter: not yet in the target format (container + codec)
        if filters.get("not_converted"):
            target_container = self.config.target_container  # e.g. "webm"
            # Keep videos that are NOT already target container + codec
            where_clauses.append(
                "(v.container != %s OR v.video_codec NOT IN ('av1', 'hevc'))"
            )
            params.append(target_container)
        # Filter: exclude one specific container
        if filters.get("exclude_container"):
            where_clauses.append("v.container != %s")
            params.append(filters["exclude_container"])
        # Filter: exclude one specific codec
        if filters.get("exclude_codec"):
            where_clauses.append("v.video_codec != %s")
            params.append(filters["exclude_codec"])
        where_sql = ""
        if where_clauses:
            where_sql = "WHERE " + " AND ".join(where_clauses)
        # Sorting: the column name is interpolated into the SQL below
        # (placeholders cannot be used for ORDER BY), so it is validated
        # against a whitelist to prevent SQL injection.
        sort_col = filters.get("sort", "file_name")
        allowed_sorts = {
            "file_name", "file_size", "width", "video_codec",
            "container", "duration_sec", "scanned_at"
        }
        if sort_col not in allowed_sorts:
            sort_col = "file_name"
        order = "DESC" if filters.get("order") == "desc" else "ASC"
        offset = (page - 1) * limit
        try:
            async with pool.acquire() as conn:
                async with conn.cursor(aiomysql.DictCursor) as cur:
                    # Count matching rows for pagination
                    await cur.execute(
                        f"SELECT COUNT(*) as cnt FROM library_videos v "
                        f"LEFT JOIN library_paths lp ON v.library_path_id = lp.id "
                        f"{where_sql}",
                        params
                    )
                    total = (await cur.fetchone())["cnt"]
                    # Fetch the requested page with joined path/series info
                    await cur.execute(
                        f"SELECT v.*, lp.name as library_name, "
                        f"lp.media_type, "
                        f"ls.title as series_title, ls.poster_url "
                        f"FROM library_videos v "
                        f"LEFT JOIN library_paths lp "
                        f"ON v.library_path_id = lp.id "
                        f"LEFT JOIN library_series ls "
                        f"ON v.series_id = ls.id "
                        f"{where_sql} "
                        f"ORDER BY v.{sort_col} {order} "
                        f"LIMIT %s OFFSET %s",
                        params + [limit, offset]
                    )
                    rows = await cur.fetchall()
                    items = []
                    for row in rows:
                        item = self._serialize_row(row)
                        # Parse JSON string columns into lists
                        if isinstance(item.get("audio_tracks"), str):
                            try:
                                item["audio_tracks"] = json.loads(
                                    item["audio_tracks"]
                                )
                            except (json.JSONDecodeError, TypeError):
                                item["audio_tracks"] = []
                        if isinstance(item.get("subtitle_tracks"), str):
                            try:
                                item["subtitle_tracks"] = json.loads(
                                    item["subtitle_tracks"]
                                )
                            except (json.JSONDecodeError, TypeError):
                                item["subtitle_tracks"] = []
                        items.append(item)
                    return {
                        "items": items,
                        "total": total,
                        "page": page,
                        "pages": (total + limit - 1) // limit if limit else 1,
                    }
        except Exception as e:
            logging.error(f"Videos abfragen fehlgeschlagen: {e}")
            return {"items": [], "total": 0, "page": page}
async def get_series_list(self, path_id: int = None) -> list[dict]:
"""Alle Serien laden, optional nach Pfad gefiltert"""
pool = await self._get_pool()
if not pool:
return []
try:
async with pool.acquire() as conn:
async with conn.cursor(aiomysql.DictCursor) as cur:
if path_id:
await cur.execute(
"SELECT * FROM library_series "
"WHERE library_path_id = %s "
"ORDER BY title",
(path_id,)
)
else:
await cur.execute(
"SELECT * FROM library_series ORDER BY title"
)
rows = await cur.fetchall()
return [self._serialize_row(r) for r in rows]
except Exception as e:
logging.error(f"Serien laden fehlgeschlagen: {e}")
return []
    async def get_series_detail(self, series_id: int) -> Optional[dict]:
        """Load one series together with its local episode files.

        Adds "episodes" (local files with JSON columns parsed) and
        "tvdb_episodes" (cached TVDB list, empty if no tvdb_id is linked).
        Returns None if the series does not exist or the DB is down.
        """
        pool = await self._get_pool()
        if not pool:
            return None
        try:
            async with pool.acquire() as conn:
                async with conn.cursor(aiomysql.DictCursor) as cur:
                    # The series row itself
                    await cur.execute(
                        "SELECT * FROM library_series WHERE id = %s",
                        (series_id,)
                    )
                    series = await cur.fetchone()
                    if not series:
                        return None
                    series = self._serialize_row(series)
                    # Local episode files
                    await cur.execute(
                        "SELECT * FROM library_videos "
                        "WHERE series_id = %s "
                        "ORDER BY season_number, episode_number, file_name",
                        (series_id,)
                    )
                    episodes = await cur.fetchall()
                    episode_list = []
                    for ep in episodes:
                        item = self._serialize_row(ep)
                        # Parse JSON string columns into lists
                        if isinstance(item.get("audio_tracks"), str):
                            try:
                                item["audio_tracks"] = json.loads(
                                    item["audio_tracks"]
                                )
                            except (json.JSONDecodeError, TypeError):
                                item["audio_tracks"] = []
                        if isinstance(item.get("subtitle_tracks"), str):
                            try:
                                item["subtitle_tracks"] = json.loads(
                                    item["subtitle_tracks"]
                                )
                            except (json.JSONDecodeError, TypeError):
                                item["subtitle_tracks"] = []
                        episode_list.append(item)
                    series["episodes"] = episode_list
                    # Load cached TVDB episodes when the series is linked
                    if series.get("tvdb_id"):
                        await cur.execute(
                            "SELECT * FROM tvdb_episode_cache "
                            "WHERE series_tvdb_id = %s "
                            "ORDER BY season_number, episode_number",
                            (series["tvdb_id"],)
                        )
                        tvdb_eps = await cur.fetchall()
                        series["tvdb_episodes"] = [
                            self._serialize_row(e) for e in tvdb_eps
                        ]
                    else:
                        series["tvdb_episodes"] = []
                    return series
        except Exception as e:
            logging.error(f"Serien-Detail laden fehlgeschlagen: {e}")
            return None
    async def get_missing_episodes(self, series_id: int) -> list[dict]:
        """List episodes a series is missing (TVDB cache vs. local files).

        Specials (season 0) are excluded. Returns [] when the series is
        not linked to TVDB, on DB errors, or when nothing is missing.
        """
        pool = await self._get_pool()
        if not pool:
            return []
        try:
            async with pool.acquire() as conn:
                async with conn.cursor(aiomysql.DictCursor) as cur:
                    # Resolve the series' TVDB id first
                    await cur.execute(
                        "SELECT tvdb_id FROM library_series WHERE id = %s",
                        (series_id,)
                    )
                    row = await cur.fetchone()
                    if not row or not row["tvdb_id"]:
                        return []
                    tvdb_id = row["tvdb_id"]
                    # Missing = TVDB episodes that have no local file.
                    # Double episodes (episode_end) are honoured:
                    # a file tagged E01E02 covers both E01 and E02.
                    await cur.execute("""
                        SELECT tc.season_number, tc.episode_number,
                               tc.episode_name, tc.aired
                        FROM tvdb_episode_cache tc
                        WHERE tc.series_tvdb_id = %s
                        AND NOT EXISTS (
                            SELECT 1 FROM library_videos lv
                            WHERE lv.series_id = %s
                            AND lv.season_number = tc.season_number
                            AND (
                                lv.episode_number = tc.episode_number
                                OR (
                                    lv.episode_end IS NOT NULL
                                    AND tc.episode_number >= lv.episode_number
                                    AND tc.episode_number <= lv.episode_end
                                )
                            )
                        )
                        AND tc.season_number > 0
                        ORDER BY tc.season_number, tc.episode_number
                    """, (tvdb_id, series_id))
                    rows = await cur.fetchall()
                    return [self._serialize_row(r) for r in rows]
        except Exception as e:
            logging.error(f"Fehlende Episoden laden fehlgeschlagen: {e}")
            return []
    async def get_all_missing_episodes(self, path_id: Optional[int] = None,
                                       page: int = 1,
                                       limit: int = 50) -> dict:
        """Load missing episodes across ALL series (for the filter view).

        Returns virtual entries with:
        - series_id, series_title, poster_url
        - season_number, episode_number, episode_name, aired
        - is_missing = True (marker for the frontend)
        """
        pool = await self._get_pool()
        if not pool:
            return {"items": [], "total": 0, "page": page}
        try:
            async with pool.acquire() as conn:
                async with conn.cursor(aiomysql.DictCursor) as cur:
                    # Optional path restriction. path_filter is a fixed
                    # literal interpolated into the f-string SQL; the
                    # actual value goes through a %s placeholder, so no
                    # injection risk here.
                    path_filter = ""
                    params = []
                    if path_id:
                        path_filter = "AND ls.library_path_id = %s"
                        params.append(path_id)
                    # Count query
                    await cur.execute(f"""
                        SELECT COUNT(*) as cnt
                        FROM tvdb_episode_cache tc
                        JOIN library_series ls ON tc.series_tvdb_id = ls.tvdb_id
                        WHERE tc.season_number > 0
                        {path_filter}
                        AND NOT EXISTS (
                            SELECT 1 FROM library_videos lv
                            WHERE lv.series_id = ls.id
                            AND lv.season_number = tc.season_number
                            AND (
                                lv.episode_number = tc.episode_number
                                OR (
                                    lv.episode_end IS NOT NULL
                                    AND tc.episode_number >= lv.episode_number
                                    AND tc.episode_number <= lv.episode_end
                                )
                            )
                        )
                    """, params)
                    total = (await cur.fetchone())["cnt"]
                    # Data query with pagination
                    offset = (page - 1) * limit
                    await cur.execute(f"""
                        SELECT
                            ls.id as series_id,
                            ls.title as series_title,
                            ls.poster_url,
                            ls.folder_path,
                            tc.season_number,
                            tc.episode_number,
                            tc.episode_name,
                            tc.aired,
                            1 as is_missing
                        FROM tvdb_episode_cache tc
                        JOIN library_series ls ON tc.series_tvdb_id = ls.tvdb_id
                        WHERE tc.season_number > 0
                        {path_filter}
                        AND NOT EXISTS (
                            SELECT 1 FROM library_videos lv
                            WHERE lv.series_id = ls.id
                            AND lv.season_number = tc.season_number
                            AND (
                                lv.episode_number = tc.episode_number
                                OR (
                                    lv.episode_end IS NOT NULL
                                    AND tc.episode_number >= lv.episode_number
                                    AND tc.episode_number <= lv.episode_end
                                )
                            )
                        )
                        ORDER BY ls.title, tc.season_number, tc.episode_number
                        LIMIT %s OFFSET %s
                    """, params + [limit, offset])
                    rows = await cur.fetchall()
                    items = [self._serialize_row(r) for r in rows]
                    return {
                        "items": items,
                        "total": total,
                        "page": page,
                        "pages": (total + limit - 1) // limit if limit else 1,
                    }
        except Exception as e:
            logging.error(f"Fehlende Episoden (alle) laden fehlgeschlagen: {e}")
            return {"items": [], "total": 0, "page": page}
async def update_series_tvdb(self, series_id: int,
tvdb_id: int) -> bool:
"""TVDB-ID einer Serie zuordnen"""
pool = await self._get_pool()
if not pool:
return False
try:
async with pool.acquire() as conn:
async with conn.cursor() as cur:
await cur.execute(
"UPDATE library_series SET tvdb_id = %s "
"WHERE id = %s",
(tvdb_id, series_id)
)
return cur.rowcount > 0
except Exception as e:
logging.error(f"TVDB-ID zuordnen fehlgeschlagen: {e}")
return False
# === Ordner-Ansicht ===
    async def browse_path(self, path: Optional[str] = None) -> dict:
        """Build a folder view from DB entries (no filesystem access).

        Without path: every enabled library path is returned as a root.
        With path: sub-folders and videos directly in that directory,
        plus a breadcrumb trail back to the library root.
        """
        pool = await self._get_pool()
        if not pool:
            return {"folders": [], "videos": [], "breadcrumb": []}
        try:
            async with pool.acquire() as conn:
                async with conn.cursor(aiomysql.DictCursor) as cur:
                    if not path:
                        # Root level: show the configured library paths
                        await cur.execute(
                            "SELECT lp.*, "
                            "(SELECT COUNT(*) FROM library_videos v "
                            " WHERE v.library_path_id = lp.id) as video_count, "
                            "(SELECT COALESCE(SUM(file_size), 0) FROM library_videos v "
                            " WHERE v.library_path_id = lp.id) as total_size "
                            "FROM library_paths lp WHERE lp.enabled = 1 "
                            "ORDER BY lp.name"
                        )
                        roots = await cur.fetchall()
                        folders = []
                        for r in roots:
                            folders.append({
                                "name": r["name"],
                                "path": r["path"],
                                "media_type": r["media_type"],
                                "video_count": r["video_count"],
                                "total_size": int(r["total_size"] or 0),
                            })
                        return {
                            "folders": folders,
                            "videos": [],
                            "breadcrumb": [],
                            "current_path": None,
                        }
                    # Sub-folders and videos for the given path.
                    # Normalize (no trailing slash).
                    path = path.rstrip("/")
                    # Build the breadcrumb: locate the library root first
                    await cur.execute(
                        "SELECT * FROM library_paths WHERE enabled = 1"
                    )
                    all_paths = await cur.fetchall()
                    lib_root = None
                    for lp in all_paths:
                        if path == lp["path"] or path.startswith(lp["path"] + "/"):
                            lib_root = lp
                            break
                    breadcrumb = []
                    if lib_root:
                        breadcrumb.append({
                            "name": lib_root["name"],
                            "path": lib_root["path"]
                        })
                        # Intermediate folders between root and path
                        if path != lib_root["path"]:
                            rel = path[len(lib_root["path"]):].strip("/")
                            parts = rel.split("/")
                            acc = lib_root["path"]
                            for part in parts:
                                acc = acc + "/" + part
                                breadcrumb.append({
                                    "name": part,
                                    "path": acc
                                })
                    # All file_paths below this directory.
                    # NOTE(review): LIKE treats % and _ inside `prefix` as
                    # wildcards - paths containing them may over-match.
                    prefix = path + "/"
                    await cur.execute(
                        "SELECT file_path, file_name, file_size, "
                        "video_codec, width, height, is_10bit, "
                        "audio_tracks, subtitle_tracks, container, "
                        "duration_sec, id "
                        "FROM library_videos "
                        "WHERE file_path LIKE %s "
                        "ORDER BY file_path",
                        (prefix + "%",)
                    )
                    rows = await cur.fetchall()
                    # Split results into sub-folders and direct files
                    folder_map = {}  # folder name -> {count, size}
                    direct_videos = []
                    for row in rows:
                        rel = row["file_path"][len(prefix):]
                        if "/" in rel:
                            # Entry lives in a sub-folder
                            subfolder = rel.split("/")[0]
                            if subfolder not in folder_map:
                                folder_map[subfolder] = {
                                    "video_count": 0, "total_size": 0
                                }
                            folder_map[subfolder]["video_count"] += 1
                            folder_map[subfolder]["total_size"] += (
                                row["file_size"] or 0
                            )
                        else:
                            # Direct file in this directory
                            item = self._serialize_row(row)
                            if isinstance(item.get("audio_tracks"), str):
                                try:
                                    item["audio_tracks"] = json.loads(
                                        item["audio_tracks"]
                                    )
                                except (json.JSONDecodeError, TypeError):
                                    item["audio_tracks"] = []
                            if isinstance(item.get("subtitle_tracks"), str):
                                try:
                                    item["subtitle_tracks"] = json.loads(
                                        item["subtitle_tracks"]
                                    )
                                except (json.JSONDecodeError, TypeError):
                                    item["subtitle_tracks"] = []
                            direct_videos.append(item)
                    folders = []
                    for name in sorted(folder_map.keys()):
                        folders.append({
                            "name": name,
                            "path": prefix + name,
                            "video_count": folder_map[name]["video_count"],
                            "total_size": folder_map[name]["total_size"],
                        })
                    return {
                        "folders": folders,
                        "videos": direct_videos,
                        "breadcrumb": breadcrumb,
                        "current_path": path,
                    }
        except Exception as e:
            logging.error(f"Ordner-Ansicht fehlgeschlagen: {e}")
            return {"folders": [], "videos": [], "breadcrumb": []}
# === Film-Abfragen ===
async def get_movie_list(self, path_id: int = None) -> list[dict]:
"""Alle Filme laden, optional nach Pfad gefiltert"""
pool = await self._get_pool()
if not pool:
return []
try:
async with pool.acquire() as conn:
async with conn.cursor(aiomysql.DictCursor) as cur:
if path_id:
await cur.execute(
"SELECT m.*, "
"(SELECT v.duration_sec FROM library_videos v "
" WHERE v.movie_id = m.id "
" ORDER BY v.file_size DESC LIMIT 1) "
"as duration_sec "
"FROM library_movies m "
"WHERE m.library_path_id = %s "
"ORDER BY m.title",
(path_id,)
)
else:
await cur.execute(
"SELECT m.*, "
"(SELECT v.duration_sec FROM library_videos v "
" WHERE v.movie_id = m.id "
" ORDER BY v.file_size DESC LIMIT 1) "
"as duration_sec "
"FROM library_movies m "
"ORDER BY m.title"
)
rows = await cur.fetchall()
return [self._serialize_row(r) for r in rows]
except Exception as e:
logging.error(f"Filme laden fehlgeschlagen: {e}")
return []
    async def get_movie_detail(self, movie_id: int) -> Optional[dict]:
        """Load one movie together with its video files.

        Adds a "videos" list (JSON columns parsed). Returns None if the
        movie does not exist or the DB is unavailable.
        """
        pool = await self._get_pool()
        if not pool:
            return None
        try:
            async with pool.acquire() as conn:
                async with conn.cursor(aiomysql.DictCursor) as cur:
                    await cur.execute(
                        "SELECT * FROM library_movies WHERE id = %s",
                        (movie_id,)
                    )
                    movie = await cur.fetchone()
                    if not movie:
                        return None
                    movie = self._serialize_row(movie)
                    # The movie's video files
                    await cur.execute(
                        "SELECT * FROM library_videos "
                        "WHERE movie_id = %s ORDER BY file_name",
                        (movie_id,)
                    )
                    videos = await cur.fetchall()
                    video_list = []
                    for v in videos:
                        item = self._serialize_row(v)
                        # Parse JSON string columns into lists
                        if isinstance(item.get("audio_tracks"), str):
                            try:
                                item["audio_tracks"] = json.loads(
                                    item["audio_tracks"]
                                )
                            except (json.JSONDecodeError, TypeError):
                                item["audio_tracks"] = []
                        if isinstance(item.get("subtitle_tracks"), str):
                            try:
                                item["subtitle_tracks"] = json.loads(
                                    item["subtitle_tracks"]
                                )
                            except (json.JSONDecodeError, TypeError):
                                item["subtitle_tracks"] = []
                        video_list.append(item)
                    movie["videos"] = video_list
                    return movie
        except Exception as e:
            logging.error(f"Film-Detail laden fehlgeschlagen: {e}")
            return None
    async def delete_movie(self, movie_id: int,
                           delete_files: bool = False) -> dict:
        """Delete a movie from the DB; optionally also its files + folder.

        Video rows are kept but detached (movie_id set to NULL); only the
        library_movies row is removed. With delete_files=True the movie's
        folder is deleted from disk as well (best effort).

        Returns {"success": True, "updated_videos": n} plus optional
        "deleted_folder"/"folder_error" keys, or {"error": ...}.
        """
        pool = await self._get_pool()
        if not pool:
            return {"error": "Keine DB-Verbindung"}
        try:
            folder_path = None
            async with pool.acquire() as conn:
                async with conn.cursor() as cur:
                    await cur.execute(
                        "SELECT folder_path FROM library_movies "
                        "WHERE id = %s", (movie_id,)
                    )
                    row = await cur.fetchone()
                    if not row:
                        return {"error": "Film nicht gefunden"}
                    folder_path = row[0]
                    # Detach video rows (null out movie_id)
                    await cur.execute(
                        "UPDATE library_videos SET movie_id = NULL "
                        "WHERE movie_id = %s", (movie_id,)
                    )
                    vids = cur.rowcount
                    # Remove the movie row itself
                    await cur.execute(
                        "DELETE FROM library_movies WHERE id = %s",
                        (movie_id,)
                    )
            result = {"success": True, "updated_videos": vids}
            if delete_files and folder_path and os.path.isdir(folder_path):
                import shutil
                import stat
                # rmtree error hook: make the offending entry writable
                # and retry once; remaining failures are swallowed.
                # NOTE(review): the onerror= parameter is deprecated since
                # Python 3.12 in favour of onexc= - it still works, though.
                def _rm_error(func, path, exc_info):
                    try:
                        os.chmod(path, stat.S_IRWXU)
                        func(path)
                    except Exception:
                        pass
                try:
                    shutil.rmtree(folder_path, onerror=_rm_error)
                    result["deleted_folder"] = folder_path
                except Exception as e:
                    result["folder_error"] = str(e)
            return result
        except Exception as e:
            logging.error(f"Film loeschen fehlgeschlagen: {e}")
            return {"error": str(e)}
async def unlink_movie_tvdb(self, movie_id: int) -> bool:
"""TVDB-Zuordnung eines Films loesen"""
pool = await self._get_pool()
if not pool:
return False
try:
async with pool.acquire() as conn:
async with conn.cursor() as cur:
await cur.execute(
"UPDATE library_movies SET "
"tvdb_id = NULL, poster_url = NULL, "
"overview = NULL, genres = NULL, "
"runtime = NULL, status = NULL, "
"last_updated = NOW() "
"WHERE id = %s", (movie_id,)
)
return cur.rowcount > 0
except Exception as e:
logging.error(f"Film-TVDB loesen fehlgeschlagen: {e}")
return False
# === Duplikat-Finder ===
async def find_duplicates(self) -> list[dict]:
"""Findet potentielle Duplikate (gleiche Episode oder aehnliche Duration)"""
pool = await self._get_pool()
if not pool:
return []
try:
async with pool.acquire() as conn:
async with conn.cursor(aiomysql.DictCursor) as cur:
# Serien-Duplikate: gleiche Serie + Staffel + Episode
await cur.execute("""
SELECT v1.id as id1, v1.file_name as name1,
v1.file_path as path1,
v1.video_codec as codec1,
v1.width as width1, v1.height as height1,
v1.file_size as size1, v1.container as container1,
v2.id as id2, v2.file_name as name2,
v2.file_path as path2,
v2.video_codec as codec2,
v2.width as width2, v2.height as height2,
v2.file_size as size2, v2.container as container2
FROM library_videos v1
JOIN library_videos v2
ON v1.id < v2.id
AND v1.series_id = v2.series_id
AND v1.season_number = v2.season_number
AND v1.episode_number = v2.episode_number
AND v1.series_id IS NOT NULL
AND v1.season_number IS NOT NULL
ORDER BY v1.file_name
LIMIT 200
""")
rows = await cur.fetchall()
return [self._serialize_row(r) for r in rows]
except Exception as e:
logging.error(f"Duplikat-Suche fehlgeschlagen: {e}")
return []
# === Statistiken ===
    async def get_stats(self) -> dict:
        """Aggregate library statistics.

        Returns totals (videos, series, size, duration) plus codec and
        resolution distributions; {} when the DB is unavailable or on
        errors.
        """
        pool = await self._get_pool()
        if not pool:
            return {}
        try:
            async with pool.acquire() as conn:
                async with conn.cursor() as cur:
                    await cur.execute("""
                        SELECT
                            COUNT(*) as total_videos,
                            COUNT(DISTINCT series_id) as total_series,
                            SUM(file_size) as total_size,
                            SUM(duration_sec) as total_duration,
                            COUNT(DISTINCT video_codec) as codec_count
                        FROM library_videos
                    """)
                    row = await cur.fetchone()
                    if not row:
                        return {}
                    # Codec distribution
                    await cur.execute("""
                        SELECT video_codec, COUNT(*) as cnt
                        FROM library_videos
                        WHERE video_codec IS NOT NULL
                        GROUP BY video_codec
                        ORDER BY cnt DESC
                    """)
                    codec_rows = await cur.fetchall()
                    # Resolution distribution, bucketed by frame width
                    await cur.execute("""
                        SELECT
                            CASE
                                WHEN width >= 3840 THEN '4K'
                                WHEN width >= 1920 THEN '1080p'
                                WHEN width >= 1280 THEN '720p'
                                ELSE 'SD'
                            END as res_group,
                            COUNT(*) as cnt
                        FROM library_videos
                        WHERE width > 0
                        GROUP BY res_group
                        ORDER BY cnt DESC
                    """)
                    res_rows = await cur.fetchall()
                    return {
                        "total_videos": row[0] or 0,
                        "total_series": row[1] or 0,
                        "total_size": int(row[2] or 0),
                        "total_duration": float(row[3] or 0),
                        "codecs": {r[0]: r[1] for r in codec_rows},
                        "resolutions": {r[0]: r[1] for r in res_rows},
                    }
        except Exception as e:
            logging.error(f"Bibliotheks-Statistiken fehlgeschlagen: {e}")
            return {}
# === Hilfsfunktionen ===
@staticmethod
def _serialize_row(row: dict) -> dict:
"""MariaDB-Row fuer JSON serialisierbar machen"""
result = {}
for k, v in row.items():
if hasattr(v, "isoformat"):
result[k] = str(v)
else:
result[k] = v
return result