Thumbnails: - Negative Zaehlung gefixt (-23 von 5789): INNER JOIN statt separate COUNT - Verwaiste Thumbnail-Eintraege werden automatisch bereinigt - TVDB-Bilder werden lokal heruntergeladen statt extern verlinkt - Template nutzt nur noch lokale API, keine externen TVDB-URLs - Cache-Control: Thumbnails werden 7 Tage gecacht (Middleware ueberschreibt nicht mehr) - Fortschrittsbalken ins globale Progress-System verschoben (Thumbnails + Auto-Match) Watch-Status: - Feldnamen-Bug gefixt: position/duration -> position_sec/duration_sec - saveProgress(completed) setzt Position=Duration bei Video-Ende - Backend wertet completed-Flag aus Player: - Error-Recovery: Auto-Retry bei Video-Fehlern (2x) - Toast-Benachrichtigungen bei Stream-Fehlern (HLS, Netzwerk, Fallback) - onPlaying() Reset des Retry-Zaehlers Transcoding: - Neue Einstellung "Immer transcodieren" (force_transcode) im TV-Admin - Erzwingt H.264+AAC Transcoding fuer maximale Client-Kompatibilitaet - Kein Copy-Modus wenn aktiviert Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2530 lines
96 KiB
Python
2530 lines
96 KiB
Python
"""REST API Endpoints fuer die Video-Bibliothek"""
|
|
import asyncio
|
|
import json
|
|
import logging
|
|
import aiomysql
|
|
from aiohttp import web
|
|
from app.config import Config
|
|
from app.services.library import LibraryService
|
|
from app.services.tvdb import TVDBService
|
|
from app.services.queue import QueueService
|
|
from app.services.cleaner import CleanerService
|
|
from app.services.importer import ImporterService
|
|
|
|
|
|
def setup_library_routes(app: web.Application, config: Config,
|
|
library_service: LibraryService,
|
|
tvdb_service: TVDBService,
|
|
queue_service: QueueService,
|
|
cleaner_service: CleanerService = None,
|
|
importer_service: ImporterService = None
|
|
) -> None:
|
|
"""Registriert Bibliotheks-API-Routes"""
|
|
|
|
# === Scan-Pfade ===
|
|
|
|
async def get_paths(request: web.Request) -> web.Response:
    """GET /api/library/paths - return all configured scan paths."""
    return web.json_response({"paths": await library_service.get_paths()})
|
|
|
|
async def post_path(request: web.Request) -> web.Response:
    """POST /api/library/paths - register a new scan path.

    Body: {name, path, media_type: 'series'|'movie'}
    """
    try:
        payload = await request.json()
    except Exception:
        return web.json_response({"error": "Ungueltiges JSON"}, status=400)

    name = payload.get("name", "").strip()
    path = payload.get("path", "").strip()
    media_type = payload.get("media_type", "").strip()

    # Validate required fields before touching the service layer.
    if not (name and path):
        return web.json_response(
            {"error": "Name und Pfad erforderlich"}, status=400
        )
    if media_type not in ("series", "movie"):
        return web.json_response(
            {"error": "media_type muss 'series' oder 'movie' sein"},
            status=400,
        )

    new_id = await library_service.add_path(name, path, media_type)
    if new_id:
        return web.json_response(
            {"message": "Pfad hinzugefuegt", "id": new_id}
        )
    return web.json_response(
        {"error": "Pfad konnte nicht hinzugefuegt werden"}, status=500
    )
|
|
|
|
async def put_path(request: web.Request) -> web.Response:
    """PUT /api/library/paths/{path_id} - partially update a scan path."""
    path_id = int(request.match_info["path_id"])
    try:
        payload = await request.json()
    except Exception:
        return web.json_response({"error": "Ungueltiges JSON"}, status=400)

    # Missing keys arrive as None; the service treats None as "unchanged".
    updated = await library_service.update_path(
        path_id,
        name=payload.get("name"),
        path=payload.get("path"),
        media_type=payload.get("media_type"),
        enabled=payload.get("enabled"),
    )
    if not updated:
        return web.json_response({"error": "Pfad nicht gefunden"}, status=404)
    return web.json_response({"message": "Pfad aktualisiert"})
|
|
|
|
async def delete_path(request: web.Request) -> web.Response:
    """DELETE /api/library/paths/{path_id} - remove a scan path."""
    target_id = int(request.match_info["path_id"])
    removed = await library_service.remove_path(target_id)
    if not removed:
        return web.json_response({"error": "Pfad nicht gefunden"}, status=404)
    return web.json_response({"message": "Pfad entfernt"})
|
|
|
|
# === Scanning ===
|
|
|
|
async def post_scan_all(request: web.Request) -> web.Response:
    """POST /api/library/scan - scan every configured path.

    Fire-and-forget: the actual scan runs as a background task.
    """
    asyncio.create_task(_run_scan_all())
    return web.json_response({"message": "Scan gestartet"})
|
|
|
|
async def _run_scan_all() -> None:
    """Background task: scan all library paths and log the summary."""
    result = await library_service.scan_all()
    # Lazy %-args: the message is only formatted if INFO logging is enabled.
    logging.info("Komplett-Scan Ergebnis: %s", result)
|
|
|
|
async def post_scan_single(request: web.Request) -> web.Response:
    """POST /api/library/scan/{path_id} - scan one path in the background."""
    target_id = int(request.match_info["path_id"])
    asyncio.create_task(_run_scan_single(target_id))
    return web.json_response({"message": "Scan gestartet"})
|
|
|
|
async def _run_scan_single(path_id: int) -> None:
    """Background task: scan a single library path and log the summary."""
    result = await library_service.scan_single_path(path_id)
    # Lazy %-args: the message is only formatted if INFO logging is enabled.
    logging.info("Einzel-Scan Ergebnis: %s", result)
|
|
|
|
# === Videos abfragen ===
|
|
|
|
async def get_videos(request: web.Request) -> web.Response:
    """GET /api/library/videos?filter-params... - filtered, paginated list."""
    filter_keys = (
        "library_path_id", "media_type", "series_id",
        "video_codec", "min_width", "max_width",
        "container", "audio_lang", "audio_channels",
        "has_subtitle", "is_10bit", "sort", "order",
        "search", "not_converted", "exclude_container",
        "exclude_codec",
    )
    # Only keep query parameters that are present and non-empty.
    filters = {
        key: request.query.get(key)
        for key in filter_keys
        if request.query.get(key)
    }

    page = int(request.query.get("page", 1))
    limit = int(request.query.get("limit", 50))

    return web.json_response(
        await library_service.get_videos(filters, page, limit)
    )
|
|
|
|
async def get_movies(request: web.Request) -> web.Response:
    """GET /api/library/movies - movies only (no series episodes)."""
    filter_keys = (
        "video_codec", "min_width", "max_width",
        "container", "audio_lang", "audio_channels",
        "is_10bit", "sort", "order", "search",
    )
    # Only keep query parameters that are present and non-empty.
    filters = {
        key: request.query.get(key)
        for key in filter_keys
        if request.query.get(key)
    }

    page = int(request.query.get("page", 1))
    limit = int(request.query.get("limit", 50))

    return web.json_response(
        await library_service.get_movies(filters, page, limit)
    )
|
|
|
|
# === Serien ===
|
|
|
|
async def get_series(request: web.Request) -> web.Response:
    """GET /api/library/series - list series, optionally for one path."""
    raw_id = request.query.get("path_id")
    # Pass None/empty through unchanged; convert only a real value.
    series = await library_service.get_series_list(
        int(raw_id) if raw_id else raw_id
    )
    return web.json_response({"series": series})
|
|
|
|
async def get_series_detail(request: web.Request) -> web.Response:
    """GET /api/library/series/{series_id} - full detail for one series."""
    sid = int(request.match_info["series_id"])
    info = await library_service.get_series_detail(sid)
    if not info:
        return web.json_response({"error": "Serie nicht gefunden"}, status=404)
    return web.json_response(info)
|
|
|
|
async def delete_series(request: web.Request) -> web.Response:
    """DELETE /api/library/series/{series_id}?delete_files=1

    delete_files=1 also removes the files on disk.
    """
    sid = int(request.match_info["series_id"])
    remove_files = request.query.get("delete_files") == "1"
    outcome = await library_service.delete_series(
        sid, delete_files=remove_files
    )
    status = 404 if outcome.get("error") else 200
    return web.json_response(outcome, status=status)
|
|
|
|
async def get_missing_episodes(request: web.Request) -> web.Response:
    """GET /api/library/series/{series_id}/missing - missing episodes."""
    sid = int(request.match_info["series_id"])
    return web.json_response(
        {"missing": await library_service.get_missing_episodes(sid)}
    )
|
|
|
|
async def get_all_missing_episodes(request: web.Request) -> web.Response:
    """GET /api/library/missing-episodes?path_id=&page=&limit=

    All missing episodes across all series (for the filter view).
    """
    raw_path_id = request.query.get("path_id")
    page = int(request.query.get("page", 1))
    limit = int(request.query.get("limit", 50))

    path_id = int(raw_path_id) if raw_path_id else None
    outcome = await library_service.get_all_missing_episodes(
        path_id, page, limit
    )
    return web.json_response(outcome)
|
|
|
|
# === TVDB ===
|
|
|
|
async def post_tvdb_match(request: web.Request) -> web.Response:
    """POST /api/library/series/{series_id}/tvdb-match

    Links a series to the given TVDB entry. Body: {tvdb_id}
    """
    sid = int(request.match_info["series_id"])
    try:
        payload = await request.json()
    except Exception:
        return web.json_response({"error": "Ungueltiges JSON"}, status=400)

    tvdb_id = payload.get("tvdb_id")
    if not tvdb_id:
        return web.json_response(
            {"error": "tvdb_id erforderlich"}, status=400
        )

    outcome = await tvdb_service.match_and_update_series(
        sid, int(tvdb_id), library_service
    )
    status = 400 if outcome.get("error") else 200
    return web.json_response(outcome, status=status)
|
|
|
|
async def delete_tvdb_link(request: web.Request) -> web.Response:
    """DELETE /api/library/series/{series_id}/tvdb - unlink from TVDB."""
    sid = int(request.match_info["series_id"])
    if not await library_service.unlink_tvdb(sid):
        return web.json_response(
            {"error": "Serie nicht gefunden"}, status=404
        )
    return web.json_response({"message": "TVDB-Zuordnung geloest"})
|
|
|
|
async def post_tvdb_refresh(request: web.Request) -> web.Response:
    """POST /api/library/series/{series_id}/tvdb-refresh

    Re-runs the TVDB match using the already-stored TVDB id.
    """
    sid = int(request.match_info["series_id"])
    # Look up the stored TVDB id from the database.
    info = await library_service.get_series_detail(sid)
    tvdb_id = info.get("tvdb_id") if info else None
    if not tvdb_id:
        return web.json_response(
            {"error": "Keine TVDB-Zuordnung vorhanden"}, status=400
        )
    outcome = await tvdb_service.match_and_update_series(
        sid, tvdb_id, library_service
    )
    status = 400 if outcome.get("error") else 200
    return web.json_response(outcome, status=status)
|
|
|
|
async def get_tvdb_search(request: web.Request) -> web.Response:
    """GET /api/tvdb/search?q=Breaking+Bad&lang=eng

    lang: result language (deu, eng, ...); defaults to the configured one.
    """
    term = request.query.get("q", "").strip()
    language = request.query.get("lang", "").strip() or None
    if not term:
        return web.json_response(
            {"error": "Suchbegriff erforderlich"}, status=400
        )
    if not tvdb_service.is_configured:
        return web.json_response(
            {"error": "TVDB nicht konfiguriert (API Key fehlt)"},
            status=400,
        )
    found = await tvdb_service.search_series(term, language=language)
    return web.json_response({"results": found})
|
|
|
|
# === TVDB Metadaten ===
|
|
|
|
async def get_series_cast(request: web.Request) -> web.Response:
    """GET /api/library/series/{series_id}/cast - TVDB character list."""
    sid = int(request.match_info["series_id"])
    info = await library_service.get_series_detail(sid)
    tvdb_id = info.get("tvdb_id") if info else None
    # No TVDB link means no cast data is available; return an empty list.
    if not tvdb_id:
        return web.json_response({"cast": []})
    members = await tvdb_service.get_series_characters(tvdb_id)
    return web.json_response({"cast": members})
|
|
|
|
async def get_series_artworks(request: web.Request) -> web.Response:
    """GET /api/library/series/{series_id}/artworks - TVDB artwork list."""
    sid = int(request.match_info["series_id"])
    info = await library_service.get_series_detail(sid)
    tvdb_id = info.get("tvdb_id") if info else None
    # No TVDB link means no artwork is available; return an empty list.
    if not tvdb_id:
        return web.json_response({"artworks": []})
    art = await tvdb_service.get_series_artworks(tvdb_id)
    return web.json_response({"artworks": art})
|
|
|
|
async def post_metadata_download(request: web.Request) -> web.Response:
    """POST /api/library/series/{series_id}/metadata-download

    Downloads TVDB metadata into the series folder.
    """
    sid = int(request.match_info["series_id"])
    info = await library_service.get_series_detail(sid)
    if not info:
        return web.json_response(
            {"error": "Serie nicht gefunden"}, status=404
        )
    if not info.get("tvdb_id"):
        return web.json_response(
            {"error": "Keine TVDB-Zuordnung"}, status=400
        )
    outcome = await tvdb_service.download_metadata(
        sid, info["tvdb_id"], info.get("folder_path", "")
    )
    return web.json_response(outcome)
|
|
|
|
async def post_metadata_download_all(request: web.Request) -> web.Response:
    """POST /api/library/metadata-download-all

    Downloads metadata for every TVDB-linked series. Series without a
    TVDB link count as 'skipped', download failures as 'errors'.
    """
    series_list = await library_service.get_series_list()
    results = {"success": 0, "skipped": 0, "errors": 0}
    for s in series_list:
        if not s.get("tvdb_id"):
            results["skipped"] += 1
            continue
        try:
            await tvdb_service.download_metadata(
                s["id"], s["tvdb_id"], s.get("folder_path", "")
            )
            results["success"] += 1
        except Exception as e:
            # Was a silent swallow; log so failing series are diagnosable.
            logging.warning(
                "Metadaten-Download fehlgeschlagen fuer Serie %s: %s",
                s.get("id"), e,
            )
            results["errors"] += 1
    return web.json_response(results)
|
|
|
|
async def get_metadata_image(request: web.Request) -> web.Response:
    """GET /api/library/metadata/{series_id}/{filename}?w=300

    Serves images from the local .metadata/ folder; posters missing
    locally are downloaded on-demand from TVDB (or redirected to the
    external URL as a fallback). The optional ?w= parameter scales the
    image down to the given width and caches the resized JPEG.
    """
    import os
    import aiohttp as aiohttp_client

    series_id = int(request.match_info["series_id"])
    filename = request.match_info["filename"]
    detail = await library_service.get_series_detail(series_id)
    if not detail:
        return web.json_response(
            {"error": "Nicht gefunden"}, status=404
        )

    # Expected local location: <series folder>/.metadata/<filename>.
    folder_path = detail.get("folder_path", "")
    meta_dir = os.path.join(folder_path, ".metadata") if folder_path else ""
    file_path = os.path.join(meta_dir, filename) if meta_dir else ""

    # Local file missing? Download the poster on-demand from TVDB.
    if not file_path or not os.path.isfile(file_path):
        poster_url = detail.get("poster_url", "")
        if filename.startswith("poster") and poster_url and folder_path:
            os.makedirs(meta_dir, exist_ok=True)
            try:
                async with aiohttp_client.ClientSession() as session:
                    async with session.get(
                        poster_url,
                        timeout=aiohttp_client.ClientTimeout(total=15)
                    ) as resp:
                        if resp.status == 200:
                            data = await resp.read()
                            # NOTE(review): blocking file write inside an
                            # async handler; acceptable for small images.
                            with open(file_path, "wb") as f:
                                f.write(data)
                            logging.info(
                                f"Poster heruntergeladen: Serie {series_id}"
                                f" ({len(data)} Bytes)")
                        else:
                            # Download failed -> redirect to external URL.
                            raise web.HTTPFound(poster_url)
            except web.HTTPFound:
                # The redirect above is an exception; let it propagate.
                raise
            except Exception as e:
                logging.warning(
                    f"Poster-Download fehlgeschlagen fuer Serie "
                    f"{series_id}: {e}")
                if poster_url:
                    raise web.HTTPFound(poster_url)
                return web.json_response(
                    {"error": "Datei nicht gefunden"}, status=404
                )
        elif filename.startswith("poster") and poster_url:
            # No series folder on disk -> redirect to the external URL.
            raise web.HTTPFound(poster_url)
        else:
            # Not a poster, or no known external source: nothing to serve.
            return web.json_response(
                {"error": "Datei nicht gefunden"}, status=404
            )

    # Resize parameter: ?w=300 scales down to max. 300px width (cached).
    width_param = request.query.get("w")
    if width_param:
        try:
            target_w = int(width_param)
            # Only sane widths are honored; others fall through unscaled.
            if 50 <= target_w <= 1000:
                base, _ = os.path.splitext(filename)
                cache_name = f"{base}_w{target_w}.jpg"
                cache_path = os.path.join(meta_dir, cache_name)
                if not os.path.isfile(cache_path):
                    try:
                        from PIL import Image
                        with Image.open(file_path) as img:
                            # Never upscale: only shrink wider images.
                            if img.width > target_w:
                                ratio = target_w / img.width
                                new_h = int(img.height * ratio)
                                img = img.resize(
                                    (target_w, new_h), Image.LANCZOS
                                )
                            # JPEG has no alpha channel -> force RGB.
                            img = img.convert("RGB")
                            img.save(cache_path, "JPEG", quality=80)
                    except Exception as e:
                        # Resize failed (e.g. PIL missing/corrupt file):
                        # fall back to serving the original image.
                        logging.warning(
                            f"Poster-Resize fehlgeschlagen: {e}")
                        return web.FileResponse(file_path)
                return web.FileResponse(cache_path)
        except ValueError:
            # Non-numeric w= is ignored; serve the original below.
            pass

    return web.FileResponse(file_path)
|
|
|
|
# === Filme ===
|
|
|
|
async def get_movies_list(request: web.Request) -> web.Response:
    """GET /api/library/movies-list?path_id=X - flat movie list."""
    raw_id = request.query.get("path_id")
    # Pass None/empty through unchanged; convert only a real value.
    movies = await library_service.get_movie_list(
        int(raw_id) if raw_id else raw_id
    )
    return web.json_response({"movies": movies})
|
|
|
|
async def get_movie_detail(request: web.Request) -> web.Response:
    """GET /api/library/movies/{movie_id} - full detail for one movie."""
    mid = int(request.match_info["movie_id"])
    info = await library_service.get_movie_detail(mid)
    if not info:
        return web.json_response(
            {"error": "Film nicht gefunden"}, status=404
        )
    return web.json_response(info)
|
|
|
|
async def delete_movie(request: web.Request) -> web.Response:
    """DELETE /api/library/movies/{movie_id}?delete_files=1

    delete_files=1 also removes the files on disk.
    """
    mid = int(request.match_info["movie_id"])
    remove_files = request.query.get("delete_files") == "1"
    outcome = await library_service.delete_movie(
        mid, delete_files=remove_files
    )
    status = 404 if outcome.get("error") else 200
    return web.json_response(outcome, status=status)
|
|
|
|
async def post_movie_tvdb_match(request: web.Request) -> web.Response:
    """POST /api/library/movies/{movie_id}/tvdb-match

    Links a movie to the given TVDB entry. Body: {tvdb_id}
    """
    mid = int(request.match_info["movie_id"])
    try:
        payload = await request.json()
    except Exception:
        return web.json_response({"error": "Ungueltiges JSON"}, status=400)

    tvdb_id = payload.get("tvdb_id")
    if not tvdb_id:
        return web.json_response(
            {"error": "tvdb_id erforderlich"}, status=400
        )
    outcome = await tvdb_service.match_and_update_movie(
        mid, int(tvdb_id), library_service
    )
    status = 400 if outcome.get("error") else 200
    return web.json_response(outcome, status=status)
|
|
|
|
async def delete_movie_tvdb_link(request: web.Request) -> web.Response:
    """DELETE /api/library/movies/{movie_id}/tvdb - unlink from TVDB."""
    mid = int(request.match_info["movie_id"])
    if not await library_service.unlink_movie_tvdb(mid):
        return web.json_response(
            {"error": "Film nicht gefunden"}, status=404
        )
    return web.json_response({"message": "TVDB-Zuordnung geloest"})
|
|
|
|
async def get_tvdb_movie_search(request: web.Request) -> web.Response:
    """GET /api/tvdb/search-movies?q=Inception - search TVDB for movies."""
    term = request.query.get("q", "").strip()
    if not term:
        return web.json_response(
            {"error": "Suchbegriff erforderlich"}, status=400
        )
    if not tvdb_service.is_configured:
        return web.json_response(
            {"error": "TVDB nicht konfiguriert"}, status=400
        )
    found = await tvdb_service.search_movies(term)
    return web.json_response({"results": found})
|
|
|
|
# === TVDB Auto-Match (Review-Modus) ===
|
|
|
|
# Shared progress/result state for the TVDB auto-match background task.
# Written by post_tvdb_auto_match / its run_collect task and read by the
# status endpoint below.
_auto_match_state = {
    "active": False,      # True while a collection task is running
    "phase": "",          # "starting" | "series" | "movies" | "done" | "error"
    "done": 0,            # items processed in the current phase
    "total": 0,           # total items in the current phase
    "current": "",        # name of the item currently being processed
    "suggestions": None,  # collected suggestion list once phase == "done"
}
|
|
|
|
async def post_tvdb_auto_match(request: web.Request) -> web.Response:
    """POST /api/library/tvdb-auto-match?type=series|movies|all

    Collects TVDB suggestions in a background task (does NOT match
    automatically). Progress and results are exposed via
    _auto_match_state and the status endpoint.
    """
    # Only one collection run at a time.
    if _auto_match_state["active"]:
        return web.json_response(
            {"error": "Suche laeuft bereits"}, status=409
        )
    if not tvdb_service.is_configured:
        return web.json_response(
            {"error": "TVDB nicht konfiguriert"}, status=400
        )

    match_type = request.query.get("type", "all")
    # Reset shared state before launching the task.
    _auto_match_state.update({
        "active": True,
        "phase": "starting",
        "done": 0, "total": 0,
        "current": "",
        "suggestions": None,
    })

    async def run_collect():
        try:
            # Called by the service while iterating; mirrors progress
            # into the shared state for the status endpoint.
            async def progress_cb(done, total, name, count):
                _auto_match_state.update({
                    "done": done,
                    "total": total,
                    "current": name,
                })

            all_suggestions = []

            if match_type in ("series", "all"):
                _auto_match_state["phase"] = "series"
                _auto_match_state["done"] = 0
                s = await tvdb_service.collect_suggestions(
                    "series", progress_cb
                )
                all_suggestions.extend(s)

            if match_type in ("movies", "all"):
                _auto_match_state["phase"] = "movies"
                _auto_match_state["done"] = 0
                s = await tvdb_service.collect_suggestions(
                    "movies", progress_cb
                )
                all_suggestions.extend(s)

            # Publish results before flipping the phase so the status
            # endpoint never sees phase "done" without suggestions.
            _auto_match_state["suggestions"] = all_suggestions
            _auto_match_state["phase"] = "done"
        except Exception as e:
            logging.error(f"TVDB Vorschlaege sammeln fehlgeschlagen: {e}")
            _auto_match_state["phase"] = "error"
            _auto_match_state["suggestions"] = []
        finally:
            _auto_match_state["active"] = False

    asyncio.create_task(run_collect())
    return web.json_response({"message": "TVDB-Suche gestartet"})
|
|
|
|
async def get_tvdb_auto_match_status(
    request: web.Request
) -> web.Response:
    """GET /api/library/tvdb-auto-match-status - background task progress."""
    state = _auto_match_state
    payload = {
        key: state[key]
        for key in ("active", "phase", "done", "total", "current")
    }
    # Suggestions are attached only once collection has finished.
    if state["phase"] == "done":
        payload["suggestions"] = state["suggestions"]
    return web.json_response(payload)
|
|
|
|
async def post_tvdb_confirm(request: web.Request) -> web.Response:
    """POST /api/library/tvdb-confirm - confirm a single suggestion.

    Body: {id, type: 'series'|'movies', tvdb_id}
    """
    try:
        payload = await request.json()
    except Exception:
        return web.json_response({"error": "Ungueltiges JSON"}, status=400)

    item_id = payload.get("id")
    media_type = payload.get("type")
    tvdb_id = payload.get("tvdb_id")

    if not (item_id and media_type and tvdb_id):
        return web.json_response(
            {"error": "id, type und tvdb_id erforderlich"}, status=400
        )

    # Dispatch to the matching service method by media type.
    matchers = {
        "series": tvdb_service.match_and_update_series,
        "movies": tvdb_service.match_and_update_movie,
    }
    matcher = matchers.get(media_type)
    if matcher is None:
        return web.json_response(
            {"error": "type muss 'series' oder 'movies' sein"},
            status=400,
        )

    outcome = await matcher(int(item_id), int(tvdb_id), library_service)
    status = 400 if outcome.get("error") else 200
    return web.json_response(outcome, status=status)
|
|
|
|
# === TVDB Sprache ===
|
|
|
|
async def get_tvdb_language(request: web.Request) -> web.Response:
    """GET /api/tvdb/language - currently configured TVDB language."""
    lib_cfg = config.settings.get("library", {})
    return web.json_response(
        {"language": lib_cfg.get("tvdb_language", "deu")}
    )
|
|
|
|
async def put_tvdb_language(request: web.Request) -> web.Response:
    """PUT /api/tvdb/language - change the TVDB language.

    Body: {language: 3-letter code, e.g. 'deu'}
    """
    try:
        payload = await request.json()
    except Exception:
        return web.json_response({"error": "Ungueltiges JSON"}, status=400)

    lang = payload.get("language", "").strip()
    if len(lang) != 3:
        return web.json_response(
            {"error": "Sprache muss 3-Buchstaben-Code sein (z.B. deu)"},
            status=400,
        )

    # Persist into the config file.
    config.settings.setdefault("library", {})["tvdb_language"] = lang
    config.save_settings()
    return web.json_response(
        {"message": f"TVDB-Sprache auf '{lang}' gesetzt"}
    )
|
|
|
|
async def post_tvdb_refresh_all_episodes(
    request: web.Request,
) -> web.Response:
    """POST /api/library/tvdb-refresh-episodes

    Reloads the episode cache of every TVDB-linked series (e.g. after a
    language switch) and rewrites the stored episode titles.
    """
    if not tvdb_service.is_configured:
        return web.json_response(
            {"error": "TVDB nicht konfiguriert"}, status=400
        )
    series_list = await library_service.get_series_list()
    refreshed = 0
    for s in series_list:
        if not s.get("tvdb_id"):
            continue
        try:
            await tvdb_service.fetch_episodes(s["tvdb_id"])
            # NOTE(review): calls a private TVDBService helper — consider
            # exposing a public refresh method on the service instead.
            await tvdb_service._update_episode_titles(
                s["id"], s["tvdb_id"]
            )
            refreshed += 1
        except Exception as e:
            # Was a silent `pass`; log so failing series can be identified.
            logging.warning(
                "Episoden-Refresh fehlgeschlagen fuer Serie %s: %s",
                s.get("id"), e,
            )
    return web.json_response({
        "message": f"{refreshed} Serien-Episoden aktualisiert"
    })
|
|
|
|
# === Ordner-Ansicht ===
|
|
|
|
async def get_browse(request: web.Request) -> web.Response:
    """GET /api/library/browse?path=... - folder listing."""
    target = request.query.get("path") or None
    return web.json_response(await library_service.browse_path(target))
|
|
|
|
# === Duplikate ===
|
|
|
|
async def get_duplicates(request: web.Request) -> web.Response:
    """GET /api/library/duplicates - duplicate video candidates."""
    return web.json_response(
        {"duplicates": await library_service.find_duplicates()}
    )
|
|
|
|
# === Video loeschen ===
|
|
|
|
async def delete_video(request: web.Request) -> web.Response:
    """DELETE /api/library/videos/{video_id}?delete_file=1

    delete_file=1 also removes the file on disk.
    """
    vid = int(request.match_info["video_id"])
    remove_file = request.query.get("delete_file") == "1"
    outcome = await library_service.delete_video(
        vid, delete_file=remove_file
    )
    status = 404 if outcome.get("error") else 200
    return web.json_response(outcome, status=status)
|
|
|
|
# === Konvertierung aus Bibliothek ===
|
|
|
|
async def post_convert_video(request: web.Request) -> web.Response:
    """POST /api/library/videos/{video_id}/convert

    Looks up the video's file path in the database and enqueues a
    conversion job for it. Optional body: {preset}.
    """
    video_id = int(request.match_info["video_id"])

    pool = await library_service._get_pool()
    if not pool:
        return web.json_response(
            {"error": "Keine DB-Verbindung"}, status=500
        )

    # Resolve the video id to its file path.
    try:
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                await cur.execute(
                    "SELECT file_path FROM library_videos WHERE id = %s",
                    (video_id,)
                )
                row = await cur.fetchone()
                if not row:
                    return web.json_response(
                        {"error": "Video nicht gefunden"}, status=404
                    )
    except Exception as e:
        return web.json_response({"error": str(e)}, status=500)

    file_path = row[0]
    preset = None
    # The body is optional; a missing/invalid JSON body means "no preset".
    try:
        data = await request.json()
        preset = data.get("preset")
    except Exception:
        pass

    # Verify the file still exists on disk before queueing.
    import os
    if not os.path.exists(file_path):
        return web.json_response(
            {"error": f"Datei nicht gefunden: {file_path}"}, status=404
        )

    jobs = await queue_service.add_paths([file_path], preset)
    if jobs:
        return web.json_response({
            "message": "Konvertierung gestartet",
            "job_id": jobs[0].id,
        })
    return web.json_response(
        {"error": f"Job konnte nicht erstellt werden fuer: {file_path}"}, status=500
    )
|
|
|
|
# === Batch-Konvertierung Serie ===
|
|
|
|
async def post_convert_series(request: web.Request) -> web.Response:
    """POST /api/library/series/{series_id}/convert

    Queues conversion jobs for all episodes of a series that are not
    already in the target format.

    Body: {preset, target_codec, force_all, delete_old}
    - preset: encoding preset (optional, service default otherwise)
    - target_codec: codec to compare against (e.g. 'av1', 'hevc')
    - force_all: true = convert everything, false = only non-target codecs
    - delete_old: true = delete source files after conversion
    """
    series_id = int(request.match_info["series_id"])

    try:
        data = await request.json()
    except Exception:
        data = {}

    preset = data.get("preset")
    # `or "av1"` also covers explicit null/"" in the body, which would
    # otherwise crash on .lower() or make every codec match the empty string.
    target_codec = (data.get("target_codec") or "av1").lower()
    force_all = data.get("force_all", False)
    delete_old = data.get("delete_old", False)

    pool = await library_service._get_pool()
    if not pool:
        return web.json_response(
            {"error": "Keine DB-Verbindung"}, status=500
        )

    # Load all episodes of the series. (A second query fetching the
    # series folder existed here but its result was never used — removed.)
    try:
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                await cur.execute(
                    "SELECT id, file_path, video_codec "
                    "FROM library_videos WHERE series_id = %s",
                    (series_id,)
                )
                videos = await cur.fetchall()
    except Exception as e:
        return web.json_response({"error": str(e)}, status=500)

    if not videos:
        return web.json_response(
            {"error": "Keine Videos gefunden"}, status=404
        )

    # Codec aliases so e.g. 'libx265' counts as already-HEVC.
    codec_aliases = {
        "av1": ["av1", "libaom-av1", "libsvtav1", "av1_vaapi"],
        "hevc": ["hevc", "h265", "libx265", "hevc_vaapi"],
        "h264": ["h264", "avc", "libx264", "h264_vaapi"],
    }
    target_codecs = codec_aliases.get(target_codec, [target_codec])

    to_convert = []
    already_done = 0

    for vid_id, file_path, current_codec in videos:
        current = (current_codec or "").lower()
        is_target = any(tc in current for tc in target_codecs)

        if force_all or not is_target:
            to_convert.append(file_path)
        else:
            already_done += 1

    if not to_convert:
        return web.json_response({
            "message": "Alle Episoden sind bereits im Zielformat",
            "already_done": already_done,
            "queued": 0,
        })

    # Create jobs with the delete_source option.
    jobs = await queue_service.add_paths(
        to_convert, preset, delete_source=delete_old
    )

    return web.json_response({
        "message": f"{len(jobs)} Episoden zur Konvertierung hinzugefuegt",
        "queued": len(jobs),
        "already_done": already_done,
        "skipped": len(videos) - len(jobs) - already_done,
        "delete_old": delete_old,
    })
|
|
|
|
async def post_cleanup_series_folder(request: web.Request) -> web.Response:
    """POST /api/library/series/{series_id}/cleanup

    Deletes every file in the series folder EXCEPT:
    - videos registered in the library
    - the .metadata directory and its contents
    - .nfo files (and other protected extensions below)
    """
    import os
    series_id = int(request.match_info["series_id"])

    pool = await library_service._get_pool()
    if not pool:
        return web.json_response(
            {"error": "Keine DB-Verbindung"}, status=500
        )

    try:
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                # Resolve the series folder on disk.
                await cur.execute(
                    "SELECT folder_path FROM library_series WHERE id = %s",
                    (series_id,)
                )
                row = await cur.fetchone()
                if not row:
                    return web.json_response(
                        {"error": "Serie nicht gefunden"}, status=404
                    )
                series_folder = row[0]

                # All registered videos of the series — these are kept.
                await cur.execute(
                    "SELECT file_path FROM library_videos WHERE series_id = %s",
                    (series_id,)
                )
                keep_files = {r[0] for r in await cur.fetchall()}
    except Exception as e:
        return web.json_response({"error": str(e)}, status=500)

    if not series_folder or not os.path.isdir(series_folder):
        return web.json_response(
            {"error": "Serien-Ordner nicht gefunden"}, status=404
        )

    # Protected directories and file extensions (never deleted).
    protected_dirs = {".metadata", "@eaDir", ".AppleDouble"}
    protected_extensions = {".nfo", ".jpg", ".jpeg", ".png", ".xml"}

    deleted = 0
    errors = []

    for root, dirs, files in os.walk(series_folder, topdown=True):
        # Prune protected directories in-place so os.walk skips them.
        dirs[:] = [d for d in dirs if d not in protected_dirs]

        for f in files:
            file_path = os.path.join(root, f)
            ext = os.path.splitext(f)[1].lower()

            # Keep when:
            # - registered in the library
            # - protected extension
            # - hidden file
            if file_path in keep_files:
                continue
            if ext in protected_extensions:
                continue
            if f.startswith("."):
                continue

            # Delete; collect per-file errors instead of aborting.
            try:
                os.remove(file_path)
                deleted += 1
                logging.info(f"Cleanup geloescht: {file_path}")
            except Exception as e:
                errors.append(f"{f}: {e}")

    return web.json_response({
        "deleted": deleted,
        "errors": len(errors),
        "error_details": errors[:10],  # show at most 10 errors
    })
|
|
|
|
async def post_delete_folder(request: web.Request) -> web.Response:
|
|
"""POST /api/library/delete-folder
|
|
Loescht einen kompletten Ordner (Season-Ordner etc.) inkl. DB-Eintraege.
|
|
Body: {folder_path: "/mnt/.../Season 01"}
|
|
ACHTUNG: Unwiderruflich!
|
|
"""
|
|
import os
|
|
import shutil
|
|
try:
|
|
data = await request.json()
|
|
except Exception:
|
|
return web.json_response(
|
|
{"error": "Ungueltiges JSON"}, status=400
|
|
)
|
|
|
|
folder_path = data.get("folder_path", "").strip()
|
|
if not folder_path:
|
|
return web.json_response(
|
|
{"error": "folder_path erforderlich"}, status=400
|
|
)
|
|
|
|
# Sicherheitspruefung: Muss unter einem Library-Pfad liegen
|
|
pool = await library_service._get_pool()
|
|
if not pool:
|
|
return web.json_response(
|
|
{"error": "Keine DB-Verbindung"}, status=500
|
|
)
|
|
|
|
allowed = False
|
|
try:
|
|
async with pool.acquire() as conn:
|
|
async with conn.cursor() as cur:
|
|
await cur.execute(
|
|
"SELECT path FROM library_paths WHERE enabled = 1"
|
|
)
|
|
paths = await cur.fetchall()
|
|
for (lib_path,) in paths:
|
|
if folder_path.startswith(lib_path):
|
|
allowed = True
|
|
break
|
|
except Exception as e:
|
|
return web.json_response({"error": str(e)}, status=500)
|
|
|
|
if not allowed:
|
|
return web.json_response(
|
|
{"error": "Ordner liegt nicht in einem Bibliothekspfad"},
|
|
status=403
|
|
)
|
|
|
|
if not os.path.isdir(folder_path):
|
|
return web.json_response(
|
|
{"error": "Ordner nicht gefunden"}, status=404
|
|
)
|
|
|
|
# Zaehlen was geloescht wird
|
|
deleted_files = 0
|
|
deleted_dirs = 0
|
|
errors = []
|
|
|
|
# Zuerst alle Dateien zaehlen
|
|
for root, dirs, files in os.walk(folder_path):
|
|
deleted_files += len(files)
|
|
deleted_dirs += len(dirs)
|
|
|
|
# DB-Eintraege loeschen (Videos in diesem Ordner)
|
|
db_removed = 0
|
|
try:
|
|
async with pool.acquire() as conn:
|
|
async with conn.cursor() as cur:
|
|
# Videos loeschen deren file_path mit folder_path beginnt
|
|
await cur.execute(
|
|
"DELETE FROM library_videos "
|
|
"WHERE file_path LIKE %s",
|
|
(folder_path + "%",)
|
|
)
|
|
db_removed = cur.rowcount
|
|
except Exception as e:
|
|
errors.append(f"DB-Fehler: {e}")
|
|
|
|
# Ordner loeschen (onerror fuer SMB/CIFS Permission-Probleme)
|
|
def _rm_error(func, path, exc_info):
|
|
"""Bei Permission-Fehler: Schreibrechte setzen und nochmal versuchen"""
|
|
import stat
|
|
try:
|
|
os.chmod(path, stat.S_IRWXU)
|
|
func(path)
|
|
except Exception as e2:
|
|
errors.append(f"{path}: {e2}")
|
|
|
|
try:
|
|
shutil.rmtree(folder_path, onerror=_rm_error)
|
|
if os.path.exists(folder_path):
|
|
# Ordner existiert noch -> nicht alles geloescht
|
|
logging.warning(
|
|
f"Ordner teilweise geloescht: {folder_path} "
|
|
f"({len(errors)} Fehler)"
|
|
)
|
|
else:
|
|
logging.info(f"Ordner geloescht: {folder_path}")
|
|
except Exception as e:
|
|
logging.error(f"Ordner loeschen fehlgeschlagen: {e}")
|
|
return web.json_response(
|
|
{"error": f"Loeschen fehlgeschlagen: {e}"}, status=500
|
|
)
|
|
|
|
return web.json_response({
|
|
"deleted_files": deleted_files,
|
|
"deleted_dirs": deleted_dirs,
|
|
"db_removed": db_removed,
|
|
"errors": errors,
|
|
})
|
|
|
|
async def get_series_convert_status(request: web.Request) -> web.Response:
|
|
"""GET /api/library/series/{series_id}/convert-status
|
|
Zeigt Codec-Status aller Episoden einer Serie."""
|
|
series_id = int(request.match_info["series_id"])
|
|
|
|
pool = await library_service._get_pool()
|
|
if not pool:
|
|
return web.json_response(
|
|
{"error": "Keine DB-Verbindung"}, status=500
|
|
)
|
|
|
|
try:
|
|
async with pool.acquire() as conn:
|
|
async with conn.cursor() as cur:
|
|
await cur.execute(
|
|
"SELECT id, file_name, video_codec, season_number, "
|
|
"episode_number FROM library_videos "
|
|
"WHERE series_id = %s ORDER BY season_number, episode_number",
|
|
(series_id,)
|
|
)
|
|
videos = await cur.fetchall()
|
|
except Exception as e:
|
|
return web.json_response({"error": str(e)}, status=500)
|
|
|
|
# Codec-Statistik
|
|
codec_counts = {}
|
|
episodes = []
|
|
for vid_id, name, codec, season, episode in videos:
|
|
codec_lower = (codec or "unknown").lower()
|
|
codec_counts[codec_lower] = codec_counts.get(codec_lower, 0) + 1
|
|
episodes.append({
|
|
"id": vid_id,
|
|
"name": name,
|
|
"codec": codec,
|
|
"season": season,
|
|
"episode": episode,
|
|
})
|
|
|
|
return web.json_response({
|
|
"total": len(videos),
|
|
"codec_counts": codec_counts,
|
|
"episodes": episodes,
|
|
})
|
|
|
|
# === Statistiken ===
|
|
|
|
async def get_library_stats(request: web.Request) -> web.Response:
|
|
"""GET /api/library/stats"""
|
|
stats = await library_service.get_stats()
|
|
return web.json_response(stats)
|
|
|
|
# === Scan-Status ===
|
|
|
|
async def get_scan_status(request: web.Request) -> web.Response:
|
|
"""GET /api/library/scan-status"""
|
|
return web.json_response(library_service._scan_progress)
|
|
|
|
# === Clean-Funktion ===
|
|
|
|
async def get_clean_scan(request: web.Request) -> web.Response:
|
|
"""GET /api/library/clean/scan?path_id="""
|
|
if not cleaner_service:
|
|
return web.json_response(
|
|
{"error": "Clean-Service nicht verfuegbar"}, status=500
|
|
)
|
|
path_id = request.query.get("path_id")
|
|
result = await cleaner_service.scan_for_junk(
|
|
int(path_id) if path_id else None
|
|
)
|
|
return web.json_response(result)
|
|
|
|
async def post_clean_delete(request: web.Request) -> web.Response:
|
|
"""POST /api/library/clean/delete"""
|
|
if not cleaner_service:
|
|
return web.json_response(
|
|
{"error": "Clean-Service nicht verfuegbar"}, status=500
|
|
)
|
|
try:
|
|
data = await request.json()
|
|
except Exception:
|
|
return web.json_response(
|
|
{"error": "Ungueltiges JSON"}, status=400
|
|
)
|
|
files = data.get("files", [])
|
|
if not files:
|
|
return web.json_response(
|
|
{"error": "Keine Dateien angegeben"}, status=400
|
|
)
|
|
result = await cleaner_service.delete_files(files)
|
|
return web.json_response(result)
|
|
|
|
async def post_clean_empty_dirs(request: web.Request) -> web.Response:
|
|
"""POST /api/library/clean/empty-dirs"""
|
|
if not cleaner_service:
|
|
return web.json_response(
|
|
{"error": "Clean-Service nicht verfuegbar"}, status=500
|
|
)
|
|
try:
|
|
data = await request.json()
|
|
except Exception:
|
|
data = {}
|
|
path_id = data.get("path_id")
|
|
count = await cleaner_service.delete_empty_dirs(
|
|
int(path_id) if path_id else None
|
|
)
|
|
return web.json_response({"deleted_dirs": count})
|
|
|
|
# === Filesystem-Browser (fuer Import) ===
|
|
|
|
async def get_browse_fs(request: web.Request) -> web.Response:
|
|
"""GET /api/library/browse-fs?path=... - Echten Filesystem-Browser"""
|
|
import os
|
|
from app.services.library import VIDEO_EXTENSIONS
|
|
|
|
path = request.query.get("path", "/mnt")
|
|
|
|
# Sicherheits-Check: Nur unter /mnt erlauben
|
|
real = os.path.realpath(path)
|
|
if not real.startswith("/mnt"):
|
|
return web.json_response(
|
|
{"error": "Zugriff nur auf /mnt erlaubt"}, status=403
|
|
)
|
|
|
|
if not os.path.isdir(real):
|
|
return web.json_response(
|
|
{"error": "Ordner nicht gefunden"}, status=404
|
|
)
|
|
|
|
folders = []
|
|
video_count = 0
|
|
video_size = 0
|
|
|
|
try:
|
|
entries = sorted(os.scandir(real), key=lambda e: e.name.lower())
|
|
for entry in entries:
|
|
if entry.name.startswith("."):
|
|
continue
|
|
if entry.is_dir(follow_symlinks=True):
|
|
# Schnelle Zaehlung: Videos im Unterordner
|
|
sub_vids = 0
|
|
try:
|
|
for sub in os.scandir(entry.path):
|
|
if sub.is_file():
|
|
ext = os.path.splitext(sub.name)[1].lower()
|
|
if ext in VIDEO_EXTENSIONS:
|
|
sub_vids += 1
|
|
except PermissionError:
|
|
pass
|
|
folders.append({
|
|
"name": entry.name,
|
|
"path": entry.path,
|
|
"video_count": sub_vids,
|
|
})
|
|
elif entry.is_file():
|
|
ext = os.path.splitext(entry.name)[1].lower()
|
|
if ext in VIDEO_EXTENSIONS:
|
|
video_count += 1
|
|
try:
|
|
video_size += entry.stat().st_size
|
|
except OSError:
|
|
pass
|
|
except PermissionError:
|
|
return web.json_response(
|
|
{"error": "Keine Berechtigung"}, status=403
|
|
)
|
|
|
|
# Breadcrumb
|
|
parts = real.split("/")
|
|
breadcrumb = []
|
|
for i in range(1, len(parts)):
|
|
crumb_path = "/".join(parts[:i + 1]) or "/"
|
|
breadcrumb.append({
|
|
"name": parts[i],
|
|
"path": crumb_path,
|
|
})
|
|
|
|
return web.json_response({
|
|
"current_path": real,
|
|
"folders": folders,
|
|
"video_count": video_count,
|
|
"video_size": video_size,
|
|
"breadcrumb": breadcrumb,
|
|
})
|
|
|
|
# === Import-Funktion ===
|
|
|
|
async def post_create_import(request: web.Request) -> web.Response:
|
|
"""POST /api/library/import"""
|
|
if not importer_service:
|
|
return web.json_response(
|
|
{"error": "Import-Service nicht verfuegbar"}, status=500
|
|
)
|
|
try:
|
|
data = await request.json()
|
|
except Exception:
|
|
return web.json_response(
|
|
{"error": "Ungueltiges JSON"}, status=400
|
|
)
|
|
source = data.get("source_path", "").strip()
|
|
target_id = data.get("target_library_id")
|
|
mode = data.get("mode", "copy")
|
|
|
|
if not source or not target_id:
|
|
return web.json_response(
|
|
{"error": "source_path und target_library_id erforderlich"},
|
|
status=400,
|
|
)
|
|
|
|
job_id = await importer_service.create_job(
|
|
source, int(target_id), mode
|
|
)
|
|
if job_id:
|
|
return web.json_response(
|
|
{"message": "Import-Job erstellt", "job_id": job_id}
|
|
)
|
|
return web.json_response(
|
|
{"error": "Keine Videos gefunden oder Fehler"}, status=400
|
|
)
|
|
|
|
async def post_analyze_import(request: web.Request) -> web.Response:
|
|
"""POST /api/library/import/{job_id}/analyze"""
|
|
if not importer_service:
|
|
return web.json_response(
|
|
{"error": "Import-Service nicht verfuegbar"}, status=500
|
|
)
|
|
try:
|
|
job_id = int(request.match_info["job_id"])
|
|
except (ValueError, KeyError):
|
|
return web.json_response(
|
|
{"error": "Ungueltige Job-ID"}, status=400
|
|
)
|
|
result = await importer_service.analyze_job(job_id)
|
|
return web.json_response(result)
|
|
|
|
async def get_import_jobs(request: web.Request) -> web.Response:
|
|
"""GET /api/library/import - Liste aller Import-Jobs"""
|
|
if not importer_service:
|
|
return web.json_response(
|
|
{"error": "Import-Service nicht verfuegbar"}, status=500
|
|
)
|
|
jobs = await importer_service.get_all_jobs()
|
|
return web.json_response({"jobs": jobs})
|
|
|
|
async def get_import_status(request: web.Request) -> web.Response:
|
|
"""GET /api/library/import/{job_id}"""
|
|
if not importer_service:
|
|
return web.json_response(
|
|
{"error": "Import-Service nicht verfuegbar"}, status=500
|
|
)
|
|
try:
|
|
job_id = int(request.match_info["job_id"])
|
|
except (ValueError, KeyError):
|
|
return web.json_response(
|
|
{"error": "Ungueltige Job-ID"}, status=400
|
|
)
|
|
result = await importer_service.get_job_status(job_id)
|
|
return web.json_response(result)
|
|
|
|
async def delete_import_job(request: web.Request) -> web.Response:
|
|
"""DELETE /api/library/import/{job_id}"""
|
|
if not importer_service:
|
|
return web.json_response(
|
|
{"error": "Import-Service nicht verfuegbar"}, status=500
|
|
)
|
|
try:
|
|
job_id = int(request.match_info["job_id"])
|
|
except (ValueError, KeyError):
|
|
return web.json_response(
|
|
{"error": "Ungueltige Job-ID"}, status=400
|
|
)
|
|
result = await importer_service.delete_job(job_id)
|
|
if "error" in result:
|
|
return web.json_response(result, status=400)
|
|
return web.json_response(result)
|
|
|
|
    # Strong references protect background tasks from garbage collection:
    # the event loop only keeps weak refs, so tasks without another owner
    # can be collected mid-flight. Entries remove themselves on completion.
    _background_tasks: set = set()
|
|
|
|
async def post_execute_import(request: web.Request) -> web.Response:
|
|
"""POST /api/library/import/{job_id}/execute
|
|
Startet Import async im Hintergrund, antwortet sofort."""
|
|
if not importer_service:
|
|
return web.json_response(
|
|
{"error": "Import-Service nicht verfuegbar"}, status=500
|
|
)
|
|
try:
|
|
job_id = int(request.match_info["job_id"])
|
|
except (ValueError, KeyError):
|
|
return web.json_response(
|
|
{"error": "Ungueltige Job-ID"}, status=400
|
|
)
|
|
|
|
# Import im Hintergrund starten (blockiert nicht den Response)
|
|
async def _run_import():
|
|
try:
|
|
await importer_service.execute_import(job_id)
|
|
except Exception as e:
|
|
logging.error(f"Hintergrund-Import {job_id} fehlgeschlagen: {e}")
|
|
|
|
task = asyncio.create_task(_run_import())
|
|
_background_tasks.add(task)
|
|
task.add_done_callback(_background_tasks.discard)
|
|
return web.json_response({"ok": True, "job_id": job_id})
|
|
|
|
async def put_import_item(request: web.Request) -> web.Response:
|
|
"""PUT /api/library/import/items/{item_id}"""
|
|
if not importer_service:
|
|
return web.json_response(
|
|
{"error": "Import-Service nicht verfuegbar"}, status=500
|
|
)
|
|
item_id = int(request.match_info["item_id"])
|
|
try:
|
|
data = await request.json()
|
|
except Exception:
|
|
return web.json_response(
|
|
{"error": "Ungueltiges JSON"}, status=400
|
|
)
|
|
success = await importer_service.update_item(item_id, **data)
|
|
if success:
|
|
return web.json_response({"message": "Item aktualisiert"})
|
|
return web.json_response(
|
|
{"error": "Aktualisierung fehlgeschlagen"}, status=400
|
|
)
|
|
|
|
async def put_resolve_conflict(request: web.Request) -> web.Response:
|
|
"""PUT /api/library/import/items/{item_id}/resolve"""
|
|
if not importer_service:
|
|
return web.json_response(
|
|
{"error": "Import-Service nicht verfuegbar"}, status=500
|
|
)
|
|
item_id = int(request.match_info["item_id"])
|
|
try:
|
|
data = await request.json()
|
|
except Exception:
|
|
return web.json_response(
|
|
{"error": "Ungueltiges JSON"}, status=400
|
|
)
|
|
action = data.get("action", "")
|
|
success = await importer_service.resolve_conflict(item_id, action)
|
|
if success:
|
|
return web.json_response({"message": "Konflikt geloest"})
|
|
return web.json_response(
|
|
{"error": "Ungueltige Aktion"}, status=400
|
|
)
|
|
|
|
# === Video-Streaming ===
|
|
|
|
    # Audio codecs browsers can decode natively (no transcoding needed).
    # Anything else is re-encoded to AAC by the streaming endpoint.
    _BROWSER_AUDIO_CODECS = {"aac", "mp3", "opus", "vorbis", "flac"}
|
|
|
|
    async def get_stream_video(request: web.Request) -> web.StreamResponse:
        """GET /api/library/videos/{video_id}/stream?quality=hd&audio=0&t=0

        Streams a video as fragmented MP4 piped live from ffmpeg, with
        configurable quality and audio track. Video is copied unless a
        downscale is required; audio is copied unless the codec is not
        browser-compatible or the channel layout changes.

        Query parameters:
            quality: uhd|hd|sd|low (default: hd)
            audio:   audio track index (default: 0)
            t:       seek position in seconds (default: 0)
            sound:   stereo|surround|original (default: stereo)
        """
        import os
        import asyncio as _asyncio

        video_id = int(request.match_info["video_id"])

        pool = await library_service._get_pool()
        if not pool:
            return web.json_response(
                {"error": "Keine DB-Verbindung"}, status=500
            )

        try:
            async with pool.acquire() as conn:
                async with conn.cursor(aiomysql.DictCursor) as cur:
                    await cur.execute(
                        "SELECT file_path, width, height, video_codec, "
                        "audio_tracks, container, file_size "
                        "FROM library_videos WHERE id = %s",
                        (video_id,)
                    )
                    video = await cur.fetchone()
                    if not video:
                        return web.json_response(
                            {"error": "Video nicht gefunden"}, status=404
                        )
        except Exception as e:
            return web.json_response({"error": str(e)}, status=500)

        file_path = video["file_path"]
        if not os.path.isfile(file_path):
            return web.json_response(
                {"error": "Datei nicht gefunden"}, status=404
            )

        # Audio tracks are stored as a JSON string in the DB.
        audio_tracks = video.get("audio_tracks") or "[]"
        if isinstance(audio_tracks, str):
            audio_tracks = json.loads(audio_tracks)

        # Query parameters
        quality = request.query.get("quality", "hd")
        audio_idx = int(request.query.get("audio", "0"))
        seek_sec = float(request.query.get("t", "0"))
        sound_mode = request.query.get("sound", "stereo")

        # Pick the audio track; out-of-range indexes fall back to track 0.
        if audio_idx >= len(audio_tracks):
            audio_idx = 0
        audio_info = audio_tracks[audio_idx] if audio_tracks else {}
        audio_codec = audio_info.get("codec", "unknown")
        audio_channels = audio_info.get("channels", 2)

        # Target resolution: downscale only, never upscale; "uhd" passes
        # the source through untouched.
        orig_h = video.get("height") or 1080
        quality_heights = {"uhd": 2160, "hd": 1080, "sd": 720, "low": 480}
        target_h = quality_heights.get(quality, 1080)
        needs_video_scale = orig_h > target_h and quality != "uhd"

        # Audio transcoding: needed when the codec is not browser-safe
        needs_audio_transcode = audio_codec not in _BROWSER_AUDIO_CODECS

        # Sound mode decides the output channel count
        if sound_mode == "stereo":
            out_channels = 2
        elif sound_mode == "surround":
            out_channels = min(audio_channels, 8)
        else:  # original
            out_channels = audio_channels

        # A channel-count change always forces transcoding
        if out_channels != audio_channels:
            needs_audio_transcode = True

        # Build the ffmpeg command (-ss before -i = fast input seek)
        cmd = ["ffmpeg", "-hide_banner", "-loglevel", "error"]
        if seek_sec > 0:
            cmd += ["-ss", str(seek_sec)]
        cmd += ["-i", file_path]

        # Video mapping and codec
        cmd += ["-map", "0:v:0"]
        if needs_video_scale:
            # Lower qualities get a higher CRF (smaller/worse output).
            crf = {"sd": "23", "low": "28"}.get(quality, "20")
            cmd += [
                "-c:v", "libx264", "-preset", "fast",
                "-crf", crf,
                "-vf", f"scale=-2:{target_h}",
            ]
        else:
            cmd += ["-c:v", "copy"]

        # Audio mapping and codec
        cmd += ["-map", f"0:a:{audio_idx}"]
        if needs_audio_transcode:
            # ~64 kbit/s per channel beyond the common stereo/mono presets.
            bitrate = {1: "96k", 2: "192k"}.get(
                out_channels, f"{out_channels * 64}k")
            cmd += ["-c:a", "aac", "-ac", str(out_channels),
                    "-b:a", bitrate]
        else:
            cmd += ["-c:a", "copy"]

        # Container: fragmented MP4 so the browser can start playback
        # while the stream is still being produced.
        cmd += [
            "-movflags", "frag_keyframe+empty_moov+default_base_moof",
            "-frag_duration", "1000000",
            "-f", "mp4",
            "pipe:1",
        ]

        resp = web.StreamResponse(
            status=200,
            headers={
                "Content-Type": "video/mp4",
                "Cache-Control": "no-cache",
                "Transfer-Encoding": "chunked",
            },
        )
        await resp.prepare(request)

        proc = None
        try:
            proc = await _asyncio.create_subprocess_exec(
                *cmd,
                stdout=_asyncio.subprocess.PIPE,
                stderr=_asyncio.subprocess.PIPE,
            )

            chunk_size = 256 * 1024  # 256 KB
            while True:
                chunk = await proc.stdout.read(chunk_size)
                if not chunk:
                    break
                try:
                    await resp.write(chunk)
                except (ConnectionResetError, ConnectionAbortedError):
                    # Client disconnected -- stop pumping; ffmpeg is
                    # reaped in the finally block below.
                    break

        except Exception as e:
            logging.error(f"Stream-Fehler: {e}")
        finally:
            # Always terminate ffmpeg, even on early exit/disconnect.
            if proc and proc.returncode is None:
                proc.kill()
                await proc.wait()

        await resp.write_eof()
        return resp
|
|
|
|
# === Untertitel-Extraktion ===
|
|
|
|
async def get_subtitle_track(request: web.Request) -> web.Response:
|
|
"""GET /api/library/videos/{video_id}/subtitles/{track_index}
|
|
Extrahiert Untertitel als WebVTT per ffmpeg."""
|
|
import os
|
|
import asyncio as _asyncio
|
|
|
|
video_id = int(request.match_info["video_id"])
|
|
track_idx = int(request.match_info["track_index"])
|
|
|
|
pool = await library_service._get_pool()
|
|
if not pool:
|
|
return web.json_response(
|
|
{"error": "Keine DB-Verbindung"}, status=500)
|
|
|
|
try:
|
|
async with pool.acquire() as conn:
|
|
async with conn.cursor(aiomysql.DictCursor) as cur:
|
|
await cur.execute(
|
|
"SELECT file_path, subtitle_tracks "
|
|
"FROM library_videos WHERE id = %s", (video_id,))
|
|
video = await cur.fetchone()
|
|
if not video:
|
|
return web.json_response(
|
|
{"error": "Video nicht gefunden"}, status=404)
|
|
except Exception as e:
|
|
return web.json_response({"error": str(e)}, status=500)
|
|
|
|
file_path = video["file_path"]
|
|
if not os.path.isfile(file_path):
|
|
return web.json_response(
|
|
{"error": "Datei nicht gefunden"}, status=404)
|
|
|
|
sub_tracks = video.get("subtitle_tracks") or "[]"
|
|
if isinstance(sub_tracks, str):
|
|
sub_tracks = json.loads(sub_tracks)
|
|
|
|
if track_idx >= len(sub_tracks):
|
|
return web.json_response(
|
|
{"error": "Untertitel-Track nicht gefunden"}, status=404)
|
|
|
|
sub = sub_tracks[track_idx]
|
|
if sub.get("codec") in ("hdmv_pgs_subtitle", "dvd_subtitle",
|
|
"pgs", "vobsub"):
|
|
return web.json_response(
|
|
{"error": "Bild-basierte Untertitel nicht unterstuetzt"},
|
|
status=400)
|
|
|
|
cmd = [
|
|
"ffmpeg", "-hide_banner", "-loglevel", "error",
|
|
"-i", file_path,
|
|
"-map", f"0:s:{track_idx}",
|
|
"-f", "webvtt", "pipe:1",
|
|
]
|
|
|
|
try:
|
|
proc = await _asyncio.create_subprocess_exec(
|
|
*cmd,
|
|
stdout=_asyncio.subprocess.PIPE,
|
|
stderr=_asyncio.subprocess.PIPE,
|
|
)
|
|
stdout, stderr = await proc.communicate()
|
|
|
|
if proc.returncode != 0:
|
|
logging.error(
|
|
f"Untertitel-Extraktion fehlgeschlagen: "
|
|
f"{stderr.decode('utf-8', errors='replace')}")
|
|
return web.json_response(
|
|
{"error": "Extraktion fehlgeschlagen"}, status=500)
|
|
|
|
return web.Response(
|
|
body=stdout,
|
|
content_type="text/vtt",
|
|
charset="utf-8",
|
|
headers={"Cache-Control": "public, max-age=86400"},
|
|
)
|
|
except Exception as e:
|
|
logging.error(f"Untertitel-Fehler: {e}")
|
|
return web.json_response({"error": str(e)}, status=500)
|
|
|
|
# === Video-Info API (fuer Player-UI) ===
|
|
|
|
async def get_video_info(request: web.Request) -> web.Response:
|
|
"""GET /api/library/videos/{video_id}/info
|
|
Audio-/Untertitel-Tracks und Video-Infos fuer Player-Overlay."""
|
|
video_id = int(request.match_info["video_id"])
|
|
|
|
pool = await library_service._get_pool()
|
|
if not pool:
|
|
return web.json_response(
|
|
{"error": "Keine DB-Verbindung"}, status=500)
|
|
|
|
try:
|
|
async with pool.acquire() as conn:
|
|
async with conn.cursor(aiomysql.DictCursor) as cur:
|
|
await cur.execute("""
|
|
SELECT id, file_name, width, height, video_codec,
|
|
audio_tracks, subtitle_tracks, container,
|
|
duration_sec, video_bitrate, is_10bit, hdr,
|
|
series_id, season_number, episode_number
|
|
FROM library_videos WHERE id = %s
|
|
""", (video_id,))
|
|
video = await cur.fetchone()
|
|
if not video:
|
|
return web.json_response(
|
|
{"error": "Video nicht gefunden"}, status=404)
|
|
except Exception as e:
|
|
return web.json_response({"error": str(e)}, status=500)
|
|
|
|
# JSON-Felder parsen
|
|
for field in ("audio_tracks", "subtitle_tracks"):
|
|
val = video.get(field)
|
|
if isinstance(val, str):
|
|
video[field] = json.loads(val)
|
|
elif val is None:
|
|
video[field] = []
|
|
|
|
# Bild-basierte Untertitel rausfiltern
|
|
video["subtitle_tracks"] = [
|
|
s for s in video["subtitle_tracks"]
|
|
if s.get("codec") not in (
|
|
"hdmv_pgs_subtitle", "dvd_subtitle", "pgs", "vobsub"
|
|
)
|
|
]
|
|
|
|
return web.json_response(video)
|
|
|
|
# === Episoden-Thumbnails ===
|
|
|
|
async def _save_thumbnail_to_db(pool, video_id, thumb_path, source):
|
|
"""Speichert Thumbnail-Pfad in der DB."""
|
|
async with pool.acquire() as conn:
|
|
async with conn.cursor() as cur:
|
|
await cur.execute("""
|
|
INSERT INTO tv_episode_thumbnails
|
|
(video_id, thumbnail_path, source)
|
|
VALUES (%s, %s, %s)
|
|
ON DUPLICATE KEY UPDATE
|
|
thumbnail_path = VALUES(thumbnail_path),
|
|
source = VALUES(source)
|
|
""", (video_id, thumb_path, source))
|
|
|
|
async def _download_tvdb_image(url, thumb_path):
|
|
"""Laedt TVDB-Bild herunter und speichert es lokal."""
|
|
import aiohttp as _aiohttp
|
|
try:
|
|
async with _aiohttp.ClientSession() as session:
|
|
async with session.get(url, timeout=_aiohttp.ClientTimeout(total=15)) as resp:
|
|
if resp.status == 200:
|
|
data = await resp.read()
|
|
if len(data) > 100: # Kein leeres/fehlerhaftes Bild
|
|
with open(thumb_path, "wb") as f:
|
|
f.write(data)
|
|
return True
|
|
except Exception as e:
|
|
logging.debug(f"TVDB-Bild Download fehlgeschlagen: {e}")
|
|
return False
|
|
|
|
    async def get_video_thumbnail(request: web.Request) -> web.Response:
        """GET /api/library/videos/{video_id}/thumbnail

        Returns the episode thumbnail. Priority: local cache > TVDB
        download > ffmpeg frame grab. Successful results are persisted
        in tv_episode_thumbnails and client-cached for 7 days."""
        import os
        import asyncio as _asyncio

        video_id = int(request.match_info["video_id"])

        pool = await library_service._get_pool()
        if not pool:
            return web.json_response(
                {"error": "Keine DB-Verbindung"}, status=500)

        # Check whether a thumbnail already exists locally
        try:
            async with pool.acquire() as conn:
                async with conn.cursor(aiomysql.DictCursor) as cur:
                    await cur.execute(
                        "SELECT thumbnail_path FROM tv_episode_thumbnails "
                        "WHERE video_id = %s", (video_id,))
                    cached = await cur.fetchone()

                    # Serve the cached file only if it still exists on disk.
                    if cached and os.path.isfile(cached["thumbnail_path"]):
                        return web.FileResponse(
                            cached["thumbnail_path"],
                            headers={"Cache-Control": "public, max-age=604800"})

                    # Load video info (needed for both fallbacks below)
                    await cur.execute("""
                        SELECT v.file_path, v.duration_sec,
                               v.series_id, v.season_number, v.episode_number
                        FROM library_videos v
                        WHERE v.id = %s
                    """, (video_id,))
                    video = await cur.fetchone()
                    if not video or not os.path.isfile(video["file_path"]):
                        return web.json_response(
                            {"error": "Video nicht gefunden"}, status=404)

                    # Look up a TVDB episode image URL, if the series is
                    # matched and the episode is in the TVDB cache.
                    tvdb_image_url = None
                    if video.get("series_id"):
                        await cur.execute(
                            "SELECT tvdb_id FROM library_series "
                            "WHERE id = %s", (video["series_id"],))
                        s = await cur.fetchone()
                        if s and s.get("tvdb_id"):
                            await cur.execute("""
                                SELECT image_url FROM tvdb_episode_cache
                                WHERE series_tvdb_id = %s
                                AND season_number = %s
                                AND episode_number = %s
                            """, (s["tvdb_id"],
                                  video.get("season_number") or 0,
                                  video.get("episode_number") or 0))
                            tc = await cur.fetchone()
                            if tc and tc.get("image_url"):
                                tvdb_image_url = tc["image_url"]
        except Exception as e:
            return web.json_response({"error": str(e)}, status=500)

        # Target directory: .metadata/thumbnails/ next to the video file
        file_path = video["file_path"]
        video_dir = os.path.dirname(file_path)
        thumb_dir = os.path.join(video_dir, ".metadata", "thumbnails")
        os.makedirs(thumb_dir, exist_ok=True)
        thumb_path = os.path.join(thumb_dir, f"{video_id}.jpg")

        # Attempt 1: download the TVDB episode image
        if tvdb_image_url:
            if await _download_tvdb_image(tvdb_image_url, thumb_path):
                await _save_thumbnail_to_db(pool, video_id, thumb_path, "tvdb")
                return web.FileResponse(
                    thumb_path,
                    headers={"Cache-Control": "public, max-age=604800"})

        # Attempt 2: grab a frame via ffmpeg (at 25% of the runtime,
        # or at second 5 for very short/unknown durations)
        duration = video.get("duration_sec") or 0
        seek_pos = duration * 0.25 if duration > 10 else 5

        cmd = [
            "ffmpeg", "-hide_banner", "-loglevel", "error",
            "-ss", str(int(seek_pos)),
            "-i", file_path,
            "-vframes", "1",
            "-q:v", "5",
            "-vf", "scale=480:-1",
            "-y", thumb_path,
        ]

        try:
            proc = await _asyncio.create_subprocess_exec(
                *cmd,
                stdout=_asyncio.subprocess.PIPE,
                stderr=_asyncio.subprocess.PIPE,
            )
            await proc.communicate()

            if proc.returncode == 0 and os.path.isfile(thumb_path):
                await _save_thumbnail_to_db(
                    pool, video_id, thumb_path, "ffmpeg")
                return web.FileResponse(
                    thumb_path,
                    headers={"Cache-Control": "public, max-age=604800"})
            else:
                return web.json_response(
                    {"error": "Thumbnail-Generierung fehlgeschlagen"},
                    status=500)
        except Exception as e:
            logging.error(f"Thumbnail-Fehler: {e}")
            return web.json_response({"error": str(e)}, status=500)
|
|
|
|
# === Batch-Thumbnail-Generierung ===
|
|
|
|
_thumbnail_task = None # Hintergrund-Task fuer Batch-Generierung
|
|
|
|
    async def post_generate_thumbnails(request: web.Request) -> web.Response:
        """POST /api/library/generate-thumbnails

        Generates missing thumbnails for all videos in the background.
        Optional: ?series_id=123 to restrict to one series.
        Responds immediately; poll GET /api/library/thumbnail-status
        for progress. Only one batch runs at a time."""
        import os
        import asyncio as _asyncio
        nonlocal _thumbnail_task

        # Already running? Then just report, don't start a second batch.
        if _thumbnail_task and not _thumbnail_task.done():
            return web.json_response({
                "status": "running",
                "message": "Thumbnail-Generierung laeuft bereits"
            })

        pool = await library_service._get_pool()
        if not pool:
            return web.json_response(
                {"error": "Keine DB-Verbindung"}, status=500)

        series_id = request.query.get("series_id")

        async def _generate_batch():
            """Background task: create every missing thumbnail."""
            generated = 0
            errors = 0
            try:
                # Purge orphaned thumbnail rows (video row no longer exists)
                # so the status endpoint's counts stay consistent.
                async with pool.acquire() as conn:
                    async with conn.cursor() as cur:
                        await cur.execute("""
                            DELETE FROM tv_episode_thumbnails
                            WHERE video_id NOT IN (
                                SELECT id FROM library_videos
                            )
                        """)
                        orphaned = cur.rowcount
                        if orphaned:
                            logging.info(
                                f"Thumbnail-Batch: {orphaned} verwaiste "
                                f"Eintraege bereinigt"
                            )

                async with pool.acquire() as conn:
                    async with conn.cursor(aiomysql.DictCursor) as cur:
                        # Videos without a thumbnail, joined with a possible
                        # TVDB episode image URL (may be NULL).
                        sql = """
                            SELECT v.id, v.file_path, v.duration_sec,
                                   tc.image_url AS tvdb_image_url
                            FROM library_videos v
                            LEFT JOIN tv_episode_thumbnails t
                                ON v.id = t.video_id
                            LEFT JOIN library_series s
                                ON v.series_id = s.id
                            LEFT JOIN tvdb_episode_cache tc
                                ON tc.series_tvdb_id = s.tvdb_id
                                AND tc.season_number = v.season_number
                                AND tc.episode_number = v.episode_number
                            WHERE t.video_id IS NULL
                        """
                        params = []
                        if series_id:
                            sql += " AND v.series_id = %s"
                            params.append(int(series_id))
                        sql += " ORDER BY v.id"
                        await cur.execute(sql, params)
                        videos = await cur.fetchall()

                logging.info(
                    f"Thumbnail-Batch: {len(videos)} Videos ohne Thumbnail"
                )
                downloaded = 0

                for video in videos:
                    vid = video["id"]
                    fp = video["file_path"]
                    dur = video.get("duration_sec") or 0

                    # Skip files that vanished since the last scan.
                    if not os.path.isfile(fp):
                        continue

                    vdir = os.path.dirname(fp)
                    tdir = os.path.join(vdir, ".metadata", "thumbnails")
                    os.makedirs(tdir, exist_ok=True)
                    tpath = os.path.join(tdir, f"{vid}.jpg")

                    # Priority 1: download the TVDB episode image
                    tvdb_url = video.get("tvdb_image_url")
                    if tvdb_url:
                        if await _download_tvdb_image(tvdb_url, tpath):
                            await _save_thumbnail_to_db(
                                pool, vid, tpath, "tvdb")
                            generated += 1
                            downloaded += 1
                            continue

                    # Priority 2: grab a frame via ffmpeg (25% in, or
                    # second 5 for short/unknown durations)
                    seek = dur * 0.25 if dur > 10 else 5
                    cmd = [
                        "ffmpeg", "-hide_banner", "-loglevel", "error",
                        "-ss", str(int(seek)),
                        "-i", fp,
                        "-vframes", "1", "-q:v", "5",
                        "-vf", "scale=480:-1",
                        "-y", tpath,
                    ]
                    try:
                        proc = await _asyncio.create_subprocess_exec(
                            *cmd,
                            stdout=_asyncio.subprocess.PIPE,
                            stderr=_asyncio.subprocess.PIPE,
                        )
                        await proc.communicate()

                        if proc.returncode == 0 and os.path.isfile(tpath):
                            await _save_thumbnail_to_db(
                                pool, vid, tpath, "ffmpeg")
                            generated += 1
                        else:
                            errors += 1
                    except Exception as e:
                        logging.warning(f"Thumbnail-Fehler Video {vid}: {e}")
                        errors += 1

                logging.info(
                    f"Thumbnail-Batch fertig: {generated} erzeugt "
                    f"({downloaded} TVDB, {generated - downloaded} ffmpeg), "
                    f"{errors} Fehler"
                )
            except Exception as e:
                logging.error(f"Thumbnail-Batch Fehler: {e}")

        import asyncio
        _thumbnail_task = asyncio.ensure_future(_generate_batch())

        return web.json_response({
            "status": "started",
            "message": "Thumbnail-Generierung gestartet"
        })
|
|
|
|
async def get_thumbnail_status(request: web.Request) -> web.Response:
    """GET /api/library/thumbnail-status

    Report progress of the background thumbnail generation task.
    """
    pool = await library_service._get_pool()
    if not pool:
        return web.json_response(
            {"error": "Keine DB-Verbindung"}, status=500)

    # The batch task counts as "running" while its future exists and
    # has not finished yet.
    is_running = bool(_thumbnail_task and not _thumbnail_task.done())

    async with pool.acquire() as conn:
        async with conn.cursor(aiomysql.DictCursor) as cur:
            await cur.execute(
                "SELECT COUNT(*) AS cnt FROM library_videos")
            row = await cur.fetchone()
            video_total = row["cnt"]
            # Count only thumbnails whose video row still exists,
            # so orphaned thumbnail entries do not inflate the number.
            await cur.execute("""
                SELECT COUNT(*) AS cnt
                FROM tv_episode_thumbnails t
                INNER JOIN library_videos v ON t.video_id = v.id
            """)
            row = await cur.fetchone()
            thumb_done = row["cnt"]

    return web.json_response({
        "running": is_running,
        "generated": thumb_done,
        "total": video_total,
        "missing": max(0, video_total - thumb_done),
    })
|
|
|
|
# === Import: Item zuordnen / ueberspringen ===
|
|
|
|
async def post_reassign_import_item(
    request: web.Request,
) -> web.Response:
    """POST /api/library/import/items/{item_id}/reassign

    Assign a series/season/episode to an unrecognized import item.

    Expects a JSON body with ``series_name`` (str), ``season`` (int),
    ``episode`` (int) and optionally ``tvdb_id`` (int). Returns the
    importer result, or 400 on invalid input.
    """
    if not importer_service:
        return web.json_response(
            {"error": "Import-Service nicht verfuegbar"}, status=500
        )
    item_id = int(request.match_info["item_id"])
    try:
        data = await request.json()
    except Exception:
        return web.json_response(
            {"error": "Ungueltiges JSON"}, status=400
        )

    series_name = data.get("series_name", "").strip()
    season = data.get("season")
    episode = data.get("episode")
    tvdb_id = data.get("tvdb_id")

    if not series_name or season is None or episode is None:
        return web.json_response(
            {"error": "series_name, season und episode erforderlich"},
            status=400,
        )

    # Validate the numeric fields up front: a non-numeric client value
    # would otherwise raise ValueError and surface as an unhandled 500.
    try:
        season = int(season)
        episode = int(episode)
        tvdb_id = int(tvdb_id) if tvdb_id else None
    except (TypeError, ValueError):
        return web.json_response(
            {"error": "season, episode und tvdb_id muessen Zahlen sein"},
            status=400,
        )

    result = await importer_service.reassign_item(
        item_id, series_name, season, episode, tvdb_id
    )
    if result.get("error"):
        return web.json_response(result, status=400)
    return web.json_response(result)
|
|
|
|
async def post_skip_import_item(
    request: web.Request,
) -> web.Response:
    """POST /api/library/import/items/{item_id}/skip

    Mark a single import item as skipped.
    """
    if not importer_service:
        return web.json_response(
            {"error": "Import-Service nicht verfuegbar"}, status=500
        )
    item_id = int(request.match_info["item_id"])
    if await importer_service.skip_item(item_id):
        return web.json_response({"message": "Item uebersprungen"})
    return web.json_response(
        {"error": "Fehlgeschlagen"}, status=400
    )
|
|
|
|
# === Serie-Level-Zuordnung (neuer Workflow) ===
|
|
|
|
async def get_pending_series(request: web.Request) -> web.Response:
    """GET /api/library/import/{job_id}/pending-series

    List every detected series of the job that still awaits assignment.
    """
    if not importer_service:
        return web.json_response(
            {"error": "Import-Service nicht verfuegbar"}, status=500
        )
    job_id = int(request.match_info["job_id"])
    return web.json_response(
        await importer_service.get_pending_series(job_id)
    )
|
|
|
|
async def post_assign_series(request: web.Request) -> web.Response:
    """POST /api/library/import/{job_id}/assign-series

    Map a detected series onto a TVDB series and compute target paths.
    """
    if not importer_service:
        return web.json_response(
            {"error": "Import-Service nicht verfuegbar"}, status=500
        )
    job_id = int(request.match_info["job_id"])
    try:
        payload = await request.json()
    except Exception:
        return web.json_response(
            {"error": "Ungueltiges JSON"}, status=400
        )

    detected = payload.get("detected_series", "").strip()
    tvdb_id = payload.get("tvdb_id")
    tvdb_name = payload.get("tvdb_name", "").strip()

    if not (detected and tvdb_name):
        return web.json_response(
            {"error": "detected_series und tvdb_name erforderlich"}, status=400
        )

    result = await importer_service.assign_series_mapping(
        job_id,
        detected,
        int(tvdb_id) if tvdb_id else None,
        tvdb_name,
    )
    if result.get("error"):
        return web.json_response(result, status=400)
    return web.json_response(result)
|
|
|
|
async def post_resolve_all_conflicts(request: web.Request) -> web.Response:
    """POST /api/library/import/{job_id}/resolve-all-conflicts

    Resolve every conflict of a job in one go (overwrite or skip).
    """
    if not importer_service:
        return web.json_response(
            {"error": "Import-Service nicht verfuegbar"}, status=500
        )
    job_id = int(request.match_info["job_id"])
    try:
        body = await request.json()
    except Exception:
        return web.json_response(
            {"error": "Ungueltiges JSON"}, status=400
        )

    action = body.get("action", "").strip()
    if action != "overwrite" and action != "skip":
        return web.json_response(
            {"error": "action muss 'overwrite' oder 'skip' sein"}, status=400
        )

    result = await importer_service.resolve_all_conflicts(job_id, action)
    if result.get("error"):
        return web.json_response(result, status=400)
    return web.json_response(result)
|
|
|
|
async def put_overwrite_mode(request: web.Request) -> web.Response:
    """PUT /api/library/import/{job_id}/overwrite-mode

    Toggle whether all conflicts should be overwritten automatically.
    """
    if not importer_service:
        return web.json_response(
            {"error": "Import-Service nicht verfuegbar"}, status=500
        )
    job_id = int(request.match_info["job_id"])
    try:
        body = await request.json()
    except Exception:
        return web.json_response(
            {"error": "Ungueltiges JSON"}, status=400
        )

    # Missing flag defaults to False; anything truthy enables the mode.
    flag = bool(body.get("overwrite", False))
    result = await importer_service.set_overwrite_mode(job_id, flag)
    if result.get("error"):
        return web.json_response(result, status=400)
    return web.json_response(result)
|
|
|
|
# === Alte Serien-Zuordnung (Kompatibilitaet) ===
|
|
|
|
async def post_reassign_import_series(
    request: web.Request,
) -> web.Response:
    """POST /api/library/import/{job_id}/reassign-series

    Legacy endpoint: assign every item sharing the same detected_series.
    Delegates to the newer series-mapping workflow.
    """
    if not importer_service:
        return web.json_response(
            {"error": "Import-Service nicht verfuegbar"}, status=500
        )
    job_id = int(request.match_info["job_id"])
    try:
        body = await request.json()
    except Exception:
        return web.json_response(
            {"error": "Ungueltiges JSON"}, status=400
        )

    detected = body.get("detected_series", "").strip()
    tvdb_id = body.get("tvdb_id")
    name = body.get("series_name", "").strip()

    if not detected:
        return web.json_response(
            {"error": "detected_series erforderlich"}, status=400
        )

    # Reuse the new workflow; fall back to the detected name if the
    # caller supplied no explicit series name.
    result = await importer_service.assign_series_mapping(
        job_id,
        detected,
        int(tvdb_id) if tvdb_id else None,
        name or detected,
    )
    if result.get("error"):
        return web.json_response(result, status=400)
    return web.json_response(result)
|
|
|
|
async def post_skip_import_series(
    request: web.Request,
) -> web.Response:
    """POST /api/library/import/{job_id}/skip-series

    Skip every item belonging to one detected series.
    """
    if not importer_service:
        return web.json_response(
            {"error": "Import-Service nicht verfuegbar"}, status=500
        )
    job_id = int(request.match_info["job_id"])
    try:
        body = await request.json()
    except Exception:
        return web.json_response(
            {"error": "Ungueltiges JSON"}, status=400
        )

    detected = body.get("detected_series", "").strip()
    if not detected:
        return web.json_response(
            {"error": "detected_series erforderlich"}, status=400
        )

    result = await importer_service.skip_series(job_id, detected)
    if result.get("error"):
        return web.json_response(result, status=400)
    return web.json_response(result)
|
|
|
|
# === Filter-Presets ===
|
|
|
|
async def get_filter_presets(request: web.Request) -> web.Response:
    """GET /api/library/filter-presets

    Return the stored filter presets and the configured default view.
    """
    lib_cfg = config.settings.get("library", {})
    return web.json_response({
        "presets": lib_cfg.get("filter_presets", {}),
        "default_view": lib_cfg.get("default_view", "all"),
    })
|
|
|
|
async def put_filter_presets(request: web.Request) -> web.Response:
    """PUT /api/library/filter-presets - store presets.

    Only truthy payload values are written: an empty presets dict or a
    missing default_view leaves the stored settings untouched.
    """
    try:
        body = await request.json()
    except Exception:
        return web.json_response(
            {"error": "Ungueltiges JSON"}, status=400
        )
    new_presets = body.get("presets", {})
    new_default = body.get("default_view")

    lib_cfg = config.settings.setdefault("library", {})
    if new_presets:
        lib_cfg["filter_presets"] = new_presets
    if new_default:
        lib_cfg["default_view"] = new_default
    config.save_settings()
    return web.json_response({"message": "Filter-Presets gespeichert"})
|
|
|
|
async def post_filter_preset(request: web.Request) -> web.Response:
    """POST /api/library/filter-presets - add a single new preset."""
    try:
        body = await request.json()
    except Exception:
        return web.json_response(
            {"error": "Ungueltiges JSON"}, status=400
        )
    preset_id = body.get("id", "").strip()
    preset_name = body.get("name", "").strip()
    filters = body.get("filters", {})

    if not (preset_id and preset_name):
        return web.json_response(
            {"error": "id und name erforderlich"}, status=400
        )

    lib_cfg = config.settings.setdefault("library", {})
    presets = lib_cfg.setdefault("filter_presets", {})
    # A preset entry is the display name plus the flattened filter fields.
    presets[preset_id] = {"name": preset_name, **filters}
    config.save_settings()
    return web.json_response({"message": f"Preset '{preset_name}' gespeichert"})
|
|
|
|
async def delete_filter_preset(request: web.Request) -> web.Response:
    """DELETE /api/library/filter-presets/{preset_id}"""
    preset_id = request.match_info["preset_id"]
    presets = config.settings.get("library", {}).get("filter_presets", {})
    if preset_id not in presets:
        return web.json_response({"error": "Preset nicht gefunden"}, status=404)
    del config.settings["library"]["filter_presets"][preset_id]
    config.save_settings()
    return web.json_response({"message": "Preset geloescht"})
|
|
|
|
async def put_default_view(request: web.Request) -> web.Response:
    """PUT /api/library/default-view - set the default library view."""
    try:
        body = await request.json()
    except Exception:
        return web.json_response(
            {"error": "Ungueltiges JSON"}, status=400
        )
    view = body.get("default_view", "all")
    config.settings.setdefault("library", {})["default_view"] = view
    config.save_settings()
    return web.json_response({
        "message": f"Standard-Ansicht auf '{view}' gesetzt"
    })
|
|
|
|
# === Server-Logs ===
|
|
|
|
async def get_server_logs(request: web.Request) -> web.Response:
    """GET /api/logs?lines=100 - return the last N lines of server.log.

    The ``lines`` query parameter is clamped to 1..1000; a non-numeric
    value falls back to the default of 100 instead of raising (the
    original code let ValueError escape as a 500, and a negative value
    would make ``deque(maxlen=...)`` raise as well).
    """
    import os
    from collections import deque

    try:
        lines_count = int(request.query.get("lines", "100"))
    except (TypeError, ValueError):
        lines_count = 100
    lines_count = max(1, min(lines_count, 1000))

    log_file = config.log_file_path
    if not os.path.isfile(log_file):
        return web.json_response(
            {"lines": [], "error": "Log-Datei nicht gefunden"}
        )

    try:
        # deque with maxlen keeps only the last N lines (memory-friendly:
        # the file is streamed, never fully loaded)
        with open(log_file, "r", encoding="utf-8",
                  errors="replace") as f:
            result_lines = deque(f, maxlen=lines_count)

        return web.json_response({
            "lines": [line.rstrip("\n") for line in result_lines],
            "total": len(result_lines),
        })
    except Exception as e:
        return web.json_response(
            {"error": str(e)}, status=500
        )
|
|
|
|
# === Route registration ===
# Filter presets
app.router.add_get("/api/library/filter-presets", get_filter_presets)
app.router.add_put("/api/library/filter-presets", put_filter_presets)
app.router.add_post("/api/library/filter-presets", post_filter_preset)
app.router.add_delete(
    "/api/library/filter-presets/{preset_id}", delete_filter_preset
)
app.router.add_put("/api/library/default-view", put_default_view)
# Scan paths
app.router.add_get("/api/library/paths", get_paths)
app.router.add_post("/api/library/paths", post_path)
app.router.add_put("/api/library/paths/{path_id}", put_path)
app.router.add_delete("/api/library/paths/{path_id}", delete_path)
# Scanning
app.router.add_post("/api/library/scan", post_scan_all)
app.router.add_post("/api/library/scan/{path_id}", post_scan_single)
app.router.add_get("/api/library/scan-status", get_scan_status)
# Videos / movies
app.router.add_get("/api/library/videos", get_videos)
app.router.add_get("/api/library/movies", get_movies)
app.router.add_delete(
    "/api/library/videos/{video_id}", delete_video
)
# Series
app.router.add_get("/api/library/series", get_series)
app.router.add_get("/api/library/series/{series_id}", get_series_detail)
app.router.add_delete(
    "/api/library/series/{series_id}", delete_series
)
app.router.add_get(
    "/api/library/series/{series_id}/missing", get_missing_episodes
)
app.router.add_get(
    "/api/library/missing-episodes", get_all_missing_episodes
)
# TVDB
app.router.add_post(
    "/api/library/series/{series_id}/tvdb-match", post_tvdb_match
)
app.router.add_delete(
    "/api/library/series/{series_id}/tvdb", delete_tvdb_link
)
app.router.add_post(
    "/api/library/series/{series_id}/tvdb-refresh", post_tvdb_refresh
)
app.router.add_get("/api/tvdb/search", get_tvdb_search)
# TVDB metadata
app.router.add_get(
    "/api/library/series/{series_id}/cast", get_series_cast
)
app.router.add_get(
    "/api/library/series/{series_id}/artworks", get_series_artworks
)
app.router.add_post(
    "/api/library/series/{series_id}/metadata-download",
    post_metadata_download,
)
app.router.add_post(
    "/api/library/metadata-download-all", post_metadata_download_all
)
app.router.add_get(
    # NOTE(review): this path segment read "(unknown)" in the checked-in
    # file, which looks like a corrupted template placeholder. Restored
    # to the {filename} match-info placeholder an image handler needs —
    # confirm against get_metadata_image's match_info usage.
    "/api/library/metadata/{series_id}/{filename}", get_metadata_image
)
# Movies
app.router.add_get("/api/library/movies-list", get_movies_list)
app.router.add_get("/api/library/movies/{movie_id}", get_movie_detail)
app.router.add_delete("/api/library/movies/{movie_id}", delete_movie)
app.router.add_post(
    "/api/library/movies/{movie_id}/tvdb-match", post_movie_tvdb_match
)
app.router.add_delete(
    "/api/library/movies/{movie_id}/tvdb", delete_movie_tvdb_link
)
app.router.add_get("/api/tvdb/search-movies", get_tvdb_movie_search)
# Browse / duplicates
app.router.add_get("/api/library/browse", get_browse)
app.router.add_get("/api/library/duplicates", get_duplicates)
# Conversion
app.router.add_post(
    "/api/library/videos/{video_id}/convert", post_convert_video
)
app.router.add_post(
    "/api/library/series/{series_id}/convert", post_convert_series
)
app.router.add_get(
    "/api/library/series/{series_id}/convert-status",
    get_series_convert_status
)
app.router.add_post(
    "/api/library/series/{series_id}/cleanup",
    post_cleanup_series_folder
)
app.router.add_post("/api/library/delete-folder", post_delete_folder)
# Statistics
app.router.add_get("/api/library/stats", get_library_stats)
# Clean
app.router.add_get("/api/library/clean/scan", get_clean_scan)
app.router.add_post("/api/library/clean/delete", post_clean_delete)
app.router.add_post(
    "/api/library/clean/empty-dirs", post_clean_empty_dirs
)
# Filesystem browser
app.router.add_get("/api/library/browse-fs", get_browse_fs)
# Import
app.router.add_get("/api/library/import", get_import_jobs)
app.router.add_post("/api/library/import", post_create_import)
app.router.add_delete(
    "/api/library/import/{job_id}", delete_import_job
)
app.router.add_post(
    "/api/library/import/{job_id}/analyze", post_analyze_import
)
app.router.add_get(
    "/api/library/import/{job_id}", get_import_status
)
app.router.add_post(
    "/api/library/import/{job_id}/execute", post_execute_import
)
app.router.add_put(
    "/api/library/import/items/{item_id}", put_import_item
)
app.router.add_put(
    "/api/library/import/items/{item_id}/resolve", put_resolve_conflict
)
app.router.add_post(
    "/api/library/import/items/{item_id}/reassign",
    post_reassign_import_item,
)
app.router.add_post(
    "/api/library/import/items/{item_id}/skip",
    post_skip_import_item,
)
app.router.add_post(
    "/api/library/import/{job_id}/reassign-series",
    post_reassign_import_series,
)
app.router.add_post(
    "/api/library/import/{job_id}/skip-series",
    post_skip_import_series,
)
# New workflow: series assignment BEFORE conflict check
app.router.add_get(
    "/api/library/import/{job_id}/pending-series",
    get_pending_series,
)
app.router.add_post(
    "/api/library/import/{job_id}/assign-series",
    post_assign_series,
)
app.router.add_post(
    "/api/library/import/{job_id}/resolve-all-conflicts",
    post_resolve_all_conflicts,
)
app.router.add_put(
    "/api/library/import/{job_id}/overwrite-mode",
    put_overwrite_mode,
)
# Video streaming, subtitles, video info
app.router.add_get(
    "/api/library/videos/{video_id}/stream", get_stream_video
)
app.router.add_get(
    "/api/library/videos/{video_id}/subtitles/{track_index}",
    get_subtitle_track
)
app.router.add_get(
    "/api/library/videos/{video_id}/info", get_video_info
)
app.router.add_get(
    "/api/library/videos/{video_id}/thumbnail", get_video_thumbnail
)
# Batch thumbnails
app.router.add_post(
    "/api/library/generate-thumbnails", post_generate_thumbnails
)
app.router.add_get(
    "/api/library/thumbnail-status", get_thumbnail_status
)
# TVDB auto-match (review mode)
app.router.add_post(
    "/api/library/tvdb-auto-match", post_tvdb_auto_match
)
app.router.add_get(
    "/api/library/tvdb-auto-match-status", get_tvdb_auto_match_status
)
app.router.add_post(
    "/api/library/tvdb-confirm", post_tvdb_confirm
)
# TVDB language
app.router.add_get("/api/tvdb/language", get_tvdb_language)
app.router.add_put("/api/tvdb/language", put_tvdb_language)
app.router.add_post(
    "/api/library/tvdb-refresh-episodes",
    post_tvdb_refresh_all_episodes,
)
# Server logs
app.router.add_get("/api/logs", get_server_logs)
|