From 37faf87c98e36d6569ca523ffb3accf7c3750986 Mon Sep 17 00:00:00 2001 From: Linrador <68631622+Linrador@users.noreply.github.com> Date: Fri, 6 Feb 2026 10:28:46 +0100 Subject: [PATCH] updated --- backend/assets_generate.go | 207 ++ backend/cors.go | 30 + backend/file_ops_windows.go | 90 + backend/frontend.go | 153 + backend/http_teaser.go | 202 ++ backend/main.go | 5424 ------------------------------ backend/preview_hls.go | 391 +++ backend/preview_jpeg.go | 135 + backend/preview_m3u8_rewrite.go | 134 + backend/preview_status_svg.go | 86 + backend/record_handlers.go | 1611 +++++++++ backend/record_helpers_paths.go | 193 ++ backend/record_start.go | 456 +++ backend/record_stream_cb.go | 338 ++ backend/record_stream_mfc.go | 439 +++ backend/routes.go | 104 + backend/serve_video.go | 218 ++ backend/server.go | 55 + backend/tasks_assets.go | 364 ++ backend/teaser_preview_ffmpeg.go | 422 +++ 20 files changed, 5628 insertions(+), 5424 deletions(-) create mode 100644 backend/assets_generate.go create mode 100644 backend/cors.go create mode 100644 backend/file_ops_windows.go create mode 100644 backend/frontend.go create mode 100644 backend/http_teaser.go create mode 100644 backend/preview_hls.go create mode 100644 backend/preview_jpeg.go create mode 100644 backend/preview_m3u8_rewrite.go create mode 100644 backend/preview_status_svg.go create mode 100644 backend/record_handlers.go create mode 100644 backend/record_helpers_paths.go create mode 100644 backend/record_start.go create mode 100644 backend/record_stream_cb.go create mode 100644 backend/record_stream_mfc.go create mode 100644 backend/routes.go create mode 100644 backend/serve_video.go create mode 100644 backend/server.go create mode 100644 backend/tasks_assets.go create mode 100644 backend/teaser_preview_ffmpeg.go diff --git a/backend/assets_generate.go b/backend/assets_generate.go new file mode 100644 index 0000000..5502b7a --- /dev/null +++ b/backend/assets_generate.go @@ -0,0 +1,207 @@ +package main + +import 
( + "context" + "fmt" + "os" + "path/filepath" + "strings" + "time" +) + +func formatBytesSI(b int64) string { + if b < 0 { + b = 0 + } + const unit = 1024 + if b < unit { + return fmt.Sprintf("%d B", b) + } + div, exp := int64(unit), 0 + for n := b / unit; n >= unit; n /= unit { + div *= unit + exp++ + } + suffix := []string{"KB", "MB", "GB", "TB", "PB"} + v := float64(b) / float64(div) + // 1 Nachkommastelle, außer sehr große ganze Zahlen + if v >= 10 { + return fmt.Sprintf("%.0f %s", v, suffix[exp]) + } + return fmt.Sprintf("%.1f %s", v, suffix[exp]) +} + +func u64ToI64(x uint64) int64 { + if x > uint64(maxInt64) { + return maxInt64 + } + return int64(x) +} + +func ensureAssetsForVideo(videoPath string) error { + // Default: keine SourceURL (für Covers egal) + return ensureAssetsForVideoWithProgress(videoPath, "", nil) +} + +// Optional: für Stellen, wo du die URL hast (z.B. Postwork / Jobs) +func ensureAssetsForVideoWithSource(videoPath string, sourceURL string) error { + return ensureAssetsForVideoWithProgress(videoPath, sourceURL, nil) +} + +// onRatio: 0..1 (Assets-Gesamtfortschritt) +func ensureAssetsForVideoWithProgress(videoPath string, sourceURL string, onRatio func(r float64)) error { + videoPath = strings.TrimSpace(videoPath) + if videoPath == "" { + return nil + } + + fi, statErr := os.Stat(videoPath) + if statErr != nil || fi.IsDir() || fi.Size() <= 0 { + return nil + } + + // ✅ ID = Dateiname ohne Endung (immer OHNE "HOT " Prefix) + base := filepath.Base(videoPath) + id := strings.TrimSuffix(base, filepath.Ext(base)) + id = stripHotPrefix(id) + if strings.TrimSpace(id) == "" { + return nil + } + + assetDir, gerr := ensureGeneratedDir(id) + if gerr != nil || strings.TrimSpace(assetDir) == "" { + return fmt.Errorf("generated dir: %v", gerr) + } + + metaPath := filepath.Join(assetDir, "meta.json") + + // ---- Meta / Duration ---- + durSec := 0.0 + if d, ok := readVideoMetaDuration(metaPath, fi); ok { + durSec = d + } else { + dctx, cancel := 
context.WithTimeout(context.Background(), 6*time.Second) + d, derr := durationSecondsCached(dctx, videoPath) + cancel() + + if derr == nil && d > 0 { + durSec = d + // ✅ Duration-only meta schreiben (inkl. sourceURL) + _ = writeVideoMetaDuration(metaPath, fi, durSec, sourceURL) + } + } + + // ✅ Wenn Duration aus Meta kam, aber SourceURL jetzt neu vorhanden ist, + // dann Meta "anreichern" (ohne ffprobe). + if durSec > 0 && strings.TrimSpace(sourceURL) != "" { + if u, ok := readVideoMetaSourceURL(metaPath, fi); !ok || strings.TrimSpace(u) == "" { + _ = writeVideoMetaDuration(metaPath, fi, durSec, sourceURL) + } + } + + // Gewichte: thumbs klein, preview groß + const ( + thumbsW = 0.25 + previewW = 0.75 + ) + + progress := func(r float64) { + if onRatio == nil { + return + } + if r < 0 { + r = 0 + } + if r > 1 { + r = 1 + } + onRatio(r) + } + + progress(0) + + // ---------------- + // Thumbs + // ---------------- + thumbPath := filepath.Join(assetDir, "thumbs.jpg") + if tfi, err := os.Stat(thumbPath); err == nil && !tfi.IsDir() && tfi.Size() > 0 { + progress(thumbsW) + } else { + progress(0.05) + + genCtx, cancel := context.WithTimeout(context.Background(), 45*time.Second) + defer cancel() + + if err := thumbSem.Acquire(genCtx); err != nil { + // best-effort + progress(thumbsW) + goto PREVIEW + } + defer thumbSem.Release() + + progress(0.10) + + t := 0.0 + if durSec > 0 { + t = durSec * 0.5 + } + + progress(0.15) + + img, e1 := extractFrameAtTimeJPEG(videoPath, t) + if e1 != nil || len(img) == 0 { + img, e1 = extractLastFrameJPEG(videoPath) + if e1 != nil || len(img) == 0 { + img, e1 = extractFirstFrameJPEG(videoPath) + } + } + + progress(0.20) + + if e1 == nil && len(img) > 0 { + if err := atomicWriteFile(thumbPath, img); err != nil { + fmt.Println("⚠️ thumb write:", err) + } + } + + progress(thumbsW) + } + +PREVIEW: + // ---------------- + // Preview + // ---------------- + previewPath := filepath.Join(assetDir, "preview.mp4") + if pfi, err := os.Stat(previewPath); 
err == nil && !pfi.IsDir() && pfi.Size() > 0 { + progress(1) + return nil + } + + genCtx, cancel := context.WithTimeout(context.Background(), 3*time.Minute) + defer cancel() + + progress(thumbsW + 0.02) + + if err := genSem.Acquire(genCtx); err != nil { + progress(1) + return nil + } + defer genSem.Release() + + progress(thumbsW + 0.05) + + if err := generateTeaserClipsMP4WithProgress(genCtx, videoPath, previewPath, 1.0, 18, func(r float64) { + if r < 0 { + r = 0 + } + if r > 1 { + r = 1 + } + progress(thumbsW + r*previewW) + }); err != nil { + fmt.Println("⚠️ preview clips:", err) + } + + progress(1) + return nil +} diff --git a/backend/cors.go b/backend/cors.go new file mode 100644 index 0000000..bb3485f --- /dev/null +++ b/backend/cors.go @@ -0,0 +1,30 @@ +package main + +import ( + "net/http" + "strings" +) + +func withCORS(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + origin := strings.TrimSpace(r.Header.Get("Origin")) + + // Dev-Origins erlauben + if origin == "http://localhost:5173" || origin == "http://127.0.0.1:5173" { + w.Header().Set("Access-Control-Allow-Origin", origin) + w.Header().Set("Vary", "Origin") + w.Header().Set("Access-Control-Allow-Methods", "GET,POST,DELETE,HEAD,OPTIONS") + w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Range, Last-Event-ID") + w.Header().Set("Access-Control-Expose-Headers", "Content-Length, Content-Range, Accept-Ranges") + // Nur wenn du wirklich Cookies/Authorization cross-origin brauchst: + // w.Header().Set("Access-Control-Allow-Credentials", "true") + } + + if r.Method == http.MethodOptions { + w.WriteHeader(http.StatusNoContent) + return + } + + next.ServeHTTP(w, r) + }) +} diff --git a/backend/file_ops_windows.go b/backend/file_ops_windows.go new file mode 100644 index 0000000..d562cf6 --- /dev/null +++ b/backend/file_ops_windows.go @@ -0,0 +1,90 @@ +package main + +import ( + "errors" + "io" + "os" + "runtime" + "strings" + "syscall" + 
"time" +) + +func moveFile(src, dst string) error { + // zuerst Rename (schnell) + if err := os.Rename(src, dst); err == nil { + return nil + } else { + // Fallback: Copy+Remove (z.B. bei EXDEV) + in, err2 := os.Open(src) + if err2 != nil { + return err + } + defer in.Close() + + out, err2 := os.Create(dst) + if err2 != nil { + return err + } + if _, err2 := io.Copy(out, in); err2 != nil { + out.Close() + return err2 + } + if err2 := out.Close(); err2 != nil { + return err2 + } + return os.Remove(src) + } +} + +const windowsSharingViolation syscall.Errno = 32 // ERROR_SHARING_VIOLATION + +func isSharingViolation(err error) bool { + if runtime.GOOS != "windows" { + return false + } + // Windows: ERROR_SHARING_VIOLATION = 32, ERROR_LOCK_VIOLATION = 33 + var pe *os.PathError + if errors.As(err, &pe) { + if errno, ok := pe.Err.(syscall.Errno); ok { + return errno == syscall.Errno(32) || errno == syscall.Errno(33) + } + } + // Fallback über Text + s := strings.ToLower(err.Error()) + return strings.Contains(s, "sharing violation") || + strings.Contains(s, "used by another process") || + strings.Contains(s, "wird von einem anderen prozess verwendet") +} + +func removeWithRetry(path string) error { + var err error + for i := 0; i < 40; i++ { // ~4s bei 100ms + err = os.Remove(path) + if err == nil { + return nil + } + if isSharingViolation(err) { + time.Sleep(100 * time.Millisecond) + continue + } + return err + } + return err +} + +func renameWithRetry(oldPath, newPath string) error { + var err error + for i := 0; i < 40; i++ { + err = os.Rename(oldPath, newPath) + if err == nil { + return nil + } + if isSharingViolation(err) { + time.Sleep(100 * time.Millisecond) + continue + } + return err + } + return err +} diff --git a/backend/frontend.go b/backend/frontend.go new file mode 100644 index 0000000..c98bdde --- /dev/null +++ b/backend/frontend.go @@ -0,0 +1,153 @@ +package main + +import ( + "fmt" + "net/http" + "os" + "path" + "path/filepath" + "strings" +) + +// 
Frontend (Vite build) als SPA ausliefern: Dateien aus dist, sonst index.html +func registerFrontend(mux *http.ServeMux) { + // Kandidaten: zuerst ENV, dann typische Ordner + candidates := []string{ + strings.TrimSpace(os.Getenv("FRONTEND_DIST")), + "web/dist", + "dist", + } + + var distAbs string + for _, c := range candidates { + if c == "" { + continue + } + abs, err := resolvePathRelativeToApp(c) + if err != nil { + continue + } + if fi, err := os.Stat(filepath.Join(abs, "index.html")); err == nil && !fi.IsDir() { + distAbs = abs + break + } + } + + if distAbs == "" { + fmt.Println("⚠️ Frontend dist nicht gefunden (tried: FRONTEND_DIST, frontend/dist, dist) – API läuft trotzdem.") + return + } + + fmt.Println("🖼️ Frontend dist:", distAbs) + + fileServer := http.FileServer(http.Dir(distAbs)) + + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + // /api bleibt bei deinen API-Routen (längeres Pattern gewinnt), + // aber falls mal was durchrutscht: + if strings.HasPrefix(r.URL.Path, "/api/") { + http.NotFound(w, r) + return + } + + // 1) Wenn echte Datei existiert -> ausliefern + reqPath := r.URL.Path + if reqPath == "" || reqPath == "/" { + // index.html + w.Header().Set("Cache-Control", "no-store") + http.ServeFile(w, r, filepath.Join(distAbs, "index.html")) + return + } + + // URL-Pfad in Dateisystem-Pfad umwandeln (ohne Traversal) + clean := path.Clean("/" + reqPath) // path.Clean (für URL-Slashes) + rel := strings.TrimPrefix(clean, "/") + onDisk := filepath.Join(distAbs, filepath.FromSlash(rel)) + + if fi, err := os.Stat(onDisk); err == nil && !fi.IsDir() { + // Statische Assets ruhig cachen (Vite hashed assets) + ext := strings.ToLower(filepath.Ext(onDisk)) + if ext != "" && ext != ".html" { + w.Header().Set("Cache-Control", "public, max-age=31536000, immutable") + } else { + w.Header().Set("Cache-Control", "no-store") + } + fileServer.ServeHTTP(w, r) + return + } + + // 2) SPA-Fallback: alle "Routen" ohne Datei -> index.html + 
w.Header().Set("Cache-Control", "no-store") + http.ServeFile(w, r, filepath.Join(distAbs, "index.html")) + }) +} + +func makeFrontendHandler() (http.Handler, bool) { + // Kandidaten: zuerst ENV, dann typische Ordner + candidates := []string{ + strings.TrimSpace(os.Getenv("FRONTEND_DIST")), + "web/dist", + "dist", + } + + var distAbs string + for _, c := range candidates { + if c == "" { + continue + } + abs, err := resolvePathRelativeToApp(c) + if err != nil { + continue + } + if fi, err := os.Stat(filepath.Join(abs, "index.html")); err == nil && !fi.IsDir() { + distAbs = abs + break + } + } + + if distAbs == "" { + fmt.Println("⚠️ Frontend dist nicht gefunden (tried: FRONTEND_DIST, web/dist, dist) – API läuft trotzdem.") + return nil, false + } + + fmt.Println("🖼️ Frontend dist:", distAbs) + + fileServer := http.FileServer(http.Dir(distAbs)) + + h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // /api bleibt API + if strings.HasPrefix(r.URL.Path, "/api/") { + http.NotFound(w, r) + return + } + + reqPath := r.URL.Path + if reqPath == "" || reqPath == "/" { + w.Header().Set("Cache-Control", "no-store") + http.ServeFile(w, r, filepath.Join(distAbs, "index.html")) + return + } + + // URL-Pfad in Dateisystem-Pfad umwandeln (ohne Traversal) + clean := path.Clean("/" + reqPath) + rel := strings.TrimPrefix(clean, "/") + onDisk := filepath.Join(distAbs, filepath.FromSlash(rel)) + + if fi, err := os.Stat(onDisk); err == nil && !fi.IsDir() { + ext := strings.ToLower(filepath.Ext(onDisk)) + if ext != "" && ext != ".html" { + w.Header().Set("Cache-Control", "public, max-age=31536000, immutable") + } else { + w.Header().Set("Cache-Control", "no-store") + } + fileServer.ServeHTTP(w, r) + return + } + + // SPA-Fallback + w.Header().Set("Cache-Control", "no-store") + http.ServeFile(w, r, filepath.Join(distAbs, "index.html")) + }) + + return h, true +} diff --git a/backend/http_teaser.go b/backend/http_teaser.go new file mode 100644 index 0000000..72100d3 --- 
/dev/null +++ b/backend/http_teaser.go @@ -0,0 +1,202 @@ +package main + +import ( + "context" + "fmt" + "math/rand" + "net/http" + "os" + "os/exec" + "path/filepath" + "strings" + "time" +) + +func serveTeaserFile(w http.ResponseWriter, r *http.Request, path string) { + f, err := openForReadShareDelete(path) + if err != nil { + http.Error(w, "datei öffnen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) + return + } + defer f.Close() + + fi, err := f.Stat() + if err != nil || fi.IsDir() || fi.Size() == 0 { + http.Error(w, "datei nicht gefunden", http.StatusNotFound) + return + } + + w.Header().Set("Cache-Control", "public, max-age=31536000") + w.Header().Set("Content-Type", "video/mp4") + http.ServeContent(w, r, filepath.Base(path), fi.ModTime(), f) +} + +// tolerante Input-Flags für kaputte/abgeschnittene H264/TS Streams +var ffmpegInputTol = []string{ + "-fflags", "+discardcorrupt+genpts", + "-err_detect", "ignore_err", + "-max_error_rate", "1.0", +} + +var coverModelStore *ModelStore + +func setCoverModelStore(s *ModelStore) { + coverModelStore = s + // random seed (einmalig) + rand.Seed(time.Now().UnixNano()) +} + +func generateTeaserMP4(ctx context.Context, srcPath, outPath string, startSec, durSec float64) error { + if durSec <= 0 { + durSec = 8 + } + if startSec < 0 { + startSec = 0 + } + + // temp schreiben -> rename + tmp := outPath + ".tmp.mp4" + + args := []string{ + "-y", + "-hide_banner", + "-loglevel", "error", + } + args = append(args, ffmpegInputTol...) 
+ args = append(args, + "-ss", fmt.Sprintf("%.3f", startSec), + "-i", srcPath, + "-t", fmt.Sprintf("%.3f", durSec), + + // Video + "-vf", "scale=720:-2", + "-map", "0:v:0", + + // Audio (optional: falls kein Audio vorhanden ist, bricht ffmpeg NICHT ab) + "-map", "0:a:0", + "-c:a", "aac", + "-b:a", "128k", + "-ac", "2", + + "-c:v", "libx264", + "-preset", "veryfast", + "-crf", "28", + "-pix_fmt", "yuv420p", + + // Wenn Audio minimal kürzer/länger ist, sauber beenden + "-shortest", + + "-movflags", "+faststart", + "-f", "mp4", + tmp, + ) + + cmd := exec.CommandContext(ctx, ffmpegPath, args...) + if out, err := cmd.CombinedOutput(); err != nil { + _ = os.Remove(tmp) + return fmt.Errorf("ffmpeg teaser failed: %v (%s)", err, strings.TrimSpace(string(out))) + } + + _ = os.Remove(outPath) + return os.Rename(tmp, outPath) +} + +func generatedTeaser(w http.ResponseWriter, r *http.Request) { + id := strings.TrimSpace(r.URL.Query().Get("id")) + if id == "" { + http.Error(w, "id fehlt", http.StatusBadRequest) + return + } + + var err error + id, err = sanitizeID(id) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + outPath, err := findFinishedFileByID(id) + if err != nil { + http.Error(w, "preview nicht verfügbar", http.StatusNotFound) + return + } + + if err := ensureGeneratedDirs(); err != nil { + http.Error(w, "generated-dir nicht verfügbar: "+err.Error(), http.StatusInternalServerError) + return + } + + assetID := stripHotPrefix(id) + if assetID == "" { + assetID = id + } + + assetDir, err := ensureGeneratedDir(assetID) + if err != nil { + http.Error(w, "generated-dir nicht verfügbar: "+err.Error(), http.StatusInternalServerError) + return + } + + previewPath := filepath.Join(assetDir, "preview.mp4") + + // ✅ NEU: noGenerate=1 -> niemals on-the-fly erzeugen, nur liefern wenn vorhanden + qNoGen := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("noGenerate"))) + noGen := qNoGen == "1" || qNoGen == "true" || qNoGen == "yes" + + // 
Cache hit (neu) + if fi, err := os.Stat(previewPath); err == nil && !fi.IsDir() && fi.Size() > 0 { + serveTeaserFile(w, r, previewPath) + return + } + + // Legacy: generated/teaser/_teaser.mp4 oder .mp4 + if teaserLegacy, _ := generatedTeaserRoot(); strings.TrimSpace(teaserLegacy) != "" { + cids := []string{assetID, id} + for _, cid := range cids { + candidates := []string{ + filepath.Join(teaserLegacy, cid+"_teaser.mp4"), + filepath.Join(teaserLegacy, cid+".mp4"), + } + for _, c := range candidates { + if fi, err := os.Stat(c); err == nil && !fi.IsDir() && fi.Size() > 0 { + if _, err2 := os.Stat(previewPath); os.IsNotExist(err2) { + _ = os.MkdirAll(filepath.Dir(previewPath), 0o755) + _ = os.Rename(c, previewPath) + } + if fi2, err2 := os.Stat(previewPath); err2 == nil && !fi2.IsDir() && fi2.Size() > 0 { + serveTeaserFile(w, r, previewPath) + return + } + serveTeaserFile(w, r, c) + return + } + } + } + } + + // ✅ NEU: wenn noGenerate aktiv und bisher kein Teaser gefunden -> 404 + if noGen { + http.Error(w, "preview nicht verfügbar", http.StatusNotFound) + return + } + + // Neu erzeugen + if err := genSem.Acquire(r.Context()); err != nil { + http.Error(w, "abgebrochen: "+err.Error(), http.StatusRequestTimeout) + return + } + defer genSem.Release() + + genCtx, cancel := context.WithTimeout(r.Context(), 3*time.Minute) + defer cancel() + + if err := generateTeaserClipsMP4(genCtx, outPath, previewPath, 1.0, 18); err != nil { + // Fallback: einzelner kurzer Teaser ab Anfang (trifft seltener kaputte Stellen) + if err2 := generateTeaserMP4(genCtx, outPath, previewPath, 0, 8); err2 != nil { + http.Error(w, "konnte preview nicht erzeugen: "+err.Error()+" (fallback ebenfalls fehlgeschlagen: "+err2.Error()+")", http.StatusInternalServerError) + return + } + } + + serveTeaserFile(w, r, previewPath) +} diff --git a/backend/main.go b/backend/main.go index 117f39d..98db1cd 100644 --- a/backend/main.go +++ b/backend/main.go @@ -7,13 +7,11 @@ import ( "bytes" "context" "crypto/sha1" 
- "encoding/base64" "encoding/binary" "encoding/hex" "encoding/json" "errors" "fmt" - "html" "image" "image/color" "image/draw" @@ -36,11 +34,8 @@ import ( "strings" "sync" "sync/atomic" - "syscall" "time" - "github.com/PuerkitoBio/goquery" - "github.com/google/uuid" "github.com/grafov/m3u8" gocpu "github.com/shirou/gopsutil/v3/cpu" godisk "github.com/shirou/gopsutil/v3/disk" @@ -4454,3661 +4449,8 @@ func servePreviewForFinishedFile(w http.ResponseWriter, r *http.Request, id stri servePreviewJPEGBytes(w, img) } -func serveTeaserFile(w http.ResponseWriter, r *http.Request, path string) { - f, err := openForReadShareDelete(path) - if err != nil { - http.Error(w, "datei öffnen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) - return - } - defer f.Close() - - fi, err := f.Stat() - if err != nil || fi.IsDir() || fi.Size() == 0 { - http.Error(w, "datei nicht gefunden", http.StatusNotFound) - return - } - - w.Header().Set("Cache-Control", "public, max-age=31536000") - w.Header().Set("Content-Type", "video/mp4") - http.ServeContent(w, r, filepath.Base(path), fi.ModTime(), f) -} - -// tolerante Input-Flags für kaputte/abgeschnittene H264/TS Streams -var ffmpegInputTol = []string{ - "-fflags", "+discardcorrupt+genpts", - "-err_detect", "ignore_err", - "-max_error_rate", "1.0", -} - -var coverModelStore *ModelStore - -func setCoverModelStore(s *ModelStore) { - coverModelStore = s - // random seed (einmalig) - rand.Seed(time.Now().UnixNano()) -} - -func generateTeaserMP4(ctx context.Context, srcPath, outPath string, startSec, durSec float64) error { - if durSec <= 0 { - durSec = 8 - } - if startSec < 0 { - startSec = 0 - } - - // temp schreiben -> rename - tmp := outPath + ".tmp.mp4" - - args := []string{ - "-y", - "-hide_banner", - "-loglevel", "error", - } - args = append(args, ffmpegInputTol...) 
- args = append(args, - "-ss", fmt.Sprintf("%.3f", startSec), - "-i", srcPath, - "-t", fmt.Sprintf("%.3f", durSec), - - // Video - "-vf", "scale=720:-2", - "-map", "0:v:0", - - // Audio (optional: falls kein Audio vorhanden ist, bricht ffmpeg NICHT ab) - "-map", "0:a:0", - "-c:a", "aac", - "-b:a", "128k", - "-ac", "2", - - "-c:v", "libx264", - "-preset", "veryfast", - "-crf", "28", - "-pix_fmt", "yuv420p", - - // Wenn Audio minimal kürzer/länger ist, sauber beenden - "-shortest", - - "-movflags", "+faststart", - "-f", "mp4", - tmp, - ) - - cmd := exec.CommandContext(ctx, ffmpegPath, args...) - if out, err := cmd.CombinedOutput(); err != nil { - _ = os.Remove(tmp) - return fmt.Errorf("ffmpeg teaser failed: %v (%s)", err, strings.TrimSpace(string(out))) - } - - _ = os.Remove(outPath) - return os.Rename(tmp, outPath) -} - -func generatedTeaser(w http.ResponseWriter, r *http.Request) { - id := strings.TrimSpace(r.URL.Query().Get("id")) - if id == "" { - http.Error(w, "id fehlt", http.StatusBadRequest) - return - } - - var err error - id, err = sanitizeID(id) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - outPath, err := findFinishedFileByID(id) - if err != nil { - http.Error(w, "preview nicht verfügbar", http.StatusNotFound) - return - } - - if err := ensureGeneratedDirs(); err != nil { - http.Error(w, "generated-dir nicht verfügbar: "+err.Error(), http.StatusInternalServerError) - return - } - - assetID := stripHotPrefix(id) - if assetID == "" { - assetID = id - } - - assetDir, err := ensureGeneratedDir(assetID) - if err != nil { - http.Error(w, "generated-dir nicht verfügbar: "+err.Error(), http.StatusInternalServerError) - return - } - - previewPath := filepath.Join(assetDir, "preview.mp4") - - // ✅ NEU: noGenerate=1 -> niemals on-the-fly erzeugen, nur liefern wenn vorhanden - qNoGen := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("noGenerate"))) - noGen := qNoGen == "1" || qNoGen == "true" || qNoGen == "yes" - - // 
Cache hit (neu) - if fi, err := os.Stat(previewPath); err == nil && !fi.IsDir() && fi.Size() > 0 { - serveTeaserFile(w, r, previewPath) - return - } - - // Legacy: generated/teaser/_teaser.mp4 oder .mp4 - if teaserLegacy, _ := generatedTeaserRoot(); strings.TrimSpace(teaserLegacy) != "" { - cids := []string{assetID, id} - for _, cid := range cids { - candidates := []string{ - filepath.Join(teaserLegacy, cid+"_teaser.mp4"), - filepath.Join(teaserLegacy, cid+".mp4"), - } - for _, c := range candidates { - if fi, err := os.Stat(c); err == nil && !fi.IsDir() && fi.Size() > 0 { - if _, err2 := os.Stat(previewPath); os.IsNotExist(err2) { - _ = os.MkdirAll(filepath.Dir(previewPath), 0o755) - _ = os.Rename(c, previewPath) - } - if fi2, err2 := os.Stat(previewPath); err2 == nil && !fi2.IsDir() && fi2.Size() > 0 { - serveTeaserFile(w, r, previewPath) - return - } - serveTeaserFile(w, r, c) - return - } - } - } - } - - // ✅ NEU: wenn noGenerate aktiv und bisher kein Teaser gefunden -> 404 - if noGen { - http.Error(w, "preview nicht verfügbar", http.StatusNotFound) - return - } - - // Neu erzeugen - if err := genSem.Acquire(r.Context()); err != nil { - http.Error(w, "abgebrochen: "+err.Error(), http.StatusRequestTimeout) - return - } - defer genSem.Release() - - genCtx, cancel := context.WithTimeout(r.Context(), 3*time.Minute) - defer cancel() - - if err := generateTeaserClipsMP4(genCtx, outPath, previewPath, 1.0, 18); err != nil { - // Fallback: einzelner kurzer Teaser ab Anfang (trifft seltener kaputte Stellen) - if err2 := generateTeaserMP4(genCtx, outPath, previewPath, 0, 8); err2 != nil { - http.Error(w, "konnte preview nicht erzeugen: "+err.Error()+" (fallback ebenfalls fehlgeschlagen: "+err2.Error()+")", http.StatusInternalServerError) - return - } - } - - serveTeaserFile(w, r, previewPath) -} - -// --------------------------- -// Tasks: Missing Assets erzeugen -// --------------------------- - -type AssetsTaskState struct { - Running bool `json:"running"` - Total int 
`json:"total"` - Done int `json:"done"` - GeneratedThumbs int `json:"generatedThumbs"` - GeneratedPreviews int `json:"generatedPreviews"` - Skipped int `json:"skipped"` - StartedAt time.Time `json:"startedAt"` - FinishedAt *time.Time `json:"finishedAt,omitempty"` - Error string `json:"error,omitempty"` -} - -var assetsTaskMu sync.Mutex -var assetsTaskState AssetsTaskState -var assetsTaskCancel context.CancelFunc - -func tasksGenerateAssets(w http.ResponseWriter, r *http.Request) { - switch r.Method { - case http.MethodGet: - assetsTaskMu.Lock() - st := assetsTaskState - assetsTaskMu.Unlock() - writeJSON(w, http.StatusOK, st) - return - - case http.MethodPost: - assetsTaskMu.Lock() - if assetsTaskState.Running { - st := assetsTaskState - assetsTaskMu.Unlock() - writeJSON(w, http.StatusOK, st) - return - } - - // ✅ cancelbaren Context erzeugen - ctx, cancel := context.WithCancel(context.Background()) - assetsTaskCancel = cancel - - assetsTaskState = AssetsTaskState{ - Running: true, - StartedAt: time.Now(), - } - st := assetsTaskState - assetsTaskMu.Unlock() - - go runGenerateMissingAssets(ctx) - - writeJSON(w, http.StatusOK, st) - return - - case http.MethodDelete: - assetsTaskMu.Lock() - cancel := assetsTaskCancel - running := assetsTaskState.Running - assetsTaskMu.Unlock() - - if !running || cancel == nil { - // nichts zu stoppen - w.WriteHeader(http.StatusNoContent) - return - } - - cancel() - - // optional: sofortiges Feedback in state.error - assetsTaskMu.Lock() - if assetsTaskState.Running { - assetsTaskState.Error = "abgebrochen" - } - st := assetsTaskState - assetsTaskMu.Unlock() - - writeJSON(w, http.StatusOK, st) - return - - default: - http.Error(w, "Nur GET/POST", http.StatusMethodNotAllowed) - return - } -} - -func runGenerateMissingAssets(ctx context.Context) { - finishWithErr := func(err error) { - now := time.Now() - assetsTaskMu.Lock() - assetsTaskState.Running = false - assetsTaskState.FinishedAt = &now - if err != nil { - assetsTaskState.Error = 
err.Error() - } - assetsTaskMu.Unlock() - } - - defer func() { - assetsTaskMu.Lock() - assetsTaskCancel = nil - assetsTaskMu.Unlock() - }() - - s := getSettings() - doneAbs, err := resolvePathRelativeToApp(s.DoneDir) - if err != nil || strings.TrimSpace(doneAbs) == "" { - finishWithErr(fmt.Errorf("doneDir auflösung fehlgeschlagen: %v", err)) - return - } - - type item struct { - name string - path string - } - - // .trash niemals verarbeiten - isTrashPath := func(full string) bool { - p := strings.ToLower(strings.ReplaceAll(full, "\\", "/")) - return strings.Contains(p, "/.trash/") || strings.HasSuffix(p, "/.trash") - } - - seen := map[string]struct{}{} - items := make([]item, 0, 512) - - addIfVideo := func(full string) { - if isTrashPath(full) { - return - } - - name := filepath.Base(full) - low := strings.ToLower(name) - if strings.Contains(low, ".part") || strings.Contains(low, ".tmp") { - return - } - ext := strings.ToLower(filepath.Ext(name)) - if ext != ".mp4" && ext != ".ts" { - return - } - - // Dedupe - if _, ok := seen[full]; ok { - return - } - seen[full] = struct{}{} - items = append(items, item{name: name, path: full}) - } - - scanOneLevel := func(dir string) { - ents, err := os.ReadDir(dir) - if err != nil { - return - } - for _, e := range ents { - // .trash-Ordner nie scannen - if e.IsDir() && strings.EqualFold(e.Name(), ".trash") { - continue - } - - full := filepath.Join(dir, e.Name()) - if e.IsDir() { - sub, err := os.ReadDir(full) - if err != nil { - continue - } - for _, se := range sub { - if se.IsDir() { - continue - } - addIfVideo(filepath.Join(full, se.Name())) - } - continue - } - addIfVideo(full) - } - } - - // ✅ done + done// + done/keep + done/keep// - scanOneLevel(doneAbs) - scanOneLevel(filepath.Join(doneAbs, "keep")) - - assetsTaskMu.Lock() - assetsTaskState.Total = len(items) - assetsTaskState.Done = 0 - assetsTaskState.GeneratedThumbs = 0 - assetsTaskState.GeneratedPreviews = 0 - assetsTaskState.Skipped = 0 - assetsTaskState.Error 
= "" - assetsTaskMu.Unlock() - - for i, it := range items { - if err := ctx.Err(); err != nil { - finishWithErr(err) - return - } - - base := strings.TrimSuffix(it.name, filepath.Ext(it.name)) - id := stripHotPrefix(base) - if strings.TrimSpace(id) == "" { - assetsTaskMu.Lock() - assetsTaskState.Done = i + 1 - assetsTaskMu.Unlock() - continue - } - - assetDir, derr := ensureGeneratedDir(id) - if derr != nil { - assetsTaskMu.Lock() - assetsTaskState.Error = "mindestens ein Eintrag konnte nicht verarbeitet werden (siehe Logs)" - assetsTaskState.Done = i + 1 - assetsTaskMu.Unlock() - fmt.Println("⚠️ ensureGeneratedDir:", derr) - continue - } - - thumbPath := filepath.Join(assetDir, "thumbs.jpg") - previewPath := filepath.Join(assetDir, "preview.mp4") - metaPath := filepath.Join(assetDir, "meta.json") - - thumbOK := func() bool { - fi, err := os.Stat(thumbPath) - return err == nil && !fi.IsDir() && fi.Size() > 0 - }() - previewOK := func() bool { - fi, err := os.Stat(previewPath) - return err == nil && !fi.IsDir() && fi.Size() > 0 - }() - - // Datei-Info (für Meta-Validierung) - vfi, verr := os.Stat(it.path) - if verr != nil || vfi.IsDir() || vfi.Size() <= 0 { - assetsTaskMu.Lock() - assetsTaskState.Done = i + 1 - assetsTaskMu.Unlock() - continue - } - - // ✅ SourceURL best-effort: aus bestehender meta.json, wenn vorhanden/valide - sourceURL := "" - if u, ok := readVideoMetaSourceURL(metaPath, vfi); ok { - sourceURL = u - } - - // ✅ Dauer zuerst aus meta.json, sonst 1× ffprobe & meta.json schreiben - durSec := 0.0 - metaOK := false - - if d, ok := readVideoMetaDuration(metaPath, vfi); ok { - durSec = d - metaOK = true - - // meta ist valide (Duration ok), aber falls wir (irgendwoher) eine SourceURL hätten - // und sie in meta noch fehlt -> meta anreichern ohne ffprobe. 
- if strings.TrimSpace(sourceURL) != "" { - if u, ok := readVideoMetaSourceURL(metaPath, vfi); !ok || strings.TrimSpace(u) == "" { - _ = writeVideoMetaDuration(metaPath, vfi, durSec, sourceURL) - } - } - } else { - dctx, cancel := context.WithTimeout(ctx, 6*time.Second) - d, derr := durationSecondsCached(dctx, it.path) - cancel() - - if derr == nil && d > 0 { - durSec = d - // ✅ HIER: nicht writeVideoMeta(metaPath, fi, dur, sourceURL) !! - // sondern Duration-only writer nutzen - _ = writeVideoMetaDuration(metaPath, vfi, durSec, sourceURL) - metaOK = true - } - } - - if thumbOK && previewOK && metaOK { - assetsTaskMu.Lock() - assetsTaskState.Skipped++ - assetsTaskState.Done = i + 1 - assetsTaskMu.Unlock() - continue - } - - // ---------------- - // Thumbs - // ---------------- - if !thumbOK { - genCtx, cancel := context.WithTimeout(ctx, 45*time.Second) - if err := thumbSem.Acquire(genCtx); err != nil { - cancel() - finishWithErr(err) - return - } - cancel() // Timeout-Context freigeben, Semaphore bleibt gehalten - defer thumbSem.Release() - - t := 0.0 - if durSec > 0 { - t = durSec * 0.5 - } - - img, e1 := extractFrameAtTimeJPEG(it.path, t) - if e1 != nil || len(img) == 0 { - img, e1 = extractLastFrameJPEG(it.path) - if e1 != nil || len(img) == 0 { - img, e1 = extractFirstFrameJPEG(it.path) - } - } - - // Release wurde defer’t, aber wir wollen pro Iteration releasen: - thumbSem.Release() - - if e1 == nil && len(img) > 0 { - if err := atomicWriteFile(thumbPath, img); err == nil { - assetsTaskMu.Lock() - assetsTaskState.GeneratedThumbs++ - assetsTaskMu.Unlock() - } else { - fmt.Println("⚠️ thumb write:", err) - } - } - } - - // ---------------- - // Preview - // ---------------- - if !previewOK { - genCtx, cancel := context.WithTimeout(ctx, 3*time.Minute) - if err := genSem.Acquire(genCtx); err != nil { - cancel() - finishWithErr(err) - return - } - - err := generateTeaserClipsMP4(genCtx, it.path, previewPath, 1.0, 18) - - genSem.Release() - cancel() - - if err == 
nil { - assetsTaskMu.Lock() - assetsTaskState.GeneratedPreviews++ - assetsTaskMu.Unlock() - } else { - fmt.Println("⚠️ preview clips:", err) - } - } - - assetsTaskMu.Lock() - assetsTaskState.Done = i + 1 - assetsTaskMu.Unlock() - } - - finishWithErr(nil) -} - -// --- Teaser Preview Options + Helpers --- - -// Minimale Segmentdauer, damit ffmpeg nicht mit zu kurzen Schnipseln zickt. -const minSegmentDuration = 0.50 // Sekunden - -type TeaserPreviewOptions struct { - Segments int - SegmentDuration float64 - - Width int - Preset string - CRF int - - // wird von uns "hart" auf true gesetzt (Audio ist NICHT optional) - Audio bool - AudioBitrate string - - UseVsync2 bool -} - -// stepSizeAndOffset verteilt die Startpunkte über das Video. -// Rückgabe: stepSize, offset (beide in Sekunden). -func (o TeaserPreviewOptions) stepSizeAndOffset(dur float64) (float64, float64) { - if dur <= 0 { - return 0, 0 - } - - n := o.Segments - if n < 1 { - n = 1 - } - - segDur := o.SegmentDuration - if segDur <= 0 { - segDur = 1 - } - if segDur < minSegmentDuration { - segDur = minSegmentDuration - } - - // letzter sinnvoller Start (kleiner Sicherheitsabstand) - maxStart := dur - 0.05 - segDur - if maxStart < 0 { - maxStart = 0 - } - - // 1 Segment -> Mitte - if n == 1 { - return 0, maxStart * 0.5 - } - - // kleine Ränder, damit nicht immer ganz am Anfang/Ende - margin := 0.05 * maxStart - if margin < 0 { - margin = 0 - } - span := maxStart - 2*margin - if span < 0 { - span = maxStart - margin = 0 - } - - step := 0.0 - if n > 1 { - step = span / float64(n-1) - } - return step, margin -} - -func generateTeaserClipsMP4(ctx context.Context, srcPath, outPath string, clipLenSec float64, maxClips int) error { - return generateTeaserClipsMP4WithProgress(ctx, srcPath, outPath, clipLenSec, maxClips, nil) -} - -func generateTeaserClipsMP4WithProgress( - ctx context.Context, - srcPath, outPath string, - clipLenSec float64, - maxClips int, - onRatio func(r float64), -) error { - // kompatible Defaults 
aus deiner Signatur -> Options - opts := TeaserPreviewOptions{ - Segments: maxClips, - SegmentDuration: clipLenSec, - - // stash-like Defaults - Width: 640, - Preset: "veryfast", - CRF: 21, - Audio: true, - AudioBitrate: "128k", - UseVsync2: false, - } - return generateTeaserPreviewMP4WithProgress(ctx, srcPath, outPath, opts, onRatio) -} - -func generateTeaserChunkMP4(ctx context.Context, src, out string, start, dur float64, opts TeaserPreviewOptions) error { - - // ✅ Audio ist Pflicht (nicht optional) - opts.Audio = true - - tmp := strings.TrimSuffix(out, ".mp4") + ".part.mp4" - segDur := dur - if segDur < minSegmentDuration { - segDur = minSegmentDuration - } - - args := []string{ - "-y", "-hide_banner", "-loglevel", "error", - } - args = append(args, ffmpegInputTol...) - args = append(args, - "-ss", fmt.Sprintf("%.3f", start), - "-t", fmt.Sprintf("%.3f", segDur), - "-i", src, - "-map", "0:v:0", - "-c:v", "libx264", - "-pix_fmt", "yuv420p", - "-profile:v", "high", - "-level", "4.2", - "-preset", opts.Preset, - "-crf", strconv.Itoa(opts.CRF), - "-threads", "4", - ) - - if opts.UseVsync2 { - args = append(args, "-vsync", "2") - } - - if opts.Audio { - args = append(args, - "-map", "0:a:0", // Audio Pflicht - "-c:a", "aac", - "-b:a", opts.AudioBitrate, - "-ac", "2", - "-shortest", - ) - } else { - args = append(args, "-an") - } - - args = append(args, "-movflags", "+faststart", tmp) - - cmd := exec.CommandContext(ctx, ffmpegPath, args...) 
- var stderr bytes.Buffer - cmd.Stderr = &stderr - if err := cmd.Run(); err != nil { - _ = os.Remove(tmp) - return fmt.Errorf("ffmpeg teaser chunk failed: %v (%s)", err, strings.TrimSpace(stderr.String())) - } - _ = os.Remove(out) - return os.Rename(tmp, out) -} - -func generateTeaserPreviewMP4WithProgress( - ctx context.Context, - srcPath, outPath string, - opts TeaserPreviewOptions, - onRatio func(r float64), -) error { - - // ✅ Audio ist Pflicht (nicht optional) - opts.Audio = true - - // Defaults - if opts.SegmentDuration <= 0 { - opts.SegmentDuration = 1 - } - if opts.Segments <= 0 { - opts.Segments = 18 - } - if opts.Width <= 0 { - opts.Width = 640 - } - if opts.Preset == "" { - opts.Preset = "veryfast" - } - if opts.CRF <= 0 { - opts.CRF = 21 - } - if opts.AudioBitrate == "" { - opts.AudioBitrate = "128k" - } - segDur := opts.SegmentDuration - if segDur < minSegmentDuration { - segDur = minSegmentDuration - } - - // Dauer holen (einmalig; wird gecached) - dur, _ := durationSecondsCached(ctx, srcPath) - - // Kurzvideo-Fallback wie "die andere": - // Wenn Video kürzer als Segments*SegmentDuration -> Single Preview über komplette Dauer - if dur > 0 && dur < segDur*float64(opts.Segments) { - // als 1 Segment behandeln, Duration = dur - opts.Segments = 1 - segDur = dur - } - - // Wenn Dauer unbekannt/zu klein: ab 0 ein Stück - if !(dur > 0) { - if onRatio != nil { - onRatio(0) - } - // hier könntest du auch segDur verwenden; ich nehme min(8, segDur) ähnlich wie vorher - err := generateTeaserChunkMP4(ctx, srcPath, outPath, 0, math.Min(8, segDur), opts) - if onRatio != nil { - onRatio(1) - } - return err - } - - // Startpunkte wie "die andere": offset + i*stepSize - stepSize, offset := opts.stepSizeAndOffset(dur) - - starts := make([]float64, 0, opts.Segments) - for i := 0; i < opts.Segments; i++ { - t := offset + float64(i)*stepSize - - // clamp: sicherstellen, dass wir nicht über Ende hinaus trimmen - maxStart := math.Max(0, dur-0.05-segDur) - if t < 0 { - t = 0 
- } - if t > maxStart { - t = maxStart - } - if t < 0.05 { - t = 0.05 - } - starts = append(starts, t) - } - - expectedOutSec := float64(len(starts)) * segDur - tmp := strings.TrimSuffix(outPath, ".mp4") + ".part.mp4" - - args := []string{ - "-y", - "-nostats", - "-progress", "pipe:1", - "-hide_banner", - "-loglevel", "error", - } - - // Inputs: pro Segment eigener -ss/-t/-i (wie bei dir) - for _, t := range starts { - args = append(args, ffmpegInputTol...) - args = append(args, - "-ss", fmt.Sprintf("%.3f", t), - "-t", fmt.Sprintf("%.3f", segDur), - "-i", srcPath, - ) - } - - // filter_complex bauen - var fc strings.Builder - for i := range starts { - // stash-like: ScaleWidth(640), pix_fmt yuv420p, profile high/level 4.2 später in output args - fmt.Fprintf(&fc, - "[%d:v]scale=%d:-2,setsar=1,setpts=PTS-STARTPTS[v%d];", - i, opts.Width, i, - ) - - if opts.Audio { - // dein “concat-safe” Audio normalisieren (gute Idee) - fmt.Fprintf(&fc, - "[%d:a]aresample=48000,aformat=channel_layouts=stereo,asetpts=PTS-STARTPTS[a%d];", - i, i, - ) - } - } - - // interleaved concat inputs - for i := range starts { - if opts.Audio { - fmt.Fprintf(&fc, "[v%d][a%d]", i, i) - } else { - fmt.Fprintf(&fc, "[v%d]", i) - } - } - - if opts.Audio { - fmt.Fprintf(&fc, "concat=n=%d:v=1:a=1[v][a]", len(starts)) - } else { - fmt.Fprintf(&fc, "concat=n=%d:v=1:a=0[v]", len(starts)) - } - - args = append(args, "-filter_complex", fc.String()) - - // map outputs - args = append(args, "-map", "[v]") - if opts.Audio { - args = append(args, "-map", "[a]") - } - - // Video encode (stash-like) - args = append(args, - "-c:v", "libx264", - "-pix_fmt", "yuv420p", - "-profile:v", "high", - "-level", "4.2", - "-preset", opts.Preset, - "-crf", strconv.Itoa(opts.CRF), - "-threads", "4", - ) - - if opts.UseVsync2 { - args = append(args, "-vsync", "2") - } - - // Audio encode optional (stash-like 128k), plus dein -ac 2 - if opts.Audio { - args = append(args, - "-c:a", "aac", - "-b:a", opts.AudioBitrate, - "-ac", 
"2", - "-shortest", - ) - } - - args = append(args, "-movflags", "+faststart", tmp) - - cmd := exec.CommandContext(ctx, ffmpegPath, args...) - - stdout, err := cmd.StdoutPipe() - if err != nil { - return err - } - - var stderr bytes.Buffer - cmd.Stderr = &stderr - - if err := cmd.Start(); err != nil { - return err - } - - sc := bufio.NewScanner(stdout) - sc.Buffer(make([]byte, 0, 64*1024), 1024*1024) - - var lastSent float64 - var lastAt time.Time - - send := func(outSec float64, force bool) { - if onRatio == nil { - return - } - if expectedOutSec > 0 && outSec > 0 { - r := outSec / expectedOutSec - if r < 0 { - r = 0 - } - if r > 1 { - r = 1 - } - if r-lastSent < 0.01 && !force { - return - } - if !lastAt.IsZero() && time.Since(lastAt) < 150*time.Millisecond && !force { - return - } - lastSent = r - lastAt = time.Now() - onRatio(r) - return - } - if force { - onRatio(1) - } - } - - var outSec float64 - - for sc.Scan() { - line := strings.TrimSpace(sc.Text()) - if line == "" { - continue - } - parts := strings.SplitN(line, "=", 2) - if len(parts) != 2 { - continue - } - k, v := parts[0], parts[1] - - switch k { - case "out_time_ms": - if n, perr := strconv.ParseInt(strings.TrimSpace(v), 10, 64); perr == nil && n > 0 { - outSec = float64(n) / 1_000_000.0 - send(outSec, false) - } - case "out_time": - if s := parseFFmpegOutTime(v); s > 0 { - outSec = s - send(outSec, false) - } - case "progress": - if strings.TrimSpace(v) == "end" { - send(outSec, true) - } - } - } - - if err := cmd.Wait(); err != nil { - _ = os.Remove(tmp) - return fmt.Errorf("ffmpeg teaser preview failed: %v (%s)", err, strings.TrimSpace(stderr.String())) - } - - _ = os.Remove(outPath) - return os.Rename(tmp, outPath) -} - -func prunePreviewCacheDir(previewDir string, maxFrames int, maxAge time.Duration) { - entries, err := os.ReadDir(previewDir) - if err != nil { - return - } - - type frame struct { - path string - mt time.Time - } - - now := time.Now() - - var frames []frame - - for _, e := range 
entries { - name := e.Name() - path := filepath.Join(previewDir, name) - - // .part Dateien immer weg - if strings.HasSuffix(name, ".part") { - _ = os.Remove(path) - continue - } - - // optional: preview.jpg neu erzeugen lassen, wenn uralt - if name == "preview.jpg" { - if info, err := e.Info(); err == nil { - if maxAge > 0 && now.Sub(info.ModTime()) > maxAge { - _ = os.Remove(path) - } - } - continue - } - - // Nur t_*.jpg verwalten - if strings.HasPrefix(name, "t_") && strings.HasSuffix(name, ".jpg") { - info, err := e.Info() - if err != nil { - continue - } - - // alte Frames löschen - if maxAge > 0 && now.Sub(info.ModTime()) > maxAge { - _ = os.Remove(path) - continue - } - - frames = append(frames, frame{path: path, mt: info.ModTime()}) - } - } - - // Anzahl begrenzen: älteste zuerst löschen - if maxFrames > 0 && len(frames) > maxFrames { - sort.Slice(frames, func(i, j int) bool { return frames[i].mt.Before(frames[j].mt) }) - toDelete := len(frames) - maxFrames - for i := 0; i < toDelete; i++ { - _ = os.Remove(frames[i].path) - } - } -} - -func servePreviewJPEGBytes(w http.ResponseWriter, img []byte) { - w.Header().Set("Content-Type", "image/jpeg") - w.Header().Set("Cache-Control", "public, max-age=31536000") - w.Header().Set("X-Content-Type-Options", "nosniff") - w.WriteHeader(http.StatusOK) - _, _ = w.Write(img) -} - -func servePreviewJPEGBytesNoStore(w http.ResponseWriter, img []byte) { - w.Header().Set("Content-Type", "image/jpeg") - w.Header().Set("Cache-Control", "no-store, max-age=0") - w.Header().Set("Pragma", "no-cache") - w.Header().Set("Expires", "0") - w.Header().Set("X-Content-Type-Options", "nosniff") - w.WriteHeader(http.StatusOK) - _, _ = w.Write(img) -} - -func serveLivePreviewJPEGBytes(w http.ResponseWriter, img []byte) { - w.Header().Set("Content-Type", "image/jpeg") - w.Header().Set("Cache-Control", "no-store, max-age=0, must-revalidate") - w.Header().Set("Pragma", "no-cache") - w.Header().Set("Expires", "0") - 
w.Header().Set("X-Content-Type-Options", "nosniff") - w.WriteHeader(http.StatusOK) - _, _ = w.Write(img) -} - -func servePreviewJPEGFile(w http.ResponseWriter, r *http.Request, path string) { - w.Header().Set("Content-Type", "image/jpeg") - w.Header().Set("Cache-Control", "public, max-age=31536000") - w.Header().Set("X-Content-Type-Options", "nosniff") - http.ServeFile(w, r, path) -} - -func recordList(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - http.Error(w, "Nur GET erlaubt", http.StatusMethodNotAllowed) - return - } - - jobsMu.Lock() - list := make([]*RecordJob, 0, len(jobs)) - for _, j := range jobs { - // ✅ NEU: Hidden (und nil) nicht ausgeben -> UI sieht Probe-Jobs nicht - if j == nil || j.Hidden { - continue - } - list = append(list, j) - } - jobsMu.Unlock() - - // optional: neueste zuerst - sort.Slice(list, func(i, j int) bool { - return list[i].StartedAt.After(list[j].StartedAt) - }) - - w.Header().Set("Content-Type", "application/json") - w.Header().Set("Cache-Control", "no-store") - _ = json.NewEncoder(w).Encode(list) -} - -var previewFileRe = regexp.MustCompile(`^(index(_hq)?\.m3u8|seg_(low|hq)_\d+\.ts|seg_\d+\.ts)$`) - -func serveEmptyLiveM3U8(w http.ResponseWriter, r *http.Request) { - // Für Player: gültige Playlist statt 204 liefern - w.Header().Set("Content-Type", "application/vnd.apple.mpegurl; charset=utf-8") - w.Header().Set("Cache-Control", "no-store") - w.Header().Set("X-Content-Type-Options", "nosniff") - // Optional: Player/Proxy darf schnell retryen - w.Header().Set("Retry-After", "1") - - // Bei HEAD nur Header schicken - if r.Method == http.MethodHead { - w.WriteHeader(http.StatusOK) - return - } - - // Minimal gültige LIVE-Playlist (keine Segmente, kein ENDLIST) - // Viele Player bleiben damit im "loading", statt hart zu failen. 
- body := "#EXTM3U\n" + - "#EXT-X-VERSION:3\n" + - "#EXT-X-TARGETDURATION:2\n" + - "#EXT-X-MEDIA-SEQUENCE:0\n" - - w.WriteHeader(http.StatusOK) - _, _ = w.Write([]byte(body)) -} - -func stopPreview(job *RecordJob) { - jobsMu.Lock() - cmd := job.previewCmd - cancel := job.previewCancel - job.previewCmd = nil - job.previewCancel = nil - job.LiveThumbStarted = false - job.PreviewDir = "" - jobsMu.Unlock() - - if cancel != nil { - cancel() - } - if cmd != nil && cmd.Process != nil { - _ = cmd.Process.Kill() - } -} - -func servePreviewHLSFile(w http.ResponseWriter, r *http.Request, id, file string) { - file = strings.TrimSpace(file) - if file == "" || filepath.Base(file) != file || !previewFileRe.MatchString(file) { - http.Error(w, "ungültige file", http.StatusBadRequest) - return - } - - isIndex := file == "index.m3u8" || file == "index_hq.m3u8" - - jobsMu.Lock() - job, ok := jobs[id] - state := "" - if ok && job != nil { - state = strings.TrimSpace(job.PreviewState) - } - jobsMu.Unlock() - - // ========================= - // ✅ HEAD = nur Existenzcheck (kein hover nötig, kein Preview-Start) - // ========================= - if r.Method == http.MethodHead { - if !ok || job == nil { - w.WriteHeader(http.StatusNotFound) - return - } - if state == "private" { - w.WriteHeader(http.StatusForbidden) - return - } - if state == "offline" { - w.WriteHeader(http.StatusNotFound) - return - } - previewDir := strings.TrimSpace(job.PreviewDir) - if previewDir == "" { - w.WriteHeader(http.StatusNotFound) - return - } - p := filepath.Join(previewDir, file) - if st, err := os.Stat(p); err == nil && !st.IsDir() { - w.Header().Set("Cache-Control", "no-store") - w.WriteHeader(http.StatusOK) - return - } - w.WriteHeader(http.StatusNotFound) - return - } - - // ========================= - // ✅ NEU: Player darf Preview auch ohne Hover starten - // - Frontend hängt &play=1 an (empfohlen) - // - Wir akzeptieren zusätzlich: play=1 => treat as active - // ========================= - active := 
isHover(r) || strings.TrimSpace(r.URL.Query().Get("play")) == "1" - - if !active { - // Kein Hover/Play => niemals Live-HLS abgreifen - if isIndex { - serveEmptyLiveM3U8(w, r) - return - } - http.Error(w, "preview not active", http.StatusNotFound) - return - } - - // active => wenn Job unbekannt, sauber raus - if !ok || job == nil { - if isIndex { - serveEmptyLiveM3U8(w, r) - return - } - http.Error(w, "job nicht gefunden", http.StatusNotFound) - return - } - - // active => Preview starten/keepalive - ensurePreviewStarted(r, job) - touchPreview(job) - - // state ggf. nach Start nochmal lesen - jobsMu.Lock() - state = strings.TrimSpace(job.PreviewState) - jobsMu.Unlock() - - if state == "private" { - http.Error(w, "model private", http.StatusForbidden) - return - } - if state == "offline" { - http.Error(w, "model offline", http.StatusNotFound) - return - } - if state == "error" { - http.Error(w, "preview error", http.StatusServiceUnavailable) - return - } - - previewDir := strings.TrimSpace(job.PreviewDir) - if previewDir == "" { - if isIndex { - serveEmptyLiveM3U8(w, r) - return - } - http.Error(w, "preview nicht verfügbar", http.StatusNotFound) - return - } - - p := filepath.Join(previewDir, file) - - st, err := os.Stat(p) - if err != nil || st.IsDir() { - if isIndex { - serveEmptyLiveM3U8(w, r) - return - } - http.Error(w, "datei nicht gefunden", http.StatusNotFound) - return - } - - ext := strings.ToLower(filepath.Ext(p)) - - // ✅ common: always no-store - w.Header().Set("Cache-Control", "no-store") - // ✅ avoids some proxy buffering surprises (harmless if ignored) - w.Header().Set("X-Accel-Buffering", "no") - - // ========================= - // ✅ .m3u8: rewrite (klein, ReadFile ok) - // ========================= - if ext == ".m3u8" { - raw, err := os.ReadFile(p) - if err != nil { - http.Error(w, "m3u8 read failed", http.StatusInternalServerError) - return - } - - rewritten := rewriteM3U8(raw, id) - - w.Header().Set("Content-Type", 
"application/vnd.apple.mpegurl; charset=utf-8") - w.WriteHeader(http.StatusOK) - _, _ = w.Write(rewritten) - return - } - - // ========================= - // ✅ Segmente: robust streamen + Range-support - // ========================= - switch ext { - case ".ts": - w.Header().Set("Content-Type", "video/mp2t") - case ".m4s": - w.Header().Set("Content-Type", "video/iso.segment") - default: - w.Header().Set("Content-Type", "application/octet-stream") - } - - // ✅ Optional aber sehr hilfreich: - // liefere ein Segment erst aus, wenn es nicht mehr wächst (verhindert "hängende" große .ts) - if ext == ".ts" || ext == ".m4s" { - if !waitForStableFile(p, 2, 120*time.Millisecond) { - // Segment ist vermutlich noch im Schreiben -> lieber 404, Player retryt - http.Error(w, "segment not ready", http.StatusNotFound) - return - } - } - - f, err := os.Open(p) - if err != nil { - http.Error(w, "open failed", http.StatusNotFound) - return - } - defer f.Close() - - // ✅ ServeContent macht Range korrekt und streamt ohne ReadAll. - // name ist nur für logs/cache; modTime für If-Modified-Since etc. - http.ServeContent(w, r, file, st.ModTime(), f) - -} - -func waitForStableFile(path string, checks int, interval time.Duration) bool { - // returns true if size is stable across N checks - var last int64 = -1 - for i := 0; i < checks; i++ { - st, err := os.Stat(path) - if err != nil || st.IsDir() { - return false - } - sz := st.Size() - if last >= 0 && sz == last { - return true - } - last = sz - time.Sleep(interval) - } - // if we never saw stability, assume not ready - return false -} - -func rewriteM3U8(raw []byte, id string) []byte { - // Wir bauen alle URIs so um, dass sie wieder über /api/record/preview laufen. - // Wichtig: play=1 bleibt dran, damit Folge-Requests (segments, chunklists) auch ohne Hover gehen. 
- base := "/api/record/preview?id=" + url.QueryEscape(id) + "&file=" - - var out bytes.Buffer - sc := bufio.NewScanner(bytes.NewReader(raw)) - - // Scanner default token limit 64K – m3u8 ist normalerweise klein, passt. - // Wenn du riesige Playlists hast, kannst du Buffer erhöhen. - - for sc.Scan() { - line := sc.Text() - trim := strings.TrimSpace(line) - - if trim == "" { - out.WriteByte('\n') - continue - } - - // Kommentare/Tags: ggf. URI="..." in Tags rewriten - if strings.HasPrefix(trim, "#") { - // EXT-X-KEY:URI="..." - line = rewriteAttrURI(line, base) - out.WriteString(line) - out.WriteByte('\n') - continue - } - - // Nicht-Tag => URI (segment oder child-playlist) - u := trim - - // Absolut? dann lassen - if strings.HasPrefix(u, "http://") || strings.HasPrefix(u, "https://") { - out.WriteString(line) - out.WriteByte('\n') - continue - } - - // Wenn es schon unser API ist, lassen - if strings.Contains(u, "/api/record/preview") { - out.WriteString(line) - out.WriteByte('\n') - continue - } - - // Nur basename nehmen (ffmpeg schreibt i.d.R. keine Subdirs) - name := path.Base(u) - - // Hier play=1 mitschicken: - out.WriteString(base + url.QueryEscape(name) + "&play=1") - out.WriteByte('\n') - } - - if err := sc.Err(); err != nil { - // Wenn Scanner aus irgendeinem Grund scheitert: lieber raw zurück (besser als kaputt) - return raw - } - return out.Bytes() -} - -func rewriteAttrURI(line, base string) string { - // Rewritet URI="xyz" in EXT-X-KEY / EXT-X-MAP / EXT-X-MEDIA / EXT-X-I-FRAME-STREAM-INF etc. - // Nur relative URIs werden angefasst. 
- const key = `URI="` - i := strings.Index(line, key) - if i < 0 { - return line - } - - j := strings.Index(line[i+len(key):], `"`) - if j < 0 { - return line - } - - start := i + len(key) - end := start + j - val := line[start:end] - valTrim := strings.TrimSpace(val) - - // absolut oder schon preview => nix tun - if strings.HasPrefix(valTrim, "http://") || strings.HasPrefix(valTrim, "https://") || strings.Contains(valTrim, "/api/record/preview") { - return line - } - - name := path.Base(valTrim) - repl := base + url.QueryEscape(name) + "&play=1" - - return line[:start] + repl + line[end:] -} - -func rewriteQuotedURI(line, id string) string { - re := regexp.MustCompile(`URI="([^"]+)"`) - return re.ReplaceAllStringFunc(line, func(m string) string { - sub := re.FindStringSubmatch(m) - if len(sub) != 2 { - return m - } - u := sub[1] - uu := strings.TrimSpace(u) - if uu == "" || strings.HasPrefix(uu, "http://") || strings.HasPrefix(uu, "https://") || strings.HasPrefix(uu, "/") { - return m - } - repl := "/api/record/preview?id=" + url.QueryEscape(id) + "&file=" + url.QueryEscape(uu) - return `URI="` + repl + `"` - }) -} - -func rewriteM3U8ToPreviewEndpoint(m3u8 string, id string) string { - lines := strings.Split(m3u8, "\n") - escapedID := url.QueryEscape(id) - - for i, line := range lines { - l := strings.TrimSpace(line) - if l == "" || strings.HasPrefix(l, "#") { - continue - } - // Segment/URI-Zeilen umschreiben - lines[i] = "/api/record/preview?id=" + escapedID + "&file=" + url.QueryEscape(l) - } - - return strings.Join(lines, "\n") -} - -func classifyPreviewFFmpegStderr(stderr string) (state string, httpStatus int) { - s := strings.ToLower(stderr) - - // ffmpeg schreibt typischerweise: - // "HTTP error 403 Forbidden" oder "Server returned 403 Forbidden" - if strings.Contains(s, "403 forbidden") || strings.Contains(s, "http error 403") || strings.Contains(s, "server returned 403") { - return "private", http.StatusForbidden - } - - // "HTTP error 404 Not Found" oder 
"Server returned 404 Not Found" - if strings.Contains(s, "404 not found") || strings.Contains(s, "http error 404") || strings.Contains(s, "server returned 404") { - return "offline", http.StatusNotFound - } - - return "", 0 -} - -func servePreviewStatusSVG(w http.ResponseWriter, label string, status int) { - w.Header().Set("Content-Type", "image/svg+xml; charset=utf-8") - w.Header().Set("Cache-Control", "no-store") - w.Header().Set("X-Content-Type-Options", "nosniff") - - if status <= 0 { - status = http.StatusOK - } - - title := html.EscapeString(strings.TrimSpace(label)) - if title == "" { - title = "Preview" - } - - // 16:9 (passt zu deinen Cards) - svg := ` - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ` + title + ` - - Preview nicht verfügbar - -` - - w.WriteHeader(status) - _, _ = w.Write([]byte(svg)) -} - -func startPreviewHLS(ctx context.Context, job *RecordJob, m3u8URL, previewDir, httpCookie, userAgent string) error { - if strings.TrimSpace(ffmpegPath) == "" { - return fmt.Errorf("kein ffmpeg gefunden – setze FFMPEG_PATH oder lege ffmpeg(.exe) neben das Backend") - } - - if err := os.MkdirAll(previewDir, 0755); err != nil { - return err - } - - // ✅ PreviewState reset (neuer Start) - jobsMu.Lock() - job.PreviewState = "" - job.PreviewStateAt = "" - job.PreviewStateMsg = "" - jobsMu.Unlock() - notifyJobsChanged() - - commonIn := []string{"-y"} - if strings.TrimSpace(userAgent) != "" { - commonIn = append(commonIn, "-user_agent", userAgent) - } - if strings.TrimSpace(httpCookie) != "" { - commonIn = append(commonIn, "-headers", fmt.Sprintf("Cookie: %s\r\n", httpCookie)) - } - commonIn = append(commonIn, "-i", m3u8URL) - - hqArgs := append(commonIn, - "-vf", "scale=480:-2", - "-c:v", "libx264", "-preset", "veryfast", "-tune", "zerolatency", - "-pix_fmt", "yuv420p", - "-profile:v", "main", - "-level", "3.1", - "-threads", "4", - - // GOP ~ 2s (bei 24fps). Optional force_key_frames zusätzlich. 
- "-g", "48", "-keyint_min", "48", "-sc_threshold", "0", - // optional, wenn du noch große Segmente bekommst: - // "-force_key_frames", "expr:gte(t,n_forced*2)", - - "-map", "0:v:0", - "-map", "0:a:0?", - "-c:a", "aac", "-b:a", "128k", "-ac", "2", - - "-f", "hls", - "-hls_time", "2", - "-hls_list_size", "6", - "-hls_allow_cache", "0", - - // ✅ wichtig: temp_file - "-hls_flags", "delete_segments+append_list+independent_segments+temp_file", - - "-hls_segment_filename", filepath.Join(previewDir, "seg_hq_%05d.ts"), - - // ✅ Empfehlung: weglassen (du rewritest ohnehin) - // "-hls_base_url", baseURL, - - filepath.Join(previewDir, "index_hq.m3u8"), - ) - - cmd := exec.CommandContext(ctx, ffmpegPath, hqArgs...) - var stderr bytes.Buffer - cmd.Stderr = &stderr - - jobsMu.Lock() - job.previewCmd = cmd - jobsMu.Unlock() - - go func() { - if err := previewSem.Acquire(ctx); err != nil { - jobsMu.Lock() - if job.previewCmd == cmd { - job.previewCmd = nil - } - jobsMu.Unlock() - return - } - defer previewSem.Release() - - if err := cmd.Run(); err != nil && ctx.Err() == nil { - st := strings.TrimSpace(stderr.String()) - - // ✅ 403/404 erkennen -> Private/Offline setzen - state, code := classifyPreviewFFmpegStderr(st) - - jobsMu.Lock() - if state != "" { - job.PreviewState = state - job.PreviewStateAt = time.Now().UTC().Format(time.RFC3339Nano) - job.PreviewStateMsg = fmt.Sprintf("ffmpeg input returned HTTP %d", code) - } else { - job.PreviewState = "error" - job.PreviewStateAt = time.Now().UTC().Format(time.RFC3339Nano) - if len(st) > 280 { - job.PreviewStateMsg = st[:280] + "…" - } else { - job.PreviewStateMsg = st - } - } - jobsMu.Unlock() - notifyJobsChanged() - - fmt.Printf("⚠️ preview hq ffmpeg failed: %v (%s)\n", err, st) - } - - jobsMu.Lock() - if job.previewCmd == cmd { - job.previewCmd = nil - } - jobsMu.Unlock() - }() - - // ✅ Live thumb writer starten (schreibt generated//thumbs.jpg regelmäßig neu) - startLiveThumbLoop(ctx, job) - - return nil -} - -func 
extractFirstFrameJPEG(path string) ([]byte, error) { - cmd := exec.Command( - ffmpegPath, - "-hide_banner", - "-loglevel", "error", - "-i", path, - "-frames:v", "1", - "-vf", "scale=720:-2", - "-q:v", "10", - "-f", "image2pipe", - "-vcodec", "mjpeg", - "pipe:1", - ) - - var out bytes.Buffer - var stderr bytes.Buffer - cmd.Stdout = &out - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return nil, fmt.Errorf("ffmpeg first-frame: %w (%s)", err, strings.TrimSpace(stderr.String())) - } - return out.Bytes(), nil -} - -func resolvePathRelativeToApp(p string) (string, error) { - p = strings.TrimSpace(p) - if p == "" { - return "", nil - } - - p = filepath.Clean(filepath.FromSlash(p)) - if filepath.IsAbs(p) { - return p, nil - } - - exe, err := os.Executable() - if err == nil { - exeDir := filepath.Dir(exe) - low := strings.ToLower(exeDir) - - // Heuristik: go run / tests -> exe liegt in Temp/go-build - isTemp := strings.Contains(low, `\appdata\local\temp`) || - strings.Contains(low, `\temp\`) || - strings.Contains(low, `\tmp\`) || - strings.Contains(low, `\go-build`) || - strings.Contains(low, `/tmp/`) || - strings.Contains(low, `/go-build`) - - if !isTemp { - return filepath.Join(exeDir, p), nil - } - } - - // Fallback: Working Directory (Dev) - wd, err := os.Getwd() - if err != nil { - return "", err - } - return filepath.Join(wd, p), nil -} - -// Frontend (Vite build) als SPA ausliefern: Dateien aus dist, sonst index.html -func registerFrontend(mux *http.ServeMux) { - // Kandidaten: zuerst ENV, dann typische Ordner - candidates := []string{ - strings.TrimSpace(os.Getenv("FRONTEND_DIST")), - "web/dist", - "dist", - } - - var distAbs string - for _, c := range candidates { - if c == "" { - continue - } - abs, err := resolvePathRelativeToApp(c) - if err != nil { - continue - } - if fi, err := os.Stat(filepath.Join(abs, "index.html")); err == nil && !fi.IsDir() { - distAbs = abs - break - } - } - - if distAbs == "" { - fmt.Println("⚠️ Frontend dist nicht 
gefunden (tried: FRONTEND_DIST, frontend/dist, dist) – API läuft trotzdem.") - return - } - - fmt.Println("🖼️ Frontend dist:", distAbs) - - fileServer := http.FileServer(http.Dir(distAbs)) - - mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - // /api bleibt bei deinen API-Routen (längeres Pattern gewinnt), - // aber falls mal was durchrutscht: - if strings.HasPrefix(r.URL.Path, "/api/") { - http.NotFound(w, r) - return - } - - // 1) Wenn echte Datei existiert -> ausliefern - reqPath := r.URL.Path - if reqPath == "" || reqPath == "/" { - // index.html - w.Header().Set("Cache-Control", "no-store") - http.ServeFile(w, r, filepath.Join(distAbs, "index.html")) - return - } - - // URL-Pfad in Dateisystem-Pfad umwandeln (ohne Traversal) - clean := path.Clean("/" + reqPath) // path.Clean (für URL-Slashes) - rel := strings.TrimPrefix(clean, "/") - onDisk := filepath.Join(distAbs, filepath.FromSlash(rel)) - - if fi, err := os.Stat(onDisk); err == nil && !fi.IsDir() { - // Statische Assets ruhig cachen (Vite hashed assets) - ext := strings.ToLower(filepath.Ext(onDisk)) - if ext != "" && ext != ".html" { - w.Header().Set("Cache-Control", "public, max-age=31536000, immutable") - } else { - w.Header().Set("Cache-Control", "no-store") - } - fileServer.ServeHTTP(w, r) - return - } - - // 2) SPA-Fallback: alle "Routen" ohne Datei -> index.html - w.Header().Set("Cache-Control", "no-store") - http.ServeFile(w, r, filepath.Join(distAbs, "index.html")) - }) -} - -func makeFrontendHandler() (http.Handler, bool) { - // Kandidaten: zuerst ENV, dann typische Ordner - candidates := []string{ - strings.TrimSpace(os.Getenv("FRONTEND_DIST")), - "web/dist", - "dist", - } - - var distAbs string - for _, c := range candidates { - if c == "" { - continue - } - abs, err := resolvePathRelativeToApp(c) - if err != nil { - continue - } - if fi, err := os.Stat(filepath.Join(abs, "index.html")); err == nil && !fi.IsDir() { - distAbs = abs - break - } - } - - if distAbs == "" { - 
fmt.Println("⚠️ Frontend dist nicht gefunden (tried: FRONTEND_DIST, web/dist, dist) – API läuft trotzdem.") - return nil, false - } - - fmt.Println("🖼️ Frontend dist:", distAbs) - - fileServer := http.FileServer(http.Dir(distAbs)) - - h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // /api bleibt API - if strings.HasPrefix(r.URL.Path, "/api/") { - http.NotFound(w, r) - return - } - - reqPath := r.URL.Path - if reqPath == "" || reqPath == "/" { - w.Header().Set("Cache-Control", "no-store") - http.ServeFile(w, r, filepath.Join(distAbs, "index.html")) - return - } - - // URL-Pfad in Dateisystem-Pfad umwandeln (ohne Traversal) - clean := path.Clean("/" + reqPath) - rel := strings.TrimPrefix(clean, "/") - onDisk := filepath.Join(distAbs, filepath.FromSlash(rel)) - - if fi, err := os.Stat(onDisk); err == nil && !fi.IsDir() { - ext := strings.ToLower(filepath.Ext(onDisk)) - if ext != "" && ext != ".html" { - w.Header().Set("Cache-Control", "public, max-age=31536000, immutable") - } else { - w.Header().Set("Cache-Control", "no-store") - } - fileServer.ServeHTTP(w, r) - return - } - - // SPA-Fallback - w.Header().Set("Cache-Control", "no-store") - http.ServeFile(w, r, filepath.Join(distAbs, "index.html")) - }) - - return h, true -} - -// routes.go (package main) -func registerRoutes(mux *http.ServeMux, auth *AuthManager) *ModelStore { - // -------------------------- - // 1) Public Auth Endpoints - // -------------------------- - mux.HandleFunc("/api/auth/login", authLoginHandler(auth)) - mux.HandleFunc("/api/auth/logout", authLogoutHandler(auth)) - mux.HandleFunc("/api/auth/me", authMeHandler(auth)) - - // 2FA (Authenticator/TOTP) - mux.HandleFunc("/api/auth/2fa/setup", auth2FASetupHandler(auth)) - mux.HandleFunc("/api/auth/2fa/enable", auth2FAEnableHandler(auth)) - // mux.HandleFunc("/api/auth/2fa/disable", auth2FADisableHandler(auth)) - - // -------------------------- - // 2) Protected API Mux - // -------------------------- - api := 
// registerRoutes wires all HTTP routes onto mux: public auth endpoints at the
// root mux, everything else behind requireAuth on a nested API mux, plus the
// (optional) protected SPA frontend. It also creates and returns the
// ModelStore used by several handlers/workers.
func registerRoutes(mux *http.ServeMux, auth *AuthManager) *ModelStore {
	// --------------------------
	// 1) Public auth endpoints
	// --------------------------
	mux.HandleFunc("/api/auth/login", authLoginHandler(auth))
	mux.HandleFunc("/api/auth/logout", authLogoutHandler(auth))
	mux.HandleFunc("/api/auth/me", authMeHandler(auth))

	// 2FA (Authenticator/TOTP)
	mux.HandleFunc("/api/auth/2fa/setup", auth2FASetupHandler(auth))
	mux.HandleFunc("/api/auth/2fa/enable", auth2FAEnableHandler(auth))
	// mux.HandleFunc("/api/auth/2fa/disable", auth2FADisableHandler(auth))

	// --------------------------
	// 2) Protected API mux
	// --------------------------
	api := http.NewServeMux()

	api.HandleFunc("/api/cookies", cookiesHandler)

	api.HandleFunc("/api/record/done/stream", handleDoneStream)
	api.HandleFunc("/api/perf/stream", perfStreamHandler)
	api.HandleFunc("/api/status/disk", diskStatusHandler)

	api.HandleFunc("/api/autostart/state", autostartStateHandler)
	api.HandleFunc("/api/autostart/state/stream", autostartStateStreamHandler)
	api.HandleFunc("/api/autostart/pause", autostartPauseQuickHandler)
	api.HandleFunc("/api/autostart/resume", autostartResumeHandler)

	api.HandleFunc("/api/settings", recordSettingsHandler)
	api.HandleFunc("/api/settings/browse", settingsBrowse)
	api.HandleFunc("/api/settings/cleanup", settingsCleanupHandler)

	api.HandleFunc("/api/record", startRecordingFromRequest)
	api.HandleFunc("/api/record/status", recordStatus)
	api.HandleFunc("/api/record/stop", recordStop)
	api.HandleFunc("/api/record/preview", recordPreview)
	api.HandleFunc("/api/record/list", recordList)
	api.HandleFunc("/api/record/stream", recordStream)
	api.HandleFunc("/api/record/video", recordVideo)
	api.HandleFunc("/api/record/done", recordDoneList)
	api.HandleFunc("/api/record/delete", recordDeleteVideo)
	api.HandleFunc("/api/record/toggle-hot", recordToggleHot)
	api.HandleFunc("/api/record/keep", recordKeepVideo)
	api.HandleFunc("/api/record/unkeep", recordUnkeepVideo)
	api.HandleFunc("/api/record/restore", recordRestoreVideo)

	api.HandleFunc("/api/chaturbate/online", chaturbateOnlineHandler)
	api.HandleFunc("/api/chaturbate/biocontext", chaturbateBioContextHandler)

	api.HandleFunc("/api/generated/teaser", generatedTeaser)
	api.HandleFunc("/api/generated/cover", generatedCover)
	api.HandleFunc("/api/generated/coverinfo/list", generatedCoverInfoList)

	// Tasks
	api.HandleFunc("/api/tasks/generate-assets", tasksGenerateAssets)

	// --------------------------
	// 3) ModelStore
	// --------------------------
	modelsPath, _ := resolvePathRelativeToApp("data/models_store.db")
	fmt.Println("📦 Models DB:", modelsPath)

	store := NewModelStore(modelsPath)
	if err := store.Load(); err != nil {
		// Best-effort: a missing/corrupt store is logged but not fatal.
		fmt.Println("⚠️ models load:", err)
	}

	setCoverModelStore(store)
	RegisterModelAPI(api, store)
	setChaturbateOnlineModelStore(store)

	// --------------------------
	// 4) Mount protected API
	// --------------------------
	// /api/auth/* is already public on the root mux and wins as the longer pattern.
	mux.Handle("/api/", requireAuth(auth, api, false))

	// --------------------------
	// 5) Mount protected SPA (/)
	// --------------------------
	frontend, ok := makeFrontendHandler()
	if ok && frontend != nil {
		// allowPaths: login + assets must be public, otherwise redirect loop.
		mux.Handle("/", requireAuth(auth, frontend, true,
			"/login",
			"/assets/",
			"/favicon.ico",
			"/manifest.webmanifest",
			"/robots.txt",
			"/service-worker.js",
		))
	}

	return store
}

// withCORS allows the Vite dev-server origins to call the API cross-origin
// and short-circuits CORS preflight (OPTIONS) requests for everyone.
func withCORS(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		origin := strings.TrimSpace(r.Header.Get("Origin"))

		// Allow dev origins only.
		if origin == "http://localhost:5173" || origin == "http://127.0.0.1:5173" {
			w.Header().Set("Access-Control-Allow-Origin", origin)
			w.Header().Set("Vary", "Origin")
			w.Header().Set("Access-Control-Allow-Methods", "GET,POST,DELETE,HEAD,OPTIONS")
			w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Range, Last-Event-ID")
			w.Header().Set("Access-Control-Expose-Headers", "Content-Length, Content-Range, Accept-Ranges")
			// Only if cookies/Authorization are really needed cross-origin:
			// w.Header().Set("Access-Control-Allow-Credentials", "true")
		}

		if r.Method == http.MethodOptions {
			w.WriteHeader(http.StatusNoContent)
			return
		}

		next.ServeHTTP(w, r)
	})
}
startGeneratedGarbageCollector() - - mux := http.NewServeMux() - - // ✅ AuthManager erstellen (Beispiel) - // Du brauchst hier typischerweise: - // - ein Secret/Key (Cookie signen / Sessions) - // - Username+Pass Hash oder config - // - optional 2FA store - auth, err := NewAuthManager() - if err != nil { - fmt.Println("❌ auth init:", err) - os.Exit(1) - } - - if err != nil { - fmt.Println("❌ auth init:", err) - os.Exit(1) - } - - store := registerRoutes(mux, auth) - - go startChaturbateOnlinePoller(store) - go startChaturbateAutoStartWorker(store) - go startMyFreeCamsAutoStartWorker(store) - go startDiskSpaceGuard() - - if _, err := ensureCoversDir(); err != nil { - fmt.Println("⚠️ covers dir:", err) - } - - fmt.Println("🌐 HTTP-API aktiv: http://localhost:9999") - - handler := withCORS(mux) - if err := http.ListenAndServe(":9999", handler); err != nil { - fmt.Println("❌ HTTP-Server Fehler:", err) - os.Exit(1) - } -} - -type RecordRequest struct { - URL string `json:"url"` - Cookie string `json:"cookie,omitempty"` - UserAgent string `json:"userAgent,omitempty"` - Hidden bool `json:"hidden,omitempty"` -} - -func getRecordingsDir() string { - s := getSettings() - - abs, err := resolvePathRelativeToApp(s.RecordDir) - if err == nil && strings.TrimSpace(abs) != "" { - return abs - } - - // Fallback (falls resolve fehlschlägt) - return strings.TrimSpace(s.RecordDir) -} - -func getKeepDir() string { - s := getSettings() - - doneAbs, err := resolvePathRelativeToApp(s.DoneDir) - if err != nil || strings.TrimSpace(doneAbs) == "" { - doneAbs = strings.TrimSpace(s.DoneDir) - } - if strings.TrimSpace(doneAbs) == "" { - return "" - } - - return filepath.Join(doneAbs, "keep") -} - -func getDoneDir() string { - s := getSettings() - - doneAbs, err := resolvePathRelativeToApp(s.DoneDir) - if err == nil && strings.TrimSpace(doneAbs) != "" { - return doneAbs - } - - return strings.TrimSpace(s.DoneDir) -} - -func findVideoPath(file string) (string, error) { - base := filepath.Base(file) 
// verhindert path traversal - - // TODO: passe diese Root-Dirs an deine echten Pfade an: - roots := []string{ - getRecordingsDir(), // z.B. downloads/output root - getDoneDir(), // ✅ NEU: fertige Dateien liegen typischerweise hier - getKeepDir(), // keep root - } - - // 1) direkt in den Roots - for _, root := range roots { - root = strings.TrimSpace(root) - if root == "" { - continue - } - p := filepath.Join(root, base) - if st, err := os.Stat(p); err == nil && !st.IsDir() { - return p, nil - } - } - - // 2) 1 Ebene Unterordner: root/*/file - for _, root := range roots { - root = strings.TrimSpace(root) - if root == "" { - continue - } - matches, _ := filepath.Glob(filepath.Join(root, "*", base)) - for _, p := range matches { - if st, err := os.Stat(p); err == nil && !st.IsDir() { - return p, nil - } - } - } - - return "", os.ErrNotExist -} - -// main.go (oder wo deine Routes sind) -func writeSSE(w http.ResponseWriter, data []byte) { - // SSE spec: jede Zeile mit "data:" prefixen - s := strings.ReplaceAll(string(data), "\r\n", "\n") - lines := strings.Split(s, "\n") - for _, line := range lines { - fmt.Fprintf(w, "data: %s\n", line) - } - fmt.Fprint(w, "\n") -} - -func handleDoneStream(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "text/event-stream") - w.Header().Set("Cache-Control", "no-cache") - w.Header().Set("Connection", "keep-alive") - - flusher, ok := w.(http.Flusher) - if !ok { - http.Error(w, "streaming unsupported", http.StatusInternalServerError) - return - } - - ch := make(chan []byte, 16) - doneHub.add(ch) - defer doneHub.remove(ch) - - // optional: initial ping/hello, damit Client sofort "lebt" - fmt.Fprintf(w, "event: doneChanged\ndata: {\"type\":\"doneChanged\",\"seq\":%d,\"ts\":%d}\n\n", - atomic.LoadUint64(&doneSeq), time.Now().UnixMilli()) - flusher.Flush() - - ctx := r.Context() - for { - select { - case <-ctx.Done(): - return - case b := <-ch: - // wichtig: event-name setzen -> Client kann 
addEventListener("doneChanged", ...) - fmt.Fprintf(w, "event: doneChanged\ndata: %s\n\n", b) - flusher.Flush() - } - } -} - -func handleRecordVideo(w http.ResponseWriter, r *http.Request) { - // Priorität: id -> (dein bestehendes Mapping), sonst file - id := strings.TrimSpace(r.URL.Query().Get("id")) - if id != "" { - // ✅ wenn du schon eine bestehende Logik hast: Pfad aus JobStore holen und dann ServeContent nutzen - // path := lookupPathByJobID(id) - // ... - } - - file := strings.TrimSpace(r.URL.Query().Get("file")) - if file == "" && id == "" { - http.Error(w, "missing id or file", http.StatusBadRequest) - return - } - - var path string - var err error - - if file != "" { - path, err = findVideoPath(file) - if err != nil { - http.NotFound(w, r) - return - } - } else { - // TODO: wenn id verwendet wurde, path hier setzen - http.NotFound(w, r) - return - } - - f, err := openForReadShareDelete(path) - if err != nil { - http.Error(w, "open failed", http.StatusInternalServerError) - return - } - defer f.Close() - - st, err := f.Stat() - if err != nil { - http.Error(w, "stat failed", http.StatusInternalServerError) - return - } - - // ✅ wichtig für Browser/VideoJS - ext := strings.ToLower(filepath.Ext(path)) - switch ext { - case ".ts": - w.Header().Set("Content-Type", "video/mp2t") - default: - w.Header().Set("Content-Type", "video/mp4") - } - - w.Header().Set("Accept-Ranges", "bytes") - w.Header().Set("Cache-Control", "no-store") - - // ✅ Range/206/Seeking korrekt - http.ServeContent(w, r, filepath.Base(path), st.ModTime(), f) -} - -func durationFromMetaIfFresh(videoPath, assetDir string, fi os.FileInfo) (float64, bool) { - metaPath := filepath.Join(assetDir, "meta.json") - return readVideoMetaDuration(metaPath, fi) -} - -// shared: wird vom HTTP-Handler UND vom Autostart-Worker genutzt -func startRecordingInternal(req RecordRequest) (*RecordJob, error) { - url := strings.TrimSpace(req.URL) - if url == "" { - return nil, errors.New("url fehlt") - } - - // 
Duplicate-running guard (identische URL) - jobsMu.Lock() - for _, j := range jobs { - if j != nil && j.Status == JobRunning && strings.TrimSpace(j.SourceURL) == url { - // ✅ Wenn ein versteckter Auto-Check-Job läuft und der User manuell startet -> sofort sichtbar machen - if j.Hidden && !req.Hidden { - j.Hidden = false - jobsMu.Unlock() - - notifyJobsChanged() - return j, nil - } - - jobsMu.Unlock() - return j, nil - } - } - - // ✅ Timestamp + Output schon hier setzen, damit UI sofort Model/Filename/Details hat - startedAt := time.Now() - provider := detectProvider(url) - - // best-effort Username aus URL - username := "" - switch provider { - case "chaturbate": - username = extractUsername(url) - case "mfc": - username = extractMFCUsername(url) - } - if strings.TrimSpace(username) == "" { - username = "unknown" - } - - // Dateiname (konsistent zu runJob: gleicher Timestamp) - filename := fmt.Sprintf("%s_%s.ts", username, startedAt.Format("01_02_2006__15-04-05")) - - // best-effort: absoluter RecordDir (fallback auf Settings-Wert) - s := getSettings() - recordDirAbs, _ := resolvePathRelativeToApp(s.RecordDir) - recordDir := strings.TrimSpace(recordDirAbs) - if recordDir == "" { - recordDir = strings.TrimSpace(s.RecordDir) - } - outPath := filepath.Join(recordDir, filename) - - jobID := uuid.NewString() - ctx, cancel := context.WithCancel(context.Background()) - - job := &RecordJob{ - ID: jobID, - SourceURL: url, - Status: JobRunning, - StartedAt: startedAt, - Output: outPath, // ✅ sofort befüllt - Hidden: req.Hidden, // ✅ NEU - cancel: cancel, - } - - jobs[jobID] = job - jobsMu.Unlock() - - // ✅ NEU: Hidden-Jobs nicht sofort ins UI broadcasten - if !job.Hidden { - notifyJobsChanged() - } - - go runJob(ctx, job, req) - return job, nil -} - -func startRecordingFromRequest(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - http.Error(w, "Nur POST erlaubt", http.StatusMethodNotAllowed) - return - } - - var req RecordRequest - if err := 
json.NewDecoder(r.Body).Decode(&req); err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - job, err := startRecordingInternal(req) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - w.Header().Set("Content-Type", "application/json") - _ = json.NewEncoder(w).Encode(job) -} - -func parseCookieString(cookieStr string) map[string]string { - out := map[string]string{} - for _, pair := range strings.Split(cookieStr, ";") { - parts := strings.SplitN(strings.TrimSpace(pair), "=", 2) - if len(parts) != 2 { - continue - } - name := strings.TrimSpace(parts[0]) - value := strings.TrimSpace(parts[1]) - if name == "" { - continue - } - out[strings.ToLower(name)] = value - } - return out -} - -func hasChaturbateCookies(cookieStr string) bool { - m := parseCookieString(cookieStr) - _, hasCF := m["cf_clearance"] - // akzeptiere session_id ODER sessionid ODER sessionid/sessionId Varianten (case-insensitive durch ToLower) - _, hasSessID := m["session_id"] - _, hasSessIdAlt := m["sessionid"] // falls es ohne underscore kommt - return hasCF && (hasSessID || hasSessIdAlt) -} - -func runJob(ctx context.Context, job *RecordJob, req RecordRequest) { - hc := NewHTTPClient(req.UserAgent) - provider := detectProvider(req.URL) - - var err error - - // ✅ nutze den Timestamp vom Job (damit Start/Output konsistent sind) - now := job.StartedAt - if now.IsZero() { - now = time.Now() - } - - // ---- Aufnahme starten (Output-Pfad sauber relativ zur EXE auflösen) ---- - switch provider { - case "chaturbate": - if !hasChaturbateCookies(req.Cookie) { - err = errors.New("cf_clearance und session_id (oder sessionid) Cookies sind für Chaturbate erforderlich") - break - } - - s := getSettings() - recordDirAbs, rerr := resolvePathRelativeToApp(s.RecordDir) - if rerr != nil || strings.TrimSpace(recordDirAbs) == "" { - err = fmt.Errorf("recordDir auflösung fehlgeschlagen: %v", rerr) - break - } - _ = os.MkdirAll(recordDirAbs, 0o755) - - username 
// runJob executes one recording job end to end: provider-specific capture,
// then post-work (remux, move to done, optional auto-delete, ffprobe, asset
// generation) through the shared postWorkQ. The job's final status is only
// set after post-work so it stays visible in the downloads table meanwhile.
func runJob(ctx context.Context, job *RecordJob, req RecordRequest) {
	hc := NewHTTPClient(req.UserAgent)
	provider := detectProvider(req.URL)

	var err error

	// Use the job's timestamp so start time and output name stay consistent.
	now := job.StartedAt
	if now.IsZero() {
		now = time.Now()
	}

	// ---- Start recording (resolve output path relative to the exe) ----
	switch provider {
	case "chaturbate":
		if !hasChaturbateCookies(req.Cookie) {
			err = errors.New("cf_clearance und session_id (oder sessionid) Cookies sind für Chaturbate erforderlich")
			break
		}

		s := getSettings()
		recordDirAbs, rerr := resolvePathRelativeToApp(s.RecordDir)
		if rerr != nil || strings.TrimSpace(recordDirAbs) == "" {
			err = fmt.Errorf("recordDir auflösung fehlgeschlagen: %v", rerr)
			break
		}
		_ = os.MkdirAll(recordDirAbs, 0o755)

		username := extractUsername(req.URL)
		filename := fmt.Sprintf("%s_%s.ts", username, now.Format("01_02_2006__15-04-05"))

		// If Output was already set at start time, reuse it (when absolute).
		jobsMu.Lock()
		existingOut := strings.TrimSpace(job.Output)
		jobsMu.Unlock()

		outPath := existingOut
		if outPath == "" || !filepath.IsAbs(outPath) {
			outPath = filepath.Join(recordDirAbs, filename)
		}

		// Only update Output when it actually changes.
		if strings.TrimSpace(existingOut) != strings.TrimSpace(outPath) {
			jobsMu.Lock()
			job.Output = outPath
			jobsMu.Unlock()
			notifyJobsChanged()
		}

		err = RecordStream(ctx, hc, "https://chaturbate.com/", username, outPath, req.Cookie, job)

	case "mfc":
		s := getSettings()
		recordDirAbs, rerr := resolvePathRelativeToApp(s.RecordDir)
		if rerr != nil || strings.TrimSpace(recordDirAbs) == "" {
			err = fmt.Errorf("recordDir auflösung fehlgeschlagen: %v", rerr)
			break
		}
		_ = os.MkdirAll(recordDirAbs, 0o755)

		username := extractMFCUsername(req.URL)
		filename := fmt.Sprintf("%s_%s.ts", username, now.Format("01_02_2006__15-04-05"))
		outPath := filepath.Join(recordDirAbs, filename)

		jobsMu.Lock()
		job.Output = outPath
		jobsMu.Unlock()
		notifyJobsChanged()

		err = RecordStreamMFC(ctx, hc, username, outPath, job)

	default:
		err = errors.New("unsupported provider")
	}

	// ---- Recording finished: set EndedAt/Error ----
	end := time.Now()

	// Determine the target status (the final status is set only AFTER post-work!).
	target := JobFinished
	var errText string
	if err != nil {
		if errors.Is(err, context.Canceled) {
			target = JobStopped
		} else {
			target = JobFailed
			errText = err.Error()
		}
	}

	// Right after the provider record ends (whether err != nil or nil).
	stopPreview(job)

	// Store EndedAt + Error (lock briefly).
	jobsMu.Lock()
	job.EndedAt = &end
	if errText != "" {
		job.Error = errText
	}
	out := strings.TrimSpace(job.Output)
	jobsMu.Unlock()
	notifyJobsChanged()

	// If Output is missing (e.g. provider error), set the final status directly.
	if out == "" {
		jobsMu.Lock()
		job.Status = target
		job.Phase = ""
		job.Progress = 100
		job.PostWorkKey = ""
		job.PostWork = nil
		jobsMu.Unlock()
		notifyJobsChanged()
		notifyDoneChanged()
		return
	}

	// Post-work: remux/move/ffprobe/assets are rate-limited -> go through the queue.
	postOut := out
	postTarget := target

	postKey := "postwork:" + job.ID

	// IMPORTANT:
	// - Do NOT set JobStopped/JobFinished yet, otherwise the job disappears
	//   from the downloads table.
	// - Instead set phase "postwork" + raise progress (monotonically).
	// - Also: set PostWorkKey + attach the initial queue status to the job JSON.
	jobsMu.Lock()
	job.Phase = "postwork"
	if job.Progress < 70 {
		job.Progress = 70
	}

	job.PostWorkKey = postKey
	// Initial status (usually "missing" until Enqueue is done — refreshed
	// again right afterwards).
	{
		s := postWorkQ.StatusForKey(postKey)
		job.PostWork = &s
	}
	jobsMu.Unlock()
	notifyJobsChanged()

	okQueued := postWorkQ.Enqueue(PostWorkTask{
		Key:   postKey,
		Added: time.Now(),
		Run: func(ctx context.Context) error {
			// On start: refresh queue status (should now be "running").
			{
				st := postWorkQ.StatusForKey(postKey)
				jobsMu.Lock()
				job.PostWork = &st
				// Optional visual distinction for the "queued" progress.
				if job.Phase == "postwork" && job.Progress < 71 {
					job.Progress = 71
				}

				jobsMu.Unlock()
				notifyJobsChanged()
			}

			out := strings.TrimSpace(postOut)
			if out == "" {
				jobsMu.Lock()
				job.Phase = ""
				job.Progress = 100
				job.Status = postTarget
				job.PostWorkKey = ""
				job.PostWork = nil
				jobsMu.Unlock()
				notifyJobsChanged()
				notifyDoneChanged()
				return nil
			}

			// Helper: progress only moves upwards (guards against "going backwards").
			setPhase := func(phase string, pct int) {
				jobsMu.Lock()
				if pct < job.Progress {
					pct = job.Progress
				}
				job.Phase = phase
				job.Progress = pct

				// Keep the queue status fresh on phase changes too (nice for UI).
				st := postWorkQ.StatusForKey(postKey)
				job.PostWork = &st

				jobsMu.Unlock()
				notifyJobsChanged()
			}

			// 1) Remux (only when TS).
			if strings.EqualFold(filepath.Ext(out), ".ts") {
				setPhase("remuxing", 72)
				if newOut, err2 := maybeRemuxTSForJob(job, out); err2 == nil && strings.TrimSpace(newOut) != "" {
					out = strings.TrimSpace(newOut)
					jobsMu.Lock()
					job.Output = out
					jobsMu.Unlock()
					notifyJobsChanged()
				}
			}

			// 2) Move to done (best-effort).
			setPhase("moving", 78)
			if moved, err2 := moveToDoneDir(out); err2 == nil && strings.TrimSpace(moved) != "" {
				out = strings.TrimSpace(moved)
				jobsMu.Lock()
				job.Output = out
				jobsMu.Unlock()
				notifyJobsChanged()
				// Only NOW is the done list really affected.
				notifyDoneChanged()
			}

			// 3) Optional: automatically delete small downloads.
			setPhase("postwork", 82)
			if fi, serr := os.Stat(out); serr == nil && fi != nil && !fi.IsDir() {
				jobsMu.Lock()
				job.SizeBytes = fi.Size()
				jobsMu.Unlock()
				notifyJobsChanged()

				s := getSettings()
				minMB := s.AutoDeleteSmallDownloadsBelowMB
				if s.AutoDeleteSmallDownloads && minMB > 0 {
					threshold := int64(minMB) * 1024 * 1024
					if fi.Size() > 0 && fi.Size() < threshold {
						base := filepath.Base(out)
						id := stripHotPrefix(strings.TrimSuffix(base, filepath.Ext(base)))

						if derr := removeWithRetry(out); derr == nil || os.IsNotExist(derr) {
							removeGeneratedForID(id)
							if doneAbs, rerr := resolvePathRelativeToApp(getSettings().DoneDir); rerr == nil && strings.TrimSpace(doneAbs) != "" {
								_ = os.RemoveAll(filepath.Join(doneAbs, "preview", id))
								_ = os.RemoveAll(filepath.Join(doneAbs, "thumbs", id))
							}
							purgeDurationCacheForPath(out)

							jobsMu.Lock()
							delete(jobs, job.ID)
							jobsMu.Unlock()
							notifyJobsChanged()
							notifyDoneChanged()

							fmt.Println("🧹 auto-deleted:", base, "size:", formatBytesSI(fi.Size()))
							return nil
						} else {
							fmt.Println("⚠️ auto-delete failed:", derr)
						}
					}
				}
			}

			// 4) Duration (ffprobe).
			setPhase("ffprobe", 84)
			{
				dctx, cancel := context.WithTimeout(ctx, 6*time.Second)
				if sec, derr := durationSecondsCached(dctx, out); derr == nil && sec > 0 {
					jobsMu.Lock()
					job.DurationSeconds = sec
					jobsMu.Unlock()
					notifyJobsChanged()
				}
				cancel()
			}

			// 5) Video properties.
			setPhase("probe", 86)
			{
				pctx, cancel := context.WithTimeout(ctx, 6*time.Second)
				w, h, fps, perr := probeVideoProps(pctx, out)
				cancel()

				if perr == nil {
					jobsMu.Lock()
					job.VideoWidth = w
					job.VideoHeight = h
					job.FPS = fps
					jobsMu.Unlock()
					notifyJobsChanged()
				}
			}

			// 6) Assets (thumbs.jpg + preview.mp4).
			const (
				assetsStart = 86
				assetsEnd   = 99
			)

			setPhase("assets", assetsStart)

			lastPct := -1
			lastTick := time.Time{}

			// update maps the asset ratio 0..1 onto assetsStart..assetsEnd,
			// throttled to at most one UI update per 150ms / percent step.
			update := func(r float64) {
				if r < 0 {
					r = 0
				}
				if r > 1 {
					r = 1
				}

				pct := assetsStart + int(math.Round(r*float64(assetsEnd-assetsStart)))
				if pct < assetsStart {
					pct = assetsStart
				}
				if pct > assetsEnd {
					pct = assetsEnd
				}

				if pct == lastPct {
					return
				}
				if !lastTick.IsZero() && time.Since(lastTick) < 150*time.Millisecond {
					return
				}
				lastPct = pct
				lastTick = time.Now()
				setPhase("assets", pct)
			}

			if err := ensureAssetsForVideoWithProgress(out, job.SourceURL, update); err != nil {
				fmt.Println("⚠️ ensureAssetsForVideo:", err)
			}
			setPhase("assets", assetsEnd)

			// 7) Finalize: set the final status NOW (so the job only now
			// leaves the downloads table).
			jobsMu.Lock()
			job.Status = postTarget
			job.Phase = ""
			job.Progress = 100
			job.PostWorkKey = ""
			job.PostWork = nil
			jobsMu.Unlock()
			notifyJobsChanged()
			notifyDoneChanged()

			return nil
		},
	})

	if okQueued {
		// Right after successful Enqueue fetch the status again
		// (now "queued" + position possible).
		st := postWorkQ.StatusForKey(postKey)
		jobsMu.Lock()
		job.PostWork = &st
		jobsMu.Unlock()
		notifyJobsChanged()
	} else {
		// Queue full -> fallback: finalize immediately.
		jobsMu.Lock()
		job.Status = postTarget
		job.Phase = ""
		job.Progress = 100
		job.PostWorkKey = ""
		job.PostWork = nil
		jobsMu.Unlock()
		notifyJobsChanged()
		notifyDoneChanged()
	}

	return
}

// formatBytesSI renders a byte count human-readably.
// NOTE(review): despite the "SI" name this uses 1024-based units with
// KB/MB/... suffixes — confirm whether SI (1000) was intended.
func formatBytesSI(b int64) string {
	if b < 0 {
		b = 0
	}
	const unit = 1024
	if b < unit {
		return fmt.Sprintf("%d B", b)
	}
	div, exp := int64(unit), 0
	for n := b / unit; n >= unit; n /= unit {
		div *= unit
		exp++
	}
	suffix := []string{"KB", "MB", "GB", "TB", "PB"}
	v := float64(b) / float64(div)
	// One decimal place, except for large whole-ish values.
	if v >= 10 {
		return fmt.Sprintf("%.0f %s", v, suffix[exp])
	}
	return fmt.Sprintf("%.1f %s", v, suffix[exp])
}
// maxInt64 is the largest value representable by int64.
const maxInt64 = int64(^uint64(0) >> 1)

// u64ToI64 converts a uint64 to int64, clamping at math.MaxInt64 instead of
// overflowing.
func u64ToI64(x uint64) int64 {
	if x > uint64(maxInt64) {
		return maxInt64
	}
	return int64(x)
}

// ensureAssetsForVideo generates assets without a source URL (irrelevant for covers).
func ensureAssetsForVideo(videoPath string) error {
	// Default: no SourceURL.
	return ensureAssetsForVideoWithProgress(videoPath, "", nil)
}

// ensureAssetsForVideoWithSource is for call sites that know the URL
// (e.g. post-work / jobs).
func ensureAssetsForVideoWithSource(videoPath string, sourceURL string) error {
	return ensureAssetsForVideoWithProgress(videoPath, sourceURL, nil)
}

// ensureAssetsForVideoWithProgress makes sure thumbs.jpg and preview.mp4
// exist in the generated-assets dir for the video, writing/refreshing
// meta.json (duration + sourceURL) along the way.
// onRatio, when non-nil, receives the overall asset progress as 0..1.
// Most failures are treated as best-effort and return nil.
func ensureAssetsForVideoWithProgress(videoPath string, sourceURL string, onRatio func(r float64)) error {
	videoPath = strings.TrimSpace(videoPath)
	if videoPath == "" {
		return nil
	}

	fi, statErr := os.Stat(videoPath)
	if statErr != nil || fi.IsDir() || fi.Size() <= 0 {
		return nil
	}

	// ID = filename without extension (always WITHOUT the "HOT " prefix).
	base := filepath.Base(videoPath)
	id := strings.TrimSuffix(base, filepath.Ext(base))
	id = stripHotPrefix(id)
	if strings.TrimSpace(id) == "" {
		return nil
	}

	assetDir, gerr := ensureGeneratedDir(id)
	if gerr != nil || strings.TrimSpace(assetDir) == "" {
		return fmt.Errorf("generated dir: %v", gerr)
	}

	metaPath := filepath.Join(assetDir, "meta.json")

	// ---- Meta / duration ----
	durSec := 0.0
	if d, ok := readVideoMetaDuration(metaPath, fi); ok {
		durSec = d
	} else {
		dctx, cancel := context.WithTimeout(context.Background(), 6*time.Second)
		d, derr := durationSecondsCached(dctx, videoPath)
		cancel()

		if derr == nil && d > 0 {
			durSec = d
			// Write duration-only meta (incl. sourceURL).
			_ = writeVideoMetaDuration(metaPath, fi, durSec, sourceURL)
		}
	}

	// If the duration came from meta but a SourceURL is newly available,
	// enrich the meta (without re-running ffprobe).
	if durSec > 0 && strings.TrimSpace(sourceURL) != "" {
		if u, ok := readVideoMetaSourceURL(metaPath, fi); !ok || strings.TrimSpace(u) == "" {
			_ = writeVideoMetaDuration(metaPath, fi, durSec, sourceURL)
		}
	}

	// Weights: thumbs small, preview large.
	const (
		thumbsW  = 0.25
		previewW = 0.75
	)

	// progress clamps to 0..1 and forwards to onRatio (no-op when nil).
	progress := func(r float64) {
		if onRatio == nil {
			return
		}
		if r < 0 {
			r = 0
		}
		if r > 1 {
			r = 1
		}
		onRatio(r)
	}

	progress(0)

	// ----------------
	// Thumbs
	// ----------------
	thumbPath := filepath.Join(assetDir, "thumbs.jpg")
	if tfi, err := os.Stat(thumbPath); err == nil && !tfi.IsDir() && tfi.Size() > 0 {
		progress(thumbsW)
	} else {
		progress(0.05)

		genCtx, cancel := context.WithTimeout(context.Background(), 45*time.Second)
		defer cancel()

		if err := thumbSem.Acquire(genCtx); err != nil {
			// best-effort
			progress(thumbsW)
			goto PREVIEW
		}
		// NOTE(review): this defer releases thumbSem only when the whole
		// function returns, i.e. the thumb slot stays held across preview
		// generation (up to ~3 min) — confirm whether that is intended.
		defer thumbSem.Release()

		progress(0.10)

		// Grab the frame from the middle of the video when duration is known.
		t := 0.0
		if durSec > 0 {
			t = durSec * 0.5
		}

		progress(0.15)

		// Fallback chain: mid frame -> last frame -> first frame.
		img, e1 := extractFrameAtTimeJPEG(videoPath, t)
		if e1 != nil || len(img) == 0 {
			img, e1 = extractLastFrameJPEG(videoPath)
			if e1 != nil || len(img) == 0 {
				img, e1 = extractFirstFrameJPEG(videoPath)
			}
		}

		progress(0.20)

		if e1 == nil && len(img) > 0 {
			if err := atomicWriteFile(thumbPath, img); err != nil {
				fmt.Println("⚠️ thumb write:", err)
			}
		}

		progress(thumbsW)
	}

PREVIEW:
	// ----------------
	// Preview
	// ----------------
	previewPath := filepath.Join(assetDir, "preview.mp4")
	if pfi, err := os.Stat(previewPath); err == nil && !pfi.IsDir() && pfi.Size() > 0 {
		progress(1)
		return nil
	}

	genCtx, cancel := context.WithTimeout(context.Background(), 3*time.Minute)
	defer cancel()

	progress(thumbsW + 0.02)

	if err := genSem.Acquire(genCtx); err != nil {
		progress(1)
		return nil
	}
	defer genSem.Release()

	progress(thumbsW + 0.05)

	if err := generateTeaserClipsMP4WithProgress(genCtx, videoPath, previewPath, 1.0, 18, func(r float64) {
		if r < 0 {
			r = 0
		}
		if r > 1 {
			r = 1
		}
		// Map the preview-internal ratio into the overall 0..1 scale.
		progress(thumbsW + r*previewW)
	}); err != nil {
		fmt.Println("⚠️ preview clips:", err)
	}

	progress(1)
	return nil
}
// recordVideo serves a recorded video for playback. Two lookup modes:
// ?file=<basename> searched in doneDir (excl. keep), then keep, then
// recordDir; or legacy ?id=<jobID> which only works while the job is still
// in RAM. TS files are remuxed to MP4 on demand since browsers cannot play
// raw TS reliably.
func recordVideo(w http.ResponseWriter, r *http.Request) {

	origin := r.Header.Get("Origin")
	if origin != "" {
		// Allow the dev origin (or "*" if it does not matter).
		w.Header().Set("Access-Control-Allow-Origin", origin)
		w.Header().Set("Vary", "Origin")
		w.Header().Set("Access-Control-Allow-Methods", "GET,HEAD,OPTIONS")
		w.Header().Set("Access-Control-Allow-Headers", "Range")
		w.Header().Set("Access-Control-Expose-Headers", "Content-Length, Content-Range, Accept-Ranges")
	}
	if r.Method == http.MethodOptions {
		w.WriteHeader(http.StatusNoContent)
		return
	}

	// Playback via filename (for doneDir / recordDir).
	if raw := strings.TrimSpace(r.URL.Query().Get("file")); raw != "" {
		// Decode explicitly (for safety).
		file, err := url.QueryUnescape(raw)
		if err != nil {
			http.Error(w, "ungültiger file", http.StatusBadRequest)
			return
		}
		file = strings.TrimSpace(file)

		// No path, no backslashes, no traversal.
		if file == "" ||
			strings.Contains(file, "/") ||
			strings.Contains(file, "\\") ||
			filepath.Base(file) != file {
			http.Error(w, "ungültiger file", http.StatusBadRequest)
			return
		}

		ext := strings.ToLower(filepath.Ext(file))
		if ext != ".mp4" && ext != ".ts" {
			http.Error(w, "nicht erlaubt", http.StatusForbidden)
			return
		}

		s := getSettings()
		recordAbs, err := resolvePathRelativeToApp(s.RecordDir)
		if err != nil {
			http.Error(w, "recordDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
			return
		}
		doneAbs, err := resolvePathRelativeToApp(s.DoneDir)
		if err != nil {
			http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
			return
		}

		// Candidates: done first (incl. 1 subdir level, but without "keep"),
		// then keep (incl. 1 subdir level), then recordDir.
		names := []string{file}

		// If the UI still knows ".ts" but the file already exists as ".mp4":
		if ext == ".ts" {
			mp4File := strings.TrimSuffix(file, ext) + ".mp4"
			names = append(names, mp4File)
		}

		var outPath string
		for _, name := range names {
			// done root + done/<sub>/ (skip "keep")
			if p, _, ok := findFileInDirOrOneLevelSubdirs(doneAbs, name, "keep"); ok {
				outPath = p
				break
			}
			// keep root + keep/<sub>/
			if p, _, ok := findFileInDirOrOneLevelSubdirs(filepath.Join(doneAbs, "keep"), name, ""); ok {
				outPath = p
				break
			}
			// record root (+ optional 1 subdir level)
			if p, _, ok := findFileInDirOrOneLevelSubdirs(recordAbs, name, ""); ok {
				outPath = p
				break
			}
		}

		if outPath == "" {
			http.Error(w, "datei nicht gefunden", http.StatusNotFound)
			return
		}

		// Browsers cannot reliably play TS directly -> remux to MP4 on demand.
		if strings.ToLower(filepath.Ext(outPath)) == ".ts" {
			newOut, err := maybeRemuxTS(outPath)
			if err != nil {
				http.Error(w, "TS kann im Browser nicht abgespielt werden; Remux fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
				return
			}
			if strings.TrimSpace(newOut) == "" {
				http.Error(w, "TS kann im Browser nicht abgespielt werden; Remux hat keine MP4 erzeugt", http.StatusInternalServerError)
				return
			}
			outPath = newOut

			// Make sure an MP4 really exists now.
			fi, err := os.Stat(outPath)
			if err != nil || fi.IsDir() || fi.Size() == 0 || strings.ToLower(filepath.Ext(outPath)) != ".mp4" {
				http.Error(w, "Remux-Ergebnis ungültig", http.StatusInternalServerError)
				return
			}
		}

		// If the file is named ".mp4" but is actually TS/HTML -> do not serve as MP4.
		if strings.ToLower(filepath.Ext(outPath)) == ".mp4" {
			kind, _ := sniffVideoKind(outPath)
			switch kind {
			case "ts":
				newOut, err := maybeRemuxTS(outPath)
				if err != nil {
					http.Error(w, "Datei ist TS (nur .mp4 benannt); Remux fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
					return
				}
				outPath = newOut
			case "html":
				http.Error(w, "Server liefert HTML statt Video (Pfad/Lookup prüfen)", http.StatusInternalServerError)
				return
			}
		}

		w.Header().Set("Cache-Control", "no-store")

		serveVideoFile(w, r, outPath)
		return

	}

	// LEGACY: playback via job ID (only works while the job is in RAM).
	id := strings.TrimSpace(r.URL.Query().Get("id"))
	if id == "" {
		http.Error(w, "id fehlt", http.StatusBadRequest)
		return
	}

	jobsMu.Lock()
	job, ok := jobs[id]
	jobsMu.Unlock()
	if !ok {
		http.Error(w, "job nicht gefunden", http.StatusNotFound)
		return
	}

	outPath := filepath.Clean(strings.TrimSpace(job.Output))
	if outPath == "" {
		http.Error(w, "output fehlt", http.StatusNotFound)
		return
	}

	if !filepath.IsAbs(outPath) {
		abs, err := resolvePathRelativeToApp(outPath)
		if err != nil {
			http.Error(w, "pfad auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
			return
		}
		outPath = abs
	}

	fi, err := os.Stat(outPath)
	if err != nil || fi.IsDir() || fi.Size() == 0 {
		http.Error(w, "datei nicht gefunden", http.StatusNotFound)
		return
	}

	// Browsers cannot reliably play TS directly -> remux to MP4 on demand.
	if strings.ToLower(filepath.Ext(outPath)) == ".ts" {
		newOut, err := maybeRemuxTS(outPath)
		if err != nil {
			http.Error(w, "TS Remux fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
			return
		}
		if strings.TrimSpace(newOut) == "" {
			http.Error(w, "TS kann im Browser nicht abgespielt werden; Remux hat keine MP4 erzeugt", http.StatusInternalServerError)
			return
		}
		outPath = newOut

		fi, err := os.Stat(outPath)
		if err != nil || fi.IsDir() || fi.Size() == 0 || strings.ToLower(filepath.Ext(outPath)) != ".mp4" {
			http.Error(w, "Remux-Ergebnis ungültig", http.StatusInternalServerError)
			return
		}
	}

	serveVideoFile(w, r, outPath)
}
-func setNoStoreHeaders(w http.ResponseWriter) { - // verhindert Browser/Proxy Caching (wichtig für Logs/Status) - w.Header().Set("Cache-Control", "no-store, max-age=0") - w.Header().Set("Pragma", "no-cache") - w.Header().Set("Expires", "0") -} - -func durationSecondsCacheOnly(path string, fi os.FileInfo) float64 { - durCache.mu.Lock() - e, ok := durCache.m[path] - durCache.mu.Unlock() - - if ok && e.size == fi.Size() && e.mod.Equal(fi.ModTime()) && e.sec > 0 { - return e.sec - } - return 0 -} - -func findFileInDirOrOneLevelSubdirs(root string, file string, skipDirName string) (string, os.FileInfo, bool) { - // direct - p := filepath.Join(root, file) - if fi, err := os.Stat(p); err == nil && !fi.IsDir() && fi.Size() > 0 { - return p, fi, true - } - - entries, err := os.ReadDir(root) - if err != nil { - return "", nil, false - } - - for _, e := range entries { - if !e.IsDir() { - continue - } - if skipDirName != "" && e.Name() == skipDirName { - continue - } - pp := filepath.Join(root, e.Name(), file) - if fi, err := os.Stat(pp); err == nil && !fi.IsDir() && fi.Size() > 0 { - return pp, fi, true - } - } - - return "", nil, false -} - -func resolveDoneFileByName(doneAbs string, file string) (full string, from string, fi os.FileInfo, err error) { - // 1) done (root + /done//) — "keep" wird übersprungen - if p, fi, ok := findFileInDirOrOneLevelSubdirs(doneAbs, file, "keep"); ok { - return p, "done", fi, nil - } - - // 2) keep (root + /done/keep//) - keepDir := filepath.Join(doneAbs, "keep") - if p, fi, ok := findFileInDirOrOneLevelSubdirs(keepDir, file, ""); ok { - return p, "keep", fi, nil - } - - return "", "", nil, fmt.Errorf("not found") -} - -type doneListResponse struct { - Items []*RecordJob `json:"items"` - TotalCount int `json:"totalCount"` - Page int `json:"page,omitempty"` - PageSize int `json:"pageSize,omitempty"` -} - -func isTrashPath(p string) bool { - p = strings.ReplaceAll(p, "\\", "/") - return strings.Contains(p, "/.trash/") || strings.HasSuffix(p, 
"/.trash") -} - -func recordDoneList(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - http.Error(w, "Nur GET erlaubt", http.StatusMethodNotAllowed) - return - } - - // ✅ optional: auch /done/keep/ einbeziehen (Standard: false) - qKeep := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("includeKeep"))) - includeKeep := qKeep == "1" || qKeep == "true" || qKeep == "yes" - - // ✅ NEU: optionaler Model-Filter (Pagination dann "pro Model" sinnvoll) - normalizeQueryModel := func(raw string) string { - s := strings.TrimSpace(raw) - if s == "" { - return "" - } - s = strings.TrimPrefix(s, "http://") - s = strings.TrimPrefix(s, "https://") - - // letzter URL-Segment, falls jemand "…/modelname" übergibt - if strings.Contains(s, "/") { - parts := strings.Split(s, "/") - for i := len(parts) - 1; i >= 0; i-- { - p := strings.TrimSpace(parts[i]) - if p != "" { - s = p - break - } - } - } - // falls "host:model" übergeben wird - if strings.Contains(s, ":") { - s = strings.TrimSpace(strings.Split(s, ":")[len(strings.Split(s, ":"))-1]) - } - - s = strings.TrimPrefix(s, "@") - return strings.ToLower(strings.TrimSpace(s)) - } - - qModel := normalizeQueryModel(r.URL.Query().Get("model")) - - // optional: Pagination (1-based). 
Wenn page/pageSize fehlen -> wie vorher: komplette Liste - page := 0 - pageSize := 0 - if v := strings.TrimSpace(r.URL.Query().Get("page")); v != "" { - if n, err := strconv.Atoi(v); err == nil && n > 0 { - page = n - } - } - if v := strings.TrimSpace(r.URL.Query().Get("pageSize")); v != "" { - if n, err := strconv.Atoi(v); err == nil && n > 0 { - pageSize = n - } - } - - // optional: Sort - // supported: completed_(asc|desc), model_(asc|desc), file_(asc|desc), duration_(asc|desc), size_(asc|desc) - sortMode := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("sort"))) - if sortMode == "" { - sortMode = "completed_desc" - } - - // ⚠️ Backwards-Compat: alte model_* Sorts auf file_* mappen - if sortMode == "model_asc" { - sortMode = "file_asc" - } - if sortMode == "model_desc" { - sortMode = "file_desc" - } - - // ✅ all=1 -> immer komplette Liste zurückgeben (Pagination deaktivieren) - qAll := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("all"))) - fetchAll := qAll == "1" || qAll == "true" || qAll == "yes" - if fetchAll { - page = 0 - pageSize = 0 - } - - // ✅ .trash niemals als "done item" zählen/listen - isTrashOutput := func(p string) bool { - pp := strings.ToLower(filepath.ToSlash(strings.TrimSpace(p))) - return strings.Contains(pp, "/.trash/") || strings.HasSuffix(pp, "/.trash") - } - - // --- helpers (ModelKey aus Filename/Dir ableiten) --- - - modelFromStem := func(stem string) string { - // stem: lower, ohne ext, ohne HOT - if stem == "" { - return "" - } - if m := startedAtFromFilenameRe.FindStringSubmatch(stem); m != nil { - return strings.ToLower(strings.TrimSpace(m[1])) - } - // fallback: alles vor letztem "_" (oder kompletter stem) - if i := strings.LastIndex(stem, "_"); i > 0 { - return strings.ToLower(strings.TrimSpace(stem[:i])) - } - return strings.ToLower(strings.TrimSpace(stem)) - } - - modelFromFullPath := func(full string) string { - name := strings.ToLower(filepath.Base(full)) - stem := strings.TrimSuffix(name, filepath.Ext(name)) - 
stem = strings.TrimPrefix(stem, "hot ") - mk := modelFromStem(stem) - - // fallback: wenn Dateiname nichts taugt, aus Ordner nehmen (/done//file) - if mk == "" { - parent := strings.ToLower(filepath.Base(filepath.Dir(full))) - parent = strings.TrimSpace(parent) - if parent != "" && parent != "keep" { - mk = parent - } - } - return mk - } - - isTrashPath := func(full string) bool { - p := strings.ReplaceAll(full, "\\", "/") - // match: ".../.trash/file.ext" oder ".../.trash" - return strings.Contains(p, "/.trash/") || strings.HasSuffix(p, "/.trash") - } - - // --- resolve done path --- - - s := getSettings() - doneAbs, err := resolvePathRelativeToApp(s.DoneDir) - if err != nil { - http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) - return - } - - // Wenn kein DoneDir gesetzt ist → einfach leere Liste zurückgeben - if strings.TrimSpace(doneAbs) == "" { - w.Header().Set("Content-Type", "application/json") - w.Header().Set("Cache-Control", "no-store") - _ = json.NewEncoder(w).Encode(doneListResponse{ - Items: []*RecordJob{}, - TotalCount: 0, - Page: page, - PageSize: pageSize, - }) - return - } - - type scanDir struct { - dir string - skipKeep bool // nur für doneAbs: "keep" nicht doppelt scannen - } - - dirs := []scanDir{{dir: doneAbs, skipKeep: true}} - if includeKeep { - dirs = append(dirs, scanDir{dir: filepath.Join(doneAbs, "keep"), skipKeep: false}) - } - - list := make([]*RecordJob, 0, 256) - - addFile := func(full string, fi os.FileInfo) { - // ✅ .trash niemals zählen / zurückgeben - if isTrashPath(full) { - return - } - - name := filepath.Base(full) - ext := strings.ToLower(filepath.Ext(name)) - if ext != ".mp4" && ext != ".ts" { - return - } - - // ✅ .trash aus Done-Liste ausschließen (auch für totalCount/tab counter) - if isTrashOutput(full) { - return - } - - // ✅ NEU: Model-Filter vor dem teureren Meta-Kram - if qModel != "" { - if mk := modelFromFullPath(full); mk != qModel { - return - } - } - - base := 
strings.TrimSuffix(name, filepath.Ext(name)) - t := fi.ModTime() - - // StartedAt aus Dateiname (Fallback: ModTime) - start := t - stem := base - if strings.HasPrefix(stem, "HOT ") { - stem = strings.TrimPrefix(stem, "HOT ") - } - if m := startedAtFromFilenameRe.FindStringSubmatch(stem); m != nil { - mm, _ := strconv.Atoi(m[2]) - dd, _ := strconv.Atoi(m[3]) - yy, _ := strconv.Atoi(m[4]) - hh, _ := strconv.Atoi(m[5]) - mi, _ := strconv.Atoi(m[6]) - ss, _ := strconv.Atoi(m[7]) - start = time.Date(yy, time.Month(mm), dd, hh, mi, ss, 0, time.Local) - } - - dur := 0.0 - - // 1) meta.json aus generated//meta.json lesen (schnell) - id := stripHotPrefix(strings.TrimSuffix(filepath.Base(full), filepath.Ext(full))) - - srcURL := "" - if strings.TrimSpace(id) != "" { - if mp, err := generatedMetaFile(id); err == nil { - - if d, ok := readVideoMetaDuration(mp, fi); ok { - dur = d - } - - if u, ok := readVideoMetaSourceURL(mp, fi); ok { - srcURL = u - } - } - } - - // 2) Fallback: RAM-Cache only (immer noch schnell, kein ffprobe) - if dur <= 0 { - dur = durationSecondsCacheOnly(full, fi) - } - - // 3) KEIN ffprobe hier! (sonst wird die API wieder langsam) - - list = append(list, &RecordJob{ - ID: base, - Output: full, - SourceURL: srcURL, - Status: JobFinished, - StartedAt: start, - EndedAt: &t, - DurationSeconds: dur, - SizeBytes: fi.Size(), - }) - } - - for _, sd := range dirs { - entries, err := os.ReadDir(sd.dir) - if err != nil { - if os.IsNotExist(err) { - if sd.dir == doneAbs { - w.Header().Set("Content-Type", "application/json") - w.Header().Set("Cache-Control", "no-store") - _ = json.NewEncoder(w).Encode(doneListResponse{ - Items: []*RecordJob{}, - TotalCount: 0, - Page: page, - PageSize: pageSize, - }) - return - - } - continue - } - if sd.dir == doneAbs { - http.Error(w, "doneDir lesen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) - return - } - continue - } - - for _, e := range entries { - // Subdir: 1 Level rein (z.B. 
/done// oder /done/keep//) - if e.IsDir() { - // ✅ .trash Ordner niemals scannen - if e.Name() == ".trash" { - continue - } - - if sd.skipKeep && e.Name() == "keep" { - continue - } - - // ✅ .trash nie scannen - if strings.EqualFold(e.Name(), ".trash") { - continue - } - - sub := filepath.Join(sd.dir, e.Name()) - subEntries, err := os.ReadDir(sub) - if err != nil { - continue - } - for _, se := range subEntries { - if se.IsDir() { - continue - } - full := filepath.Join(sub, se.Name()) - fi, err := os.Stat(full) - if err != nil || fi.IsDir() || fi.Size() == 0 { - continue - } - addFile(full, fi) - } - continue - } - - full := filepath.Join(sd.dir, e.Name()) - fi, err := os.Stat(full) - if err != nil || fi.IsDir() || fi.Size() == 0 { - continue - } - addFile(full, fi) - } - } - - // helpers (Sort) - fileForSort := func(j *RecordJob) string { - f := strings.ToLower(filepath.Base(j.Output)) - // HOT Prefix aus Sortierung rausnehmen - f = strings.TrimPrefix(f, "hot ") - return f - } - durationForSort := func(j *RecordJob) (sec float64, ok bool) { - if j.DurationSeconds > 0 { - return j.DurationSeconds, true - } - return 0, false - } - - // Sortierung - sort.Slice(list, func(i, j int) bool { - a, b := list[i], list[j] - ta, tb := time.Time{}, time.Time{} - if a.EndedAt != nil { - ta = *a.EndedAt - } - if b.EndedAt != nil { - tb = *b.EndedAt - } - - switch sortMode { - case "completed_asc": - if !ta.Equal(tb) { - return ta.Before(tb) - } - return fileForSort(a) < fileForSort(b) - case "completed_desc": - if !ta.Equal(tb) { - return ta.After(tb) - } - return fileForSort(a) < fileForSort(b) - case "file_asc": - fa, fb := fileForSort(a), fileForSort(b) - if fa != fb { - return fa < fb - } - if !ta.Equal(tb) { - return ta.After(tb) - } - return fileForSort(a) < fileForSort(b) - case "file_desc": - fa, fb := fileForSort(a), fileForSort(b) - if fa != fb { - return fa > fb - } - if !ta.Equal(tb) { - return ta.After(tb) - } - return fileForSort(a) < fileForSort(b) - - case 
"duration_asc": - da, okA := durationForSort(a) - db, okB := durationForSort(b) - if okA != okB { - return okA // unbekannt nach hinten - } - if okA && okB && da != db { - return da < db - } - if !ta.Equal(tb) { - return ta.After(tb) - } - return fileForSort(a) < fileForSort(b) - case "duration_desc": - da, okA := durationForSort(a) - db, okB := durationForSort(b) - if okA != okB { - return okA - } - if okA && okB && da != db { - return da > db - } - if !ta.Equal(tb) { - return ta.After(tb) - } - return fileForSort(a) < fileForSort(b) - - case "size_asc": - if a.SizeBytes != b.SizeBytes { - return a.SizeBytes < b.SizeBytes - } - if !ta.Equal(tb) { - return ta.After(tb) - } - return fileForSort(a) < fileForSort(b) - case "size_desc": - if a.SizeBytes != b.SizeBytes { - return a.SizeBytes > b.SizeBytes - } - if !ta.Equal(tb) { - return ta.After(tb) - } - return fileForSort(a) < fileForSort(b) - default: - if !ta.Equal(tb) { - return ta.After(tb) - } - return fileForSort(a) < fileForSort(b) - } - }) - - // ✅ optional: count mitsenden - qWithCount := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("withCount"))) - withCount := qWithCount == "1" || qWithCount == "true" || qWithCount == "yes" - - // ✅ Gesamtanzahl IMMER vor Pagination merken - totalCount := len(list) - - // ✅ Pagination nur auf "items" anwenden (list bleibt für totalCount intakt) - items := list - if pageSize > 0 && !fetchAll { - if page <= 0 { - page = 1 - } - start := (page - 1) * pageSize - if start < 0 { - start = 0 - } - if start >= totalCount { - items = []*RecordJob{} - } else { - end := start + pageSize - if end > totalCount { - end = totalCount - } - items = list[start:end] - } - } - - w.Header().Set("Content-Type", "application/json") - w.Header().Set("Cache-Control", "no-store") - - // ✅ Wenn Frontend "withCount=1" nutzt: {count, items} - if withCount { - _ = json.NewEncoder(w).Encode(map[string]any{ - "count": totalCount, - "items": items, - }) - return - } - - // ✅ Standard-Response: 
immer auch totalCount mitsenden - _ = json.NewEncoder(w).Encode(doneListResponse{ - Items: items, - TotalCount: totalCount, - Page: page, - PageSize: pageSize, - }) - return - -} - -type doneMetaResp struct { - Count int `json:"count"` -} - -type durationReq struct { - Files []string `json:"files"` -} - -type durationItem struct { - File string `json:"file"` - DurationSeconds float64 `json:"durationSeconds,omitempty"` - Error string `json:"error,omitempty"` -} - func removeJobsByOutputBasename(file string) { file = strings.TrimSpace(file) if file == "" { @@ -8166,1328 +4508,6 @@ func renameJobsOutputBasename(oldFile, newFile string) { } } -// --- Undo Token (Trash Restore) --- - -type undoDeleteToken struct { - Trash string `json:"trash"` // basename in .trash - RelDir string `json:"relDir"` // dir relativ zu doneAbs, z.B. ".", "keep/model", "model" - File string `json:"file"` // original basename, z.B. "HOT xyz.mp4" -} - -func encodeUndoDeleteToken(t undoDeleteToken) (string, error) { - b, err := json.Marshal(t) - if err != nil { - return "", err - } - return base64.RawURLEncoding.EncodeToString(b), nil -} - -func decodeUndoDeleteToken(raw string) (undoDeleteToken, error) { - var t undoDeleteToken - b, err := base64.RawURLEncoding.DecodeString(raw) - if err != nil { - return t, err - } - if err := json.Unmarshal(b, &t); err != nil { - return t, err - } - return t, nil -} - -func isSafeRelDir(rel string) bool { - rel = strings.TrimSpace(rel) - if rel == "" { - return false - } - // normalize to slash for validation - rel = filepath.ToSlash(rel) - if strings.HasPrefix(rel, "/") { - return false - } - clean := path.Clean(rel) // path.Clean => forward slashes - if clean == "." { - return true - } - if strings.HasPrefix(clean, "../") || clean == ".." 
{ - return false - } - // prevent weird traversal - if strings.Contains(clean, `\`) { - return false - } - return true -} - -func isSafeBasename(name string) bool { - name = strings.TrimSpace(name) - if name == "" { - return false - } - if strings.Contains(name, "/") || strings.Contains(name, "\\") { - return false - } - return filepath.Base(name) == name -} - -func recordDeleteVideo(w http.ResponseWriter, r *http.Request) { - // Frontend nutzt aktuell POST (siehe FinishedDownloads), daher erlauben wir POST + DELETE - if r.Method != http.MethodPost && r.Method != http.MethodDelete { - http.Error(w, "Nur POST oder DELETE erlaubt", http.StatusMethodNotAllowed) - return - } - - raw := strings.TrimSpace(r.URL.Query().Get("file")) - if raw == "" { - http.Error(w, "file fehlt", http.StatusBadRequest) - return - } - - // sicher decoden - file, err := url.QueryUnescape(raw) - if err != nil { - http.Error(w, "ungültiger file", http.StatusBadRequest) - return - } - file = strings.TrimSpace(file) - - // ✅ nur Basename erlauben (keine Unterordner, kein Traversal) - if file == "" || - strings.Contains(file, "/") || - strings.Contains(file, "\\") || - filepath.Base(file) != file { - http.Error(w, "ungültiger file", http.StatusBadRequest) - return - } - - ext := strings.ToLower(filepath.Ext(file)) - if ext != ".mp4" && ext != ".ts" { - http.Error(w, "nicht erlaubt", http.StatusForbidden) - return - } - - s := getSettings() - doneAbs, err := resolvePathRelativeToApp(s.DoneDir) - if err != nil { - http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) - return - } - if strings.TrimSpace(doneAbs) == "" { - http.Error(w, "doneDir ist leer", http.StatusBadRequest) - return - } - - // ✅ done + done/ sowie keep + keep/ - target, from, fi, err := resolveDoneFileByName(doneAbs, file) - if err != nil { - http.Error(w, "datei nicht gefunden", http.StatusNotFound) - return - } - if fi != nil && fi.IsDir() { - http.Error(w, "ist ein verzeichnis", 
http.StatusBadRequest) - return - } - - // ✅ Single-slot Trash: immer nur die *zuletzt* gelöschte Datei erlauben - trashDir := filepath.Join(doneAbs, ".trash") - - // ✅ Wenn im Single-slot Trash schon was liegt: ID merken, - // aber generated erst löschen, NACHDEM .trash wirklich erfolgreich geleert wurde. - prevBase := "" - prevCanonical := "" - - if b, err := os.ReadFile(filepath.Join(trashDir, "last.json")); err == nil && len(b) > 0 { - var prev struct { - File string `json:"file"` - } - if err := json.Unmarshal(b, &prev); err == nil { - prevFile := strings.TrimSpace(prev.File) - if prevFile != "" { - prevBase = strings.TrimSuffix(prevFile, filepath.Ext(prevFile)) - prevCanonical = stripHotPrefix(prevBase) - } - } - } - - // Trash komplett leeren => ältere Undos sind automatisch ungültig - // ⚠️ Fehler NICHT schlucken: wenn .trash nicht leerbar ist, darf der neue Delete nicht weiterlaufen. - if err := os.RemoveAll(trashDir); err != nil { - if runtime.GOOS == "windows" && isSharingViolation(err) { - http.Error(w, "konnte .trash nicht leeren (Datei wird gerade verwendet). Bitte Player schließen und erneut versuchen.", http.StatusConflict) - return - } - http.Error(w, "trash leeren fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) - return - } - - // ✅ Jetzt ist das alte Trash-Video wirklich endgültig weg → generated/meta// entfernen. - if prevCanonical != "" { - removeGeneratedForID(prevCanonical) - - // Best-effort: falls irgendwo mal Assets mit HOT-ID entstanden sind - if prevBase != "" && prevBase != prevCanonical { - removeGeneratedForID(prevBase) - } - } - - if err := os.MkdirAll(trashDir, 0o755); err != nil { - http.Error(w, "trash dir erstellen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) - return - } - - // Original-Dir relativ zu doneAbs merken (inkl. 
keep/ oder ) - origDir := filepath.Dir(target) - relDir, err := filepath.Rel(doneAbs, origDir) - if err != nil { - http.Error(w, "rel dir berechnen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) - return - } - relDir = filepath.ToSlash(relDir) - if strings.TrimSpace(relDir) == "" { - relDir = "." - } - - // ✅ Undo-Token jetzt schon erzeugen, damit wir es als "Single-slot key" speichern können - tok, err := encodeUndoDeleteToken(undoDeleteToken{ - Trash: "", // setzen wir gleich (trashName) - RelDir: relDir, // hast du oben schon berechnet - File: file, - }) - if err != nil { - http.Error(w, "undo token encode fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) - return - } - - trashName := tok + "__" + file // eindeutig + Token sichtbar in filename - trashName = strings.ReplaceAll(trashName, string(os.PathSeparator), "_") - dst := filepath.Join(trashDir, trashName) - - // ✅ Token muss auch wissen, wie der Trashname heißt - // (wir encoden den Token nicht neu — wir speichern Trashname separat in last.json) - - // move mit retry (Windows file-lock robust) - if err := renameWithRetry(target, dst); err != nil { - if runtime.GOOS == "windows" && isSharingViolation(err) { - http.Error(w, "datei wird gerade verwendet (Player offen). 
Bitte kurz stoppen und erneut versuchen.", http.StatusConflict) - return - } - http.Error(w, "trash move fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) - return - } - - // ✅ last.json schreiben: nur dieser Token ist gültig - type trashMeta struct { - Token string `json:"token"` // exakt der Query-Token (encoded) - TrashName string `json:"trashName"` // Dateiname in .trash - RelDir string `json:"relDir"` // ursprünglicher Ordner relativ zu doneAbs - File string `json:"file"` // originaler Name (basename) - DeletedAt int64 `json:"deletedAt"` - } - - meta := trashMeta{ - Token: tok, - TrashName: trashName, - RelDir: relDir, - File: file, - DeletedAt: time.Now().Unix(), - } - - b, _ := json.Marshal(meta) - _ = os.WriteFile(filepath.Join(trashDir, "last.json"), b, 0o644) - - // Cache/Jobs aufräumen (Assets NICHT hart löschen => Undo bleibt “schnell” möglich) - purgeDurationCacheForPath(target) - removeJobsByOutputBasename(file) - - notifyDoneChanged() - notifyJobsChanged() - - w.Header().Set("Content-Type", "application/json") - w.Header().Set("Cache-Control", "no-store") - _ = json.NewEncoder(w).Encode(map[string]any{ - "ok": true, - "file": file, - "from": from, // "done" | "keep" - "undoToken": tok, // ✅ für Undo - "trashed": true, - }) - -} - -func recordRestoreVideo(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - http.Error(w, "Nur POST erlaubt", http.StatusMethodNotAllowed) - return - } - - raw := strings.TrimSpace(r.URL.Query().Get("token")) - if raw == "" { - http.Error(w, "token fehlt", http.StatusBadRequest) - return - } - - // ✅ doneDir auflösen - s := getSettings() - doneAbs, err := resolvePathRelativeToApp(s.DoneDir) - if err != nil { - http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) - return - } - if strings.TrimSpace(doneAbs) == "" { - http.Error(w, "doneDir ist leer", http.StatusBadRequest) - return - } - - // ✅ Single-slot: last.json lesen und Token strikt 
validieren - trashDir := filepath.Join(doneAbs, ".trash") - metaPath := filepath.Join(trashDir, "last.json") - - b, err := os.ReadFile(metaPath) - if err != nil { - http.Error(w, "nichts zum Wiederherstellen", http.StatusNotFound) - return - } - - var meta struct { - Token string `json:"token"` - TrashName string `json:"trashName"` - RelDir string `json:"relDir"` - File string `json:"file"` - DeletedAt int64 `json:"deletedAt"` - } - if err := json.Unmarshal(b, &meta); err != nil { - http.Error(w, "trash meta ungültig", http.StatusInternalServerError) - return - } - if strings.TrimSpace(meta.Token) == "" || strings.TrimSpace(meta.TrashName) == "" || strings.TrimSpace(meta.File) == "" { - http.Error(w, "trash meta unvollständig", http.StatusInternalServerError) - return - } - - // ✅ Nur der letzte Token ist gültig - if raw != meta.Token { - http.Error(w, "token ungültig (nicht der letzte)", http.StatusNotFound) - return - } - - // ✅ Token zusätzlich decoden (Format/Signatur prüfen, aber Restore-Daten kommen aus last.json) - tok, err := decodeUndoDeleteToken(raw) - if err != nil { - http.Error(w, "token ungültig", http.StatusBadRequest) - return - } - - // ✅ Safety: nur sichere Pfad-Bestandteile aus meta verwenden - if !isSafeBasename(meta.TrashName) || !isSafeBasename(meta.File) || !isSafeRelDir(meta.RelDir) { - http.Error(w, "token inhalt ungültig", http.StatusBadRequest) - return - } - - // ✅ Extra Konsistenzchecks: token.File / token.RelDir müssen zu meta passen (optional aber sinnvoll) - if tok.File != meta.File || tok.RelDir != meta.RelDir { - http.Error(w, "token passt nicht zu letzter Löschung", http.StatusNotFound) - return - } - - ext := strings.ToLower(filepath.Ext(meta.File)) - if ext != ".mp4" && ext != ".ts" { - http.Error(w, "nicht erlaubt", http.StatusForbidden) - return - } - - // Quelle: exakt die zuletzt gelöschte Datei - src := filepath.Join(trashDir, meta.TrashName) - - // Zielordner rekonstruieren (relativ zu doneAbs) - rel := meta.RelDir - if 
rel == "." { - rel = "" - } - dstDir := filepath.Join(doneAbs, filepath.FromSlash(rel)) - dstDirClean := filepath.Clean(dstDir) - doneClean := filepath.Clean(doneAbs) - - // safety: dstDir muss innerhalb doneAbs liegen - if !strings.HasPrefix(strings.ToLower(dstDirClean)+string(os.PathSeparator), strings.ToLower(doneClean)+string(os.PathSeparator)) && - !strings.EqualFold(dstDirClean, doneClean) { - http.Error(w, "zielpfad ungültig", http.StatusBadRequest) - return - } - - if err := os.MkdirAll(dstDirClean, 0o755); err != nil { - http.Error(w, "zielordner erstellen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) - return - } - - dst, err := uniqueDestPath(dstDirClean, meta.File) - if err != nil { - http.Error(w, "zielname nicht verfügbar: "+err.Error(), http.StatusConflict) - return - } - - if err := renameWithRetry(src, dst); err != nil { - if runtime.GOOS == "windows" && isSharingViolation(err) { - http.Error(w, "restore fehlgeschlagen (Datei wird gerade verwendet).", http.StatusConflict) - return - } - http.Error(w, "restore fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) - return - } - - // ✅ Optional: Trash leeren, damit Token danach definitiv tot ist - _ = os.RemoveAll(trashDir) - _ = os.MkdirAll(trashDir, 0o755) - - notifyDoneChanged() - - w.Header().Set("Content-Type", "application/json") - w.Header().Set("Cache-Control", "no-store") - _ = json.NewEncoder(w).Encode(map[string]any{ - "ok": true, - "file": meta.File, - "restoredFile": filepath.Base(dst), // kann __dup enthalten - }) -} - -func recordUnkeepVideo(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - http.Error(w, "Nur POST erlaubt", http.StatusMethodNotAllowed) - return - } - - raw := strings.TrimSpace(r.URL.Query().Get("file")) - if raw == "" { - http.Error(w, "file fehlt", http.StatusBadRequest) - return - } - - file, err := url.QueryUnescape(raw) - if err != nil { - http.Error(w, "ungültiger file", http.StatusBadRequest) - return - } - 
file = strings.TrimSpace(file) - - if !isSafeBasename(file) { - http.Error(w, "ungültiger file", http.StatusBadRequest) - return - } - - ext := strings.ToLower(filepath.Ext(file)) - if ext != ".mp4" && ext != ".ts" { - http.Error(w, "nicht erlaubt", http.StatusForbidden) - return - } - - s := getSettings() - doneAbs, err := resolvePathRelativeToApp(s.DoneDir) - if err != nil { - http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) - return - } - if strings.TrimSpace(doneAbs) == "" { - http.Error(w, "doneDir ist leer", http.StatusBadRequest) - return - } - - // Quelle muss in keep (root oder keep/) liegen - src, from, fi, err := resolveDoneFileByName(doneAbs, file) - if err != nil { - http.Error(w, "datei nicht gefunden", http.StatusNotFound) - return - } - if from != "keep" { - http.Error(w, "datei ist nicht in keep", http.StatusConflict) - return - } - if fi != nil && fi.IsDir() { - http.Error(w, "ist ein verzeichnis", http.StatusBadRequest) - return - } - - // Ziel: zurück nach done/ (flach, ohne model-subdirs) - dstDir := doneAbs - - if err := os.MkdirAll(dstDir, 0o755); err != nil { - http.Error(w, "done subdir erstellen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) - return - } - - dst, err := uniqueDestPath(dstDir, file) - if err != nil { - http.Error(w, "zielname nicht verfügbar: "+err.Error(), http.StatusConflict) - return - } - - if err := renameWithRetry(src, dst); err != nil { - if runtime.GOOS == "windows" && isSharingViolation(err) { - http.Error(w, "unkeep fehlgeschlagen (Datei wird gerade verwendet).", http.StatusConflict) - return - } - http.Error(w, "unkeep fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) - return - } - - notifyDoneChanged() - - w.Header().Set("Content-Type", "application/json") - w.Header().Set("Cache-Control", "no-store") - _ = json.NewEncoder(w).Encode(map[string]any{ - "ok": true, - "oldFile": file, - "newFile": filepath.Base(dst), - }) -} - -func 
serveVideoFile(w http.ResponseWriter, r *http.Request, path string) { - f, err := openForReadShareDelete(path) - if err != nil { - http.Error(w, "datei öffnen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) - return - } - defer f.Close() - - fi, err := f.Stat() - if err != nil || fi.IsDir() || fi.Size() == 0 { - http.Error(w, "datei nicht gefunden", http.StatusNotFound) - return - } - - w.Header().Set("Cache-Control", "no-store") - w.Header().Set("Accept-Ranges", "bytes") - w.Header().Set("X-Content-Type-Options", "nosniff") - - ext := strings.ToLower(filepath.Ext(path)) - switch ext { - case ".ts": - w.Header().Set("Content-Type", "video/mp2t") - default: - w.Header().Set("Content-Type", "video/mp4") - } - - // ServeContent unterstützt Range Requests (wichtig für Video) - http.ServeContent(w, r, filepath.Base(path), fi.ModTime(), f) -} - -func sniffVideoKind(path string) (string, error) { - f, err := openForReadShareDelete(path) - if err != nil { - return "", err - } - defer f.Close() - - buf := make([]byte, 64) - n, _ := f.Read(buf) - buf = buf[:n] - - // HTML? 
- trim := bytes.TrimSpace(buf) - if len(trim) >= 1 && trim[0] == '<' { - return "html", nil - } - - // MPEG-TS: 0x47 sync byte - if len(buf) >= 1 && buf[0] == 0x47 { - return "ts", nil - } - - // MP4: "ftyp" typischerweise bei Offset 4 - if len(buf) >= 8 && string(buf[4:8]) == "ftyp" { - return "mp4", nil - } - - return "unknown", nil -} - -func recordKeepVideo(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - http.Error(w, "Nur POST erlaubt", http.StatusMethodNotAllowed) - return - } - - raw := strings.TrimSpace(r.URL.Query().Get("file")) - if raw == "" { - http.Error(w, "file fehlt", http.StatusBadRequest) - return - } - - file, err := url.QueryUnescape(raw) - if err != nil { - http.Error(w, "ungültiger file", http.StatusBadRequest) - return - } - file = strings.TrimSpace(file) - - // ✅ nur Basename erlauben - if file == "" || - strings.Contains(file, "/") || - strings.Contains(file, "\\") || - filepath.Base(file) != file { - http.Error(w, "ungültiger file", http.StatusBadRequest) - return - } - - ext := strings.ToLower(filepath.Ext(file)) - if ext != ".mp4" && ext != ".ts" { - http.Error(w, "nicht erlaubt", http.StatusForbidden) - return - } - - s := getSettings() - doneAbs, err := resolvePathRelativeToApp(s.DoneDir) - if err != nil { - http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) - return - } - if strings.TrimSpace(doneAbs) == "" { - http.Error(w, "doneDir ist leer", http.StatusBadRequest) - return - } - - keepRoot := filepath.Join(doneAbs, "keep") - if err := os.MkdirAll(keepRoot, 0o755); err != nil { - http.Error(w, "keep dir erstellen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) - return - } - - // ✅ 0) Wenn schon irgendwo in keep (root oder keep/) existiert: - // - wenn im keep-root: jetzt nach keep// nachziehen - if p, _, ok := findFileInDirOrOneLevelSubdirs(keepRoot, file, ""); ok { - // p liegt entweder in keepRoot oder keepRoot/ - if 
strings.EqualFold(filepath.Clean(filepath.Dir(p)), filepath.Clean(keepRoot)) { - // im Root => versuchen einzusortieren - modelKey := modelKeyFromFilenameOrPath(file, p /* srcPath */, keepRoot /* doneAbs dummy, wird nicht genutzt */) - modelKey = sanitizeModelKey(modelKey) - - // Optionaler Fallback: wenn wir aus dem keep-root Pfad nix ziehen können, nur aus Filename: - if modelKey == "" { - stem := strings.TrimSuffix(file, filepath.Ext(file)) - modelKey = sanitizeModelKey(modelNameFromFilename(stem)) - } - - if modelKey != "" { - dstDir := filepath.Join(keepRoot, modelKey) - if err := os.MkdirAll(dstDir, 0o755); err == nil { - dst, derr := uniqueDestPath(dstDir, file) - if derr == nil { - // best-effort move - _ = renameWithRetry(p, dst) - } - } - } - } - - w.Header().Set("Content-Type", "application/json") - w.Header().Set("Cache-Control", "no-store") - _ = json.NewEncoder(w).Encode(map[string]any{ - "ok": true, - "file": file, - "alreadyKept": true, - }) - return - } - - // ✅ 1) Quelle in done (root oder done/), aber NICHT aus keep - src, fi, ok := findFileInDirOrOneLevelSubdirs(doneAbs, file, "keep") - if !ok { - http.Error(w, "datei nicht gefunden", http.StatusNotFound) - return - } - if fi == nil || fi.IsDir() { - http.Error(w, "ist ein verzeichnis", http.StatusBadRequest) - return - } - - // ✅ 2) Ziel: keep//file - modelKey := modelKeyFromFilenameOrPath(file, src, doneAbs) - dstDir := keepRoot - if modelKey != "" { - dstDir = filepath.Join(keepRoot, modelKey) - } - - if err := os.MkdirAll(dstDir, 0o755); err != nil { - http.Error(w, "keep subdir erstellen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) - return - } - - dst, err := uniqueDestPath(dstDir, file) - if err != nil { - http.Error(w, "zielname nicht verfügbar: "+err.Error(), http.StatusConflict) - return - } - - // rename mit retry (Windows file-lock) - if err := renameWithRetry(src, dst); err != nil { - if runtime.GOOS == "windows" && isSharingViolation(err) { - http.Error(w, "keep 
fehlgeschlagen (Datei wird gerade verwendet).", http.StatusConflict) - return - } - http.Error(w, "keep fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) - return - } - - notifyDoneChanged() - - // ... dein bestehender Cleanup-Block (generated Assets löschen, legacy cleanup, removeJobsByOutputBasename) bleibt unverändert ... - - w.Header().Set("Content-Type", "application/json") - w.Header().Set("Cache-Control", "no-store") - _ = json.NewEncoder(w).Encode(map[string]any{ - "ok": true, - "file": file, - "alreadyKept": false, - "newFile": filepath.Base(dst), // ✅ NEU - }) - -} - -func recordToggleHot(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - http.Error(w, "Nur POST", http.StatusMethodNotAllowed) - return - } - - raw := strings.TrimSpace(r.URL.Query().Get("file")) - if raw == "" { - http.Error(w, "file fehlt", http.StatusBadRequest) - return - } - - file, err := url.QueryUnescape(raw) - if err != nil { - http.Error(w, "ungültiger file", http.StatusBadRequest) - return - } - file = strings.TrimSpace(file) - - // ✅ nur Basename erlauben - if file == "" || - strings.Contains(file, "/") || - strings.Contains(file, "\\") || - filepath.Base(file) != file { - http.Error(w, "ungültiger file", http.StatusBadRequest) - return - } - - ext := strings.ToLower(filepath.Ext(file)) - if ext != ".mp4" && ext != ".ts" { - http.Error(w, "nicht erlaubt", http.StatusForbidden) - return - } - - s := getSettings() - doneAbs, err := resolvePathRelativeToApp(s.DoneDir) - if err != nil { - http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) - return - } - if strings.TrimSpace(doneAbs) == "" { - http.Error(w, "doneDir ist leer", http.StatusBadRequest) - return - } - - // ✅ Quelle kann in done/, done/, keep/, keep/ liegen - src, from, fi, err := resolveDoneFileByName(doneAbs, file) - if err != nil { - http.Error(w, "datei nicht gefunden", http.StatusNotFound) - return - } - if fi != nil && fi.IsDir() { 
- http.Error(w, "ist ein verzeichnis", http.StatusBadRequest) - return - } - - srcDir := filepath.Dir(src) // ✅ wichtig: toggeln im tatsächlichen Ordner - - // toggle: HOT Prefix - newFile := file - if strings.HasPrefix(file, "HOT ") { - newFile = strings.TrimPrefix(file, "HOT ") - } else { - newFile = "HOT " + file - } - - dst := filepath.Join(srcDir, newFile) // ✅ im selben Ordner toggeln (done oder keep) - if _, err := os.Stat(dst); err == nil { - http.Error(w, "ziel existiert bereits", http.StatusConflict) - return - } else if !os.IsNotExist(err) { - http.Error(w, "stat ziel fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) - return - } - - if err := renameWithRetry(src, dst); err != nil { - if runtime.GOOS == "windows" && isSharingViolation(err) { - http.Error(w, "rename fehlgeschlagen (Datei wird gerade abgespielt). Bitte erneut versuchen.", http.StatusConflict) - return - } - http.Error(w, "rename fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) - return - } - - // ✅ KEIN generated-rename! 
- // Assets bleiben canonical (ohne HOT) - canonicalID := stripHotPrefix(strings.TrimSuffix(file, filepath.Ext(file))) - - renameJobsOutputBasename(file, newFile) - - notifyDoneChanged() - notifyJobsChanged() - - w.Header().Set("Content-Type", "application/json") - w.Header().Set("Cache-Control", "no-store") - _ = json.NewEncoder(w).Encode(map[string]any{ - "ok": true, - "oldFile": file, - "newFile": newFile, - "canonicalID": canonicalID, - "from": from, // "done" | "keep" - }) -} - -func maybeRemuxTS(path string) (string, error) { - path = strings.TrimSpace(path) - if path == "" { - return "", nil - } - - if !strings.EqualFold(filepath.Ext(path), ".ts") { - return "", nil - } - - mp4 := strings.TrimSuffix(path, filepath.Ext(path)) + ".mp4" - - // remux (ohne neu encoden) - if err := remuxTSToMP4(path, mp4); err != nil { - return "", err - } - - _ = os.Remove(path) // TS entfernen, wenn MP4 ok - return mp4, nil -} - -func maybeRemuxTSForJob(job *RecordJob, path string) (string, error) { - path = strings.TrimSpace(path) - if path == "" { - return "", nil - } - - if !strings.EqualFold(filepath.Ext(path), ".ts") { - return "", nil - } - - mp4 := strings.TrimSuffix(path, filepath.Ext(path)) + ".mp4" - - // input size für fallback - var inSize int64 - if fi, err := os.Stat(path); err == nil && !fi.IsDir() { - inSize = fi.Size() - } - - // duration (für sauberen progress) - var durSec float64 - { - durCtx, cancel := context.WithTimeout(context.Background(), 15*time.Second) - durSec, _ = durationSecondsCached(durCtx, path) - cancel() - } - - const base = 10 - const span = 60 // 10..69 (70 startet "moving") - - lastProgress := base - lastTick := time.Now().Add(-time.Second) - - onRatio := func(r float64) { - if r < 0 { - r = 0 - } - if r > 1 { - r = 1 - } - p := base + int(r*float64(span)) - if p >= 70 { - p = 69 - } - - if p <= lastProgress { - return - } - // leicht throttlen - if time.Since(lastTick) < 150*time.Millisecond && p < 79 { - return - } - lastProgress = p - 
lastTick = time.Now() - setJobPhase(job, "remuxing", p) - } - - remuxCtx, cancel := context.WithTimeout(context.Background(), 30*time.Minute) - defer cancel() - - if err := remuxTSToMP4WithProgress(remuxCtx, path, mp4, durSec, inSize, onRatio); err != nil { - return "", err - } - - _ = os.Remove(path) // TS entfernen, wenn MP4 ok - setJobPhase(job, "remuxing", 69) // ✅ Remux finished (nie rückwärts) - return mp4, nil - -} - -func moveFile(src, dst string) error { - // zuerst Rename (schnell) - if err := os.Rename(src, dst); err == nil { - return nil - } else { - // Fallback: Copy+Remove (z.B. bei EXDEV) - in, err2 := os.Open(src) - if err2 != nil { - return err - } - defer in.Close() - - out, err2 := os.Create(dst) - if err2 != nil { - return err - } - if _, err2 := io.Copy(out, in); err2 != nil { - out.Close() - return err2 - } - if err2 := out.Close(); err2 != nil { - return err2 - } - return os.Remove(src) - } -} - -const windowsSharingViolation syscall.Errno = 32 // ERROR_SHARING_VIOLATION - -func isSharingViolation(err error) bool { - if runtime.GOOS != "windows" { - return false - } - // Windows: ERROR_SHARING_VIOLATION = 32, ERROR_LOCK_VIOLATION = 33 - var pe *os.PathError - if errors.As(err, &pe) { - if errno, ok := pe.Err.(syscall.Errno); ok { - return errno == syscall.Errno(32) || errno == syscall.Errno(33) - } - } - // Fallback über Text - s := strings.ToLower(err.Error()) - return strings.Contains(s, "sharing violation") || - strings.Contains(s, "used by another process") || - strings.Contains(s, "wird von einem anderen prozess verwendet") -} - -func removeWithRetry(path string) error { - var err error - for i := 0; i < 40; i++ { // ~4s bei 100ms - err = os.Remove(path) - if err == nil { - return nil - } - if isSharingViolation(err) { - time.Sleep(100 * time.Millisecond) - continue - } - return err - } - return err -} - -func renameWithRetry(oldPath, newPath string) error { - var err error - for i := 0; i < 40; i++ { - err = os.Rename(oldPath, newPath) 
- if err == nil { - return nil - } - if isSharingViolation(err) { - time.Sleep(100 * time.Millisecond) - continue - } - return err - } - return err -} - -func moveToDoneDir(src string) (string, error) { - src = strings.TrimSpace(src) - if src == "" { - return "", fmt.Errorf("src empty") - } - - s := getSettings() - - doneAbs, err := resolvePathRelativeToApp(s.DoneDir) - if err != nil || strings.TrimSpace(doneAbs) == "" { - // fallback - doneAbs = strings.TrimSpace(s.DoneDir) - } - if strings.TrimSpace(doneAbs) == "" { - return "", fmt.Errorf("doneDir empty") - } - - // Quelle normalisieren/abs machen (best effort) - srcAbs := filepath.Clean(src) - if !filepath.IsAbs(srcAbs) { - if abs, rerr := resolvePathRelativeToApp(srcAbs); rerr == nil && strings.TrimSpace(abs) != "" { - srcAbs = abs - } - } - - fi, err := os.Stat(srcAbs) - if err != nil || fi.IsDir() { - return "", fmt.Errorf("src not found: %v", err) - } - - file := filepath.Base(srcAbs) - - // Zielordner: immer done/ (keine model-subdirs) - dstDir := doneAbs - - if err := os.MkdirAll(dstDir, 0o755); err != nil { - return "", err - } - - // Bei Kollisionen eindeutigen Namen wählen - dst, err := uniqueDestPath(dstDir, file) - if err != nil { - return "", err - } - - // Robust verschieben (Windows / Locks / Cross-device) - if err := renameWithRetry(srcAbs, dst); err != nil { - return "", err - } - - // Duration-Cache invalidieren (du nutzt das ja) - purgeDurationCacheForPath(srcAbs) - - return dst, nil -} - -func recordStatus(w http.ResponseWriter, r *http.Request) { - id := r.URL.Query().Get("id") - if id == "" { - http.Error(w, "id fehlt", http.StatusBadRequest) - return - } - - jobsMu.Lock() - job, ok := jobs[id] - jobsMu.Unlock() - - if !ok { - http.Error(w, "job nicht gefunden", http.StatusNotFound) - return - } - - json.NewEncoder(w).Encode(job) -} - -func recordStop(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - http.Error(w, "Nur POST", http.StatusMethodNotAllowed) - 
return - } - - id := r.URL.Query().Get("id") - - jobsMu.Lock() - job, ok := jobs[id] - jobsMu.Unlock() - if !ok { - http.Error(w, "job nicht gefunden", http.StatusNotFound) - return - } - - stopJobsInternal([]*RecordJob{job}) - - w.Header().Set("Content-Type", "application/json") - _ = json.NewEncoder(w).Encode(job) -} - -// --- DVR-ähnlicher Recorder-Ablauf --- -// Entspricht grob dem RecordStream aus dem Channel-Snippet: -func RecordStream( - ctx context.Context, - hc *HTTPClient, - domain string, - username string, - outputPath string, - httpCookie string, - job *RecordJob, -) error { - // 1) Seite laden - // Domain sauber zusammenbauen (mit/ohne Slash) - base := strings.TrimRight(domain, "/") - pageURL := base + "/" + username - - body, err := hc.FetchPage(ctx, pageURL, httpCookie) - if err != nil { - return fmt.Errorf("seite laden: %w", err) - } - - // 2) HLS-URL aus roomDossier extrahieren (wie DVR.ParseStream) - hlsURL, err := ParseStream(body) - if err != nil { - return fmt.Errorf("stream-parsing: %w", err) - } - - // 3) Playlist holen (wie stream.GetPlaylist im DVR) - playlist, err := FetchPlaylist(ctx, hc, hlsURL, httpCookie) - if err != nil { - return fmt.Errorf("playlist abrufen: %w", err) - } - - // ✅ Job erst jetzt sichtbar machen (Stream wirklich verfügbar) - if job != nil { - _ = publishJob(job.ID) - } - - if job != nil && strings.TrimSpace(job.PreviewDir) == "" { - assetID := assetIDForJob(job) - if strings.TrimSpace(assetID) == "" { - assetID = job.ID - } - previewDir := filepath.Join(os.TempDir(), "rec_preview", assetID) - - jobsMu.Lock() - job.PreviewDir = previewDir - jobsMu.Unlock() - - if err := startPreviewHLS(ctx, job, playlist.PlaylistURL, previewDir, httpCookie, hc.userAgent); err != nil { - fmt.Println("⚠️ preview start fehlgeschlagen:", err) - } - } - - // 4) Datei öffnen - file, err := os.Create(outputPath) - if err != nil { - return fmt.Errorf("datei erstellen: %w", err) - } - if job != nil { - _ = publishJob(job.ID) - } - - defer 
func() { - _ = file.Close() - }() - - // live size tracking (für UI) - var written int64 - var lastPush time.Time - var lastBytes int64 - - // 5) Segmente „watchen“ – analog zu WatchSegments + HandleSegment im DVR - err = playlist.WatchSegments(ctx, hc, httpCookie, func(b []byte, duration float64) error { - // Hier wäre im DVR ch.HandleSegment – bei dir einfach in eine Datei schreiben - if _, err := file.Write(b); err != nil { - return fmt.Errorf("schreibe segment: %w", err) - } - - // ✅ live size (UI) – throttled - written += int64(len(b)) - if job != nil { - now := time.Now() - if lastPush.IsZero() || now.Sub(lastPush) >= 750*time.Millisecond || (written-lastBytes) >= 2*1024*1024 { - jobsMu.Lock() - job.SizeBytes = written - jobsMu.Unlock() - notifyJobsChanged() - - lastPush = now - lastBytes = written - } - } - - // Könntest hier z.B. auch Dauer/Größe loggen, wenn du möchtest - _ = duration // aktuell unbenutzt - return nil - }) - if err != nil { - return fmt.Errorf("watch segments: %w", err) - } - - return nil -} - -// RecordStreamMFC nimmt vorerst die URL 1:1 und ruft ffmpeg direkt darauf auf. -// In der Praxis musst du hier meist erst eine HLS-URL aus dem HTML extrahieren. -// RecordStreamMFC ist jetzt nur noch ein Wrapper um den bewährten MFC-Flow (runMFC). 
-func RecordStreamMFC( - ctx context.Context, - hc *HTTPClient, - username string, - outputPath string, - job *RecordJob, -) error { - mfc := NewMyFreeCams(username) - - // ✅ Statt sofort zu failen: kurz auf PUBLIC warten - const waitPublicMax = 2 * time.Minute - deadline := time.Now().Add(waitPublicMax) - - var lastSt *Status - - for { - // Context cancel / stop - if err := ctx.Err(); err != nil { - return err - } - - st, err := mfc.GetStatus() - if err == nil { - tmp := st - lastSt = &tmp - - if st == StatusPublic { - break - } - } - - if time.Now().After(deadline) { - if lastSt == nil { - return fmt.Errorf("mfc: stream wurde nicht public innerhalb %s", waitPublicMax) - } - return fmt.Errorf("mfc: stream ist nicht public nach %s (letzter Status: %s)", waitPublicMax, *lastSt) - - } - - time.Sleep(5 * time.Second) - } - - // ✅ erst jetzt die Video URL holen (weil public) - m3u8URL, err := mfc.GetVideoURL(false) - if err != nil { - return fmt.Errorf("mfc get video url: %w", err) - } - if strings.TrimSpace(m3u8URL) == "" { - return fmt.Errorf("mfc: keine m3u8 URL gefunden") - } - - // ✅ Job erst jetzt sichtbar machen (Stream wirklich verfügbar) - if job != nil { - _ = publishJob(job.ID) - } - - // ✅ Preview starten - if job != nil && job.PreviewDir == "" { - assetID := assetIDForJob(job) - if strings.TrimSpace(assetID) == "" { - assetID = job.ID - } - previewDir := filepath.Join(os.TempDir(), "rec_preview", assetID) - - job.PreviewDir = previewDir - - if err := startPreviewHLS(ctx, job, m3u8URL, previewDir, "", hc.userAgent); err != nil { - fmt.Println("⚠️ preview start fehlgeschlagen:", err) - job.PreviewDir = "" // rollback - } - } - - // Aufnahme starten - return handleM3U8Mode(ctx, m3u8URL, outputPath, job) -} - -func detectProvider(raw string) string { - s := strings.ToLower(raw) - - if strings.Contains(s, "chaturbate.com") { - return "chaturbate" - } - if strings.Contains(s, "myfreecams.com") { - return "mfc" - } - return "unknown" -} - -// --- helper --- -func 
extractUsername(input string) string { - input = strings.TrimSpace(input) - input = strings.TrimPrefix(input, "https://") - input = strings.TrimPrefix(input, "http://") - input = strings.TrimPrefix(input, "www.") - if strings.HasPrefix(input, "chaturbate.com/") { - input = strings.TrimPrefix(input, "chaturbate.com/") - } - - // alles nach dem ersten Slash abschneiden (Pfadteile, /, etc.) - if idx := strings.IndexAny(input, "/?#"); idx != -1 { - input = input[:idx] - } - - // zur Sicherheit evtl. übrig gebliebene Slash/Backslash trimmen - return strings.Trim(input, "/\\") -} - -// Cookie-Hilfsfunktion (wie ParseCookies + AddCookie im DVR) -func addCookiesFromString(req *http.Request, cookieStr string) { - if cookieStr == "" { - return - } - pairs := strings.Split(cookieStr, ";") - for _, pair := range pairs { - parts := strings.SplitN(strings.TrimSpace(pair), "=", 2) - if len(parts) != 2 { - continue - } - name := strings.TrimSpace(parts[0]) - value := strings.TrimSpace(parts[1]) - if name == "" { - continue - } - req.AddCookie(&http.Cookie{ - Name: name, - Value: value, - }) - } -} - -// ParseStream entspricht der DVR-Variante (roomDossier → hls_source) -func ParseStream(html string) (string, error) { - matches := roomDossierRegexp.FindStringSubmatch(html) - if len(matches) == 0 { - return "", errors.New("room dossier nicht gefunden") - } - - // DVR-Style Unicode-Decode - decoded, err := strconv.Unquote( - strings.Replace(strconv.Quote(matches[1]), `\\u`, `\u`, -1), - ) - if err != nil { - return "", fmt.Errorf("Unicode-decode failed: %w", err) - } - - var rd struct { - HLSSource string `json:"hls_source"` - } - if err := json.Unmarshal([]byte(decoded), &rd); err != nil { - return "", fmt.Errorf("JSON-parse failed: %w", err) - } - if rd.HLSSource == "" { - return "", errors.New("kein HLS-Quell-URL im JSON") - } - return rd.HLSSource, nil -} - -// --- Playlist/WatchSegments wie gehabt --- -type Playlist struct { - PlaylistURL string - RootURL string - Resolution int 
- Framerate int -} - -type Resolution struct { - Framerate map[int]string - Width int -} - // nimmt jetzt *HTTPClient entgegen func FetchPlaylist(ctx context.Context, hc *HTTPClient, hlsSource, httpCookie string) (*Playlist, error) { if hlsSource == "" { @@ -9555,450 +4575,6 @@ func FetchPlaylist(ctx context.Context, hc *HTTPClient, hlsSource, httpCookie st }, nil } -// nutzt ebenfalls *HTTPClient -func (p *Playlist) WatchSegments( - ctx context.Context, - hc *HTTPClient, - httpCookie string, - handler func([]byte, float64) error, -) error { - var lastSeq int64 = -1 - emptyRounds := 0 - const maxEmptyRounds = 60 // statt 5 - - for { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - // Playlist holen - req, err := hc.NewRequest(ctx, http.MethodGet, p.PlaylistURL, httpCookie) - if err != nil { - return fmt.Errorf("Fehler beim Erstellen der Playlist-Request: %w", err) - } - - resp, err := hc.client.Do(req) - if err != nil { - emptyRounds++ - if emptyRounds >= maxEmptyRounds { - return errors.New("❌ Playlist nicht mehr erreichbar – Stream vermutlich offline") - } - time.Sleep(2 * time.Second) - continue - } - - playlist, listType, err := m3u8.DecodeFrom(resp.Body, true) - resp.Body.Close() - - if err != nil || listType != m3u8.MEDIA { - emptyRounds++ - if emptyRounds >= maxEmptyRounds { - return errors.New("❌ Fehlerhafte Playlist – möglicherweise offline") - } - time.Sleep(2 * time.Second) - continue - } - - media := playlist.(*m3u8.MediaPlaylist) - newSegment := false - - for _, segment := range media.Segments { - if segment == nil { - continue - } - if int64(segment.SeqId) <= lastSeq { - continue - } - - lastSeq = int64(segment.SeqId) - newSegment = true - - segmentURL := p.RootURL + segment.URI - - segReq, err := hc.NewRequest(ctx, http.MethodGet, segmentURL, httpCookie) - if err != nil { - continue - } - - segResp, err := hc.client.Do(segReq) - if err != nil { - continue - } - - data, err := io.ReadAll(segResp.Body) - segResp.Body.Close() - if 
err != nil || len(data) == 0 { - continue - } - - if err := handler(data, segment.Duration); err != nil { - return err - } - } - - if newSegment { - emptyRounds = 0 - } else { - emptyRounds++ - if emptyRounds >= maxEmptyRounds { - return errors.New("🛑 Keine neuen HLS-Segmente empfangen – Stream vermutlich beendet oder offline.") - } - } - - time.Sleep(1 * time.Second) - } -} - -/* ─────────────────────────────── - MyFreeCams (übernommener Flow) - ─────────────────────────────── */ - -type MyFreeCams struct { - Username string - Attrs map[string]string - VideoURL string -} - -func NewMyFreeCams(username string) *MyFreeCams { - return &MyFreeCams{ - Username: username, - Attrs: map[string]string{}, - } -} - -func (m *MyFreeCams) GetWebsiteURL() string { - return "https://www.myfreecams.com/#" + m.Username -} - -func (m *MyFreeCams) GetVideoURL(refresh bool) (string, error) { - if !refresh && m.VideoURL != "" { - return m.VideoURL, nil - } - - // Prüfen, ob alle benötigten Attribute vorhanden sind - if _, ok := m.Attrs["data-cam-preview-model-id-value"]; !ok { - return "", nil - } - sid := m.Attrs["data-cam-preview-server-id-value"] - midBase := m.Attrs["data-cam-preview-model-id-value"] - isWzobs := strings.ToLower(m.Attrs["data-cam-preview-is-wzobs-value"]) == "true" - - midInt, err := strconv.Atoi(midBase) - if err != nil { - return "", fmt.Errorf("model-id parse error: %w", err) - } - mid := 100000000 + midInt - a := "" - if isWzobs { - a = "a_" - } - - playlistURL := fmt.Sprintf( - "https://previews.myfreecams.com/hls/NxServer/%s/ngrp:mfc_%s%d.f4v_mobile_mhp1080_previewurl/playlist.m3u8", - sid, a, mid, - ) - - // Validieren (HTTP 200) & ggf. 
auf gewünschte Auflösung verlinken - u, err := getWantedResolutionPlaylist(playlistURL) - if err != nil { - return "", err - } - m.VideoURL = u - return m.VideoURL, nil -} - -func (m *MyFreeCams) GetStatus() (Status, error) { - // 1) share-Seite prüfen (existiert/nicht existiert) - shareURL := "https://share.myfreecams.com/" + m.Username - resp, err := http.Get(shareURL) - if err != nil { - return StatusUnknown, err - } - defer resp.Body.Close() - - if resp.StatusCode == 404 { - return StatusNotExist, nil - } - if resp.StatusCode != 200 { - return StatusUnknown, fmt.Errorf("HTTP %d", resp.StatusCode) - } - - // wir brauchen sowohl Bytes (für Suche) als auch Reader (für HTML) - bodyBytes, err := io.ReadAll(resp.Body) - if err != nil { - return StatusUnknown, err - } - - // 2) „tracking.php?“ suchen und prüfen, ob model_id vorhanden ist - start := bytes.Index(bodyBytes, []byte("https://www.myfreecams.com/php/tracking.php?")) - if start == -1 { - // ohne tracking Parameter -> behandeln wie nicht existent - return StatusNotExist, nil - } - end := bytes.IndexByte(bodyBytes[start:], '"') - if end == -1 { - return StatusUnknown, errors.New("tracking url parse failed") - } - raw := string(bodyBytes[start : start+end]) - u, err := url.Parse(raw) - if err != nil { - return StatusUnknown, fmt.Errorf("tracking url invalid: %w", err) - } - qs := u.Query() - if qs.Get("model_id") == "" { - return StatusNotExist, nil - } - - // 3) HTML parsen und
Attribute auslesen - doc, err := goquery.NewDocumentFromReader(bytes.NewReader(bodyBytes)) - if err != nil { - return StatusUnknown, err - } - - params := doc.Find(".campreview").First() - if params.Length() == 0 { - // keine campreview -> offline - return StatusOffline, nil - } - - attrs := map[string]string{} - params.Each(func(_ int, s *goquery.Selection) { - for _, a := range []string{ - "data-cam-preview-server-id-value", - "data-cam-preview-model-id-value", - "data-cam-preview-is-wzobs-value", - } { - if v, ok := s.Attr(a); ok { - attrs[a] = v - } - } - }) - m.Attrs = attrs - - // 4) Versuchen, VideoURL (Preview-HLS) zu ermitteln - uStr, err := m.GetVideoURL(true) - if err != nil { - return StatusUnknown, err - } - if uStr != "" { - return StatusPublic, nil - } - // campreview vorhanden, aber keine playable url -> „PRIVATE“ - return StatusPrivate, nil -} - -func runMFC(ctx context.Context, username string, outArg string) error { - mfc := NewMyFreeCams(username) - - st, err := mfc.GetStatus() - if err != nil { - return err - } - if st != StatusPublic { - return fmt.Errorf("Stream ist nicht öffentlich (Status: %s)", st) - } - - m3u8URL, err := mfc.GetVideoURL(false) - if err != nil { - return err - } - if m3u8URL == "" { - return errors.New("keine m3u8 URL gefunden") - } - - return handleM3U8Mode(ctx, m3u8URL, outArg, nil) -} - -/* ─────────────────────────────── - Gemeinsame HLS/M3U8-Helper (MFC) - ─────────────────────────────── */ - -func getWantedResolutionPlaylist(playlistURL string) (string, error) { - // Holt eine URL; wenn MASTER, wähle beste Variante; wenn MEDIA, gib die URL zurück. 
- resp, err := http.Get(playlistURL) - if err != nil { - return "", err - } - defer resp.Body.Close() - if resp.StatusCode != 200 { - return "", fmt.Errorf("HTTP %d beim Abruf der m3u8", resp.StatusCode) - } - - playlist, listType, err := m3u8.DecodeFrom(resp.Body, true) - if err != nil { - return "", fmt.Errorf("m3u8 parse: %w", err) - } - if listType == m3u8.MEDIA { - return playlistURL, nil - } - - master := playlist.(*m3u8.MasterPlaylist) - var bestURI string - var bestWidth int - var bestFramerate float64 - - for _, v := range master.Variants { - if v == nil { - continue - } - // Resolution kommt als "WxH" – wir nutzen die Höhe als Vergleichswert. - w := 0 - if v.Resolution != "" { - parts := strings.Split(v.Resolution, "x") - if len(parts) == 2 { - if ww, err := strconv.Atoi(parts[1]); err == nil { - w = ww - } - } - } - fr := 30.0 - if v.FrameRate > 0 { - fr = v.FrameRate - } else if strings.Contains(v.Name, "FPS:60") { - fr = 60 - } - if w > bestWidth || (w == bestWidth && fr > bestFramerate) { - bestWidth = w - bestFramerate = fr - bestURI = v.URI - } - } - if bestURI == "" { - return "", errors.New("Master-Playlist ohne gültige Varianten") - } - - // Absolutieren - root := playlistURL[:strings.LastIndex(playlistURL, "/")+1] - if strings.HasPrefix(bestURI, "http://") || strings.HasPrefix(bestURI, "https://") { - return bestURI, nil - } - return root + bestURI, nil -} - -func handleM3U8Mode(ctx context.Context, m3u8URL, outFile string, job *RecordJob) error { - // Validierung - u, err := url.Parse(m3u8URL) - if err != nil || (u.Scheme != "http" && u.Scheme != "https") { - return fmt.Errorf("ungültige URL: %q", m3u8URL) - } - - // HTTP-Check MIT Context - req, err := http.NewRequestWithContext(ctx, "GET", m3u8URL, nil) - if err != nil { - return err - } - resp, err := http.DefaultClient.Do(req) - if err != nil { - return err - } - io.Copy(io.Discard, resp.Body) - resp.Body.Close() - - if resp.StatusCode != 200 { - return fmt.Errorf("HTTP %d beim Abruf der 
m3u8", resp.StatusCode) - } - - if strings.TrimSpace(outFile) == "" { - return errors.New("output file path leer") - } - - // ffmpeg mit Context (STOP FUNKTIONIERT HIER!) - cmd := exec.CommandContext( - ctx, - ffmpegPath, - "-y", - "-hide_banner", - "-nostats", - "-loglevel", "warning", - "-i", m3u8URL, - "-c", "copy", - outFile, - ) - - var stderr bytes.Buffer - cmd.Stdout = io.Discard - cmd.Stderr = &stderr - - // ✅ live size polling während ffmpeg läuft - stopStat := make(chan struct{}) - - if job != nil { - go func() { - t := time.NewTicker(1 * time.Second) - defer t.Stop() - - var last int64 - for { - select { - case <-ctx.Done(): - return - case <-stopStat: - return - case <-t.C: - fi, err := os.Stat(outFile) - if err != nil { - continue - } - sz := fi.Size() - if sz > 0 && sz != last { - jobsMu.Lock() - job.SizeBytes = sz - jobsMu.Unlock() - notifyJobsChanged() - last = sz - } - } - } - }() - } - - // ✅ WICHTIG: ffmpeg wirklich laufen lassen - err = cmd.Run() - - close(stopStat) - - if err != nil { - msg := strings.TrimSpace(stderr.String()) - if msg != "" { - return fmt.Errorf("ffmpeg m3u8 failed: %w: %s", err, msg) - } - return fmt.Errorf("ffmpeg m3u8 failed: %w", err) - } - - return nil -} - -/* ─────────────────────────────── - Kleine Helper für MFC - ─────────────────────────────── */ - -func extractMFCUsername(input string) string { - s := strings.TrimSpace(input) - if s == "" { - return "" - } - - // 1) URL mit Fragment (#username) - if u, err := url.Parse(s); err == nil && u.Fragment != "" { - return strings.Trim(strings.TrimSpace(u.Fragment), "/") - } - - // 2) URL Pfad: letztes Segment nehmen - if u, err := url.Parse(s); err == nil && u.Host != "" { - p := strings.Trim(u.Path, "/") - if p == "" { - return "" - } - parts := strings.Split(p, "/") - return strings.TrimSpace(parts[len(parts)-1]) - } - - // 3) Fallback: raw - return s -} - func readLine() string { r := bufio.NewReader(os.Stdin) s, _ := r.ReadString('\n') diff --git 
a/backend/preview_hls.go b/backend/preview_hls.go new file mode 100644 index 0000000..fc0b9c3 --- /dev/null +++ b/backend/preview_hls.go @@ -0,0 +1,391 @@ +package main + +import ( + "bytes" + "context" + "fmt" + "net/http" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + "time" +) + +var previewFileRe = regexp.MustCompile(`^(index(_hq)?\.m3u8|seg_(low|hq)_\d+\.ts|seg_\d+\.ts)$`) + +func serveEmptyLiveM3U8(w http.ResponseWriter, r *http.Request) { + // Für Player: gültige Playlist statt 204 liefern + w.Header().Set("Content-Type", "application/vnd.apple.mpegurl; charset=utf-8") + w.Header().Set("Cache-Control", "no-store") + w.Header().Set("X-Content-Type-Options", "nosniff") + // Optional: Player/Proxy darf schnell retryen + w.Header().Set("Retry-After", "1") + + // Bei HEAD nur Header schicken + if r.Method == http.MethodHead { + w.WriteHeader(http.StatusOK) + return + } + + // Minimal gültige LIVE-Playlist (keine Segmente, kein ENDLIST) + // Viele Player bleiben damit im "loading", statt hart zu failen. 
+ body := "#EXTM3U\n" + + "#EXT-X-VERSION:3\n" + + "#EXT-X-TARGETDURATION:2\n" + + "#EXT-X-MEDIA-SEQUENCE:0\n" + + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(body)) +} + +func stopPreview(job *RecordJob) { + jobsMu.Lock() + cmd := job.previewCmd + cancel := job.previewCancel + job.previewCmd = nil + job.previewCancel = nil + job.LiveThumbStarted = false + job.PreviewDir = "" + jobsMu.Unlock() + + if cancel != nil { + cancel() + } + if cmd != nil && cmd.Process != nil { + _ = cmd.Process.Kill() + } +} + +func servePreviewHLSFile(w http.ResponseWriter, r *http.Request, id, file string) { + file = strings.TrimSpace(file) + if file == "" || filepath.Base(file) != file || !previewFileRe.MatchString(file) { + http.Error(w, "ungültige file", http.StatusBadRequest) + return + } + + isIndex := file == "index.m3u8" || file == "index_hq.m3u8" + + jobsMu.Lock() + job, ok := jobs[id] + state := "" + if ok && job != nil { + state = strings.TrimSpace(job.PreviewState) + } + jobsMu.Unlock() + + // ========================= + // ✅ HEAD = nur Existenzcheck (kein hover nötig, kein Preview-Start) + // ========================= + if r.Method == http.MethodHead { + if !ok || job == nil { + w.WriteHeader(http.StatusNotFound) + return + } + if state == "private" { + w.WriteHeader(http.StatusForbidden) + return + } + if state == "offline" { + w.WriteHeader(http.StatusNotFound) + return + } + previewDir := strings.TrimSpace(job.PreviewDir) + if previewDir == "" { + w.WriteHeader(http.StatusNotFound) + return + } + p := filepath.Join(previewDir, file) + if st, err := os.Stat(p); err == nil && !st.IsDir() { + w.Header().Set("Cache-Control", "no-store") + w.WriteHeader(http.StatusOK) + return + } + w.WriteHeader(http.StatusNotFound) + return + } + + // ========================= + // ✅ NEU: Player darf Preview auch ohne Hover starten + // - Frontend hängt &play=1 an (empfohlen) + // - Wir akzeptieren zusätzlich: play=1 => treat as active + // ========================= + active := 
isHover(r) || strings.TrimSpace(r.URL.Query().Get("play")) == "1" + + if !active { + // Kein Hover/Play => niemals Live-HLS abgreifen + if isIndex { + serveEmptyLiveM3U8(w, r) + return + } + http.Error(w, "preview not active", http.StatusNotFound) + return + } + + // active => wenn Job unbekannt, sauber raus + if !ok || job == nil { + if isIndex { + serveEmptyLiveM3U8(w, r) + return + } + http.Error(w, "job nicht gefunden", http.StatusNotFound) + return + } + + // active => Preview starten/keepalive + ensurePreviewStarted(r, job) + touchPreview(job) + + // state ggf. nach Start nochmal lesen + jobsMu.Lock() + state = strings.TrimSpace(job.PreviewState) + jobsMu.Unlock() + + if state == "private" { + http.Error(w, "model private", http.StatusForbidden) + return + } + if state == "offline" { + http.Error(w, "model offline", http.StatusNotFound) + return + } + if state == "error" { + http.Error(w, "preview error", http.StatusServiceUnavailable) + return + } + + previewDir := strings.TrimSpace(job.PreviewDir) + if previewDir == "" { + if isIndex { + serveEmptyLiveM3U8(w, r) + return + } + http.Error(w, "preview nicht verfügbar", http.StatusNotFound) + return + } + + p := filepath.Join(previewDir, file) + + st, err := os.Stat(p) + if err != nil || st.IsDir() { + if isIndex { + serveEmptyLiveM3U8(w, r) + return + } + http.Error(w, "datei nicht gefunden", http.StatusNotFound) + return + } + + ext := strings.ToLower(filepath.Ext(p)) + + // ✅ common: always no-store + w.Header().Set("Cache-Control", "no-store") + // ✅ avoids some proxy buffering surprises (harmless if ignored) + w.Header().Set("X-Accel-Buffering", "no") + + // ========================= + // ✅ .m3u8: rewrite (klein, ReadFile ok) + // ========================= + if ext == ".m3u8" { + raw, err := os.ReadFile(p) + if err != nil { + http.Error(w, "m3u8 read failed", http.StatusInternalServerError) + return + } + + rewritten := rewriteM3U8(raw, id) + + w.Header().Set("Content-Type", 
"application/vnd.apple.mpegurl; charset=utf-8") + w.WriteHeader(http.StatusOK) + _, _ = w.Write(rewritten) + return + } + + // ========================= + // ✅ Segmente: robust streamen + Range-support + // ========================= + switch ext { + case ".ts": + w.Header().Set("Content-Type", "video/mp2t") + case ".m4s": + w.Header().Set("Content-Type", "video/iso.segment") + default: + w.Header().Set("Content-Type", "application/octet-stream") + } + + // ✅ Optional aber sehr hilfreich: + // liefere ein Segment erst aus, wenn es nicht mehr wächst (verhindert "hängende" große .ts) + if ext == ".ts" || ext == ".m4s" { + if !waitForStableFile(p, 2, 120*time.Millisecond) { + // Segment ist vermutlich noch im Schreiben -> lieber 404, Player retryt + http.Error(w, "segment not ready", http.StatusNotFound) + return + } + } + + f, err := os.Open(p) + if err != nil { + http.Error(w, "open failed", http.StatusNotFound) + return + } + defer f.Close() + + // ✅ ServeContent macht Range korrekt und streamt ohne ReadAll. + // name ist nur für logs/cache; modTime für If-Modified-Since etc. 
	// ServeContent handles Range/206 and streams without ReadAll.
	// name is only for logs/caching; modTime enables If-Modified-Since etc.
	http.ServeContent(w, r, file, st.ModTime(), f)

}

// waitForStableFile reports whether the file's size stays constant across
// successive stat checks, i.e. it is (probably) no longer being written.
// It returns false if the file vanishes, is a directory, or never stabilizes
// within `checks` observations spaced `interval` apart.
func waitForStableFile(path string, checks int, interval time.Duration) bool {
	// returns true if size is stable across N checks
	// NOTE(review): the loop sleeps even after the final observation before
	// giving up — harmless for the 2x120ms call sites, but worth confirming.
	var last int64 = -1
	for i := 0; i < checks; i++ {
		st, err := os.Stat(path)
		if err != nil || st.IsDir() {
			return false
		}
		sz := st.Size()
		if last >= 0 && sz == last {
			return true
		}
		last = sz
		time.Sleep(interval)
	}
	// if we never saw stability, assume not ready
	return false
}

// classifyPreviewFFmpegStderr maps ffmpeg stderr output to a preview state.
// It returns ("private", 403) or ("offline", 404) when the upstream HTTP
// error is recognizable, otherwise ("", 0) meaning "unclassified".
func classifyPreviewFFmpegStderr(stderr string) (state string, httpStatus int) {
	s := strings.ToLower(stderr)

	// ffmpeg typically writes:
	// "HTTP error 403 Forbidden" or "Server returned 403 Forbidden"
	if strings.Contains(s, "403 forbidden") || strings.Contains(s, "http error 403") || strings.Contains(s, "server returned 403") {
		return "private", http.StatusForbidden
	}

	// "HTTP error 404 Not Found" or "Server returned 404 Not Found"
	if strings.Contains(s, "404 not found") || strings.Contains(s, "http error 404") || strings.Contains(s, "server returned 404") {
		return "offline", http.StatusNotFound
	}

	return "", 0
}

// startPreviewHLS launches an ffmpeg process that transcodes the model's live
// m3u8 stream into a local low-latency HLS preview under previewDir.
// httpCookie/userAgent, when non-empty, are forwarded to the upstream request.
// Returns an error only for setup failures; ffmpeg itself runs asynchronously.
func startPreviewHLS(ctx context.Context, job *RecordJob, m3u8URL, previewDir, httpCookie, userAgent string) error {
	if strings.TrimSpace(ffmpegPath) == "" {
		return fmt.Errorf("kein ffmpeg gefunden – setze FFMPEG_PATH oder lege ffmpeg(.exe) neben das Backend")
	}

	if err := os.MkdirAll(previewDir, 0755); err != nil {
		return err
	}

	// Reset PreviewState for the fresh start.
	jobsMu.Lock()
	job.PreviewState = ""
	job.PreviewStateAt = ""
	job.PreviewStateMsg = ""
	jobsMu.Unlock()
	notifyJobsChanged()

	// Input-side options shared by all output variants.
	commonIn := []string{"-y"}
	if strings.TrimSpace(userAgent) != "" {
		commonIn = append(commonIn, "-user_agent", userAgent)
	}
	if strings.TrimSpace(httpCookie) != "" {
		commonIn = append(commonIn, "-headers", fmt.Sprintf("Cookie: %s\r\n", httpCookie))
	}
	commonIn = append(commonIn, "-i", m3u8URL)

	
hqArgs := append(commonIn, + "-vf", "scale=480:-2", + "-c:v", "libx264", "-preset", "veryfast", "-tune", "zerolatency", + "-pix_fmt", "yuv420p", + "-profile:v", "main", + "-level", "3.1", + "-threads", "4", + + // GOP ~ 2s (bei 24fps). Optional force_key_frames zusätzlich. + "-g", "48", "-keyint_min", "48", "-sc_threshold", "0", + // optional, wenn du noch große Segmente bekommst: + // "-force_key_frames", "expr:gte(t,n_forced*2)", + + "-map", "0:v:0", + "-map", "0:a:0?", + "-c:a", "aac", "-b:a", "128k", "-ac", "2", + + "-f", "hls", + "-hls_time", "2", + "-hls_list_size", "6", + "-hls_allow_cache", "0", + + // ✅ wichtig: temp_file + "-hls_flags", "delete_segments+append_list+independent_segments+temp_file", + + "-hls_segment_filename", filepath.Join(previewDir, "seg_hq_%05d.ts"), + + // ✅ Empfehlung: weglassen (du rewritest ohnehin) + // "-hls_base_url", baseURL, + + filepath.Join(previewDir, "index_hq.m3u8"), + ) + + cmd := exec.CommandContext(ctx, ffmpegPath, hqArgs...) + var stderr bytes.Buffer + cmd.Stderr = &stderr + + jobsMu.Lock() + job.previewCmd = cmd + jobsMu.Unlock() + + go func() { + if err := previewSem.Acquire(ctx); err != nil { + jobsMu.Lock() + if job.previewCmd == cmd { + job.previewCmd = nil + } + jobsMu.Unlock() + return + } + defer previewSem.Release() + + if err := cmd.Run(); err != nil && ctx.Err() == nil { + st := strings.TrimSpace(stderr.String()) + + // ✅ 403/404 erkennen -> Private/Offline setzen + state, code := classifyPreviewFFmpegStderr(st) + + jobsMu.Lock() + if state != "" { + job.PreviewState = state + job.PreviewStateAt = time.Now().UTC().Format(time.RFC3339Nano) + job.PreviewStateMsg = fmt.Sprintf("ffmpeg input returned HTTP %d", code) + } else { + job.PreviewState = "error" + job.PreviewStateAt = time.Now().UTC().Format(time.RFC3339Nano) + if len(st) > 280 { + job.PreviewStateMsg = st[:280] + "…" + } else { + job.PreviewStateMsg = st + } + } + jobsMu.Unlock() + notifyJobsChanged() + + fmt.Printf("⚠️ preview hq ffmpeg failed: %v 
(%s)\n", err, st) + } + + jobsMu.Lock() + if job.previewCmd == cmd { + job.previewCmd = nil + } + jobsMu.Unlock() + }() + + // ✅ Live thumb writer starten (schreibt generated//thumbs.jpg regelmäßig neu) + startLiveThumbLoop(ctx, job) + + return nil +} diff --git a/backend/preview_jpeg.go b/backend/preview_jpeg.go new file mode 100644 index 0000000..20c8629 --- /dev/null +++ b/backend/preview_jpeg.go @@ -0,0 +1,135 @@ +package main + +import ( + "bytes" + "fmt" + "net/http" + "os" + "os/exec" + "path/filepath" + "sort" + "strings" + "time" +) + +func prunePreviewCacheDir(previewDir string, maxFrames int, maxAge time.Duration) { + entries, err := os.ReadDir(previewDir) + if err != nil { + return + } + + type frame struct { + path string + mt time.Time + } + + now := time.Now() + + var frames []frame + + for _, e := range entries { + name := e.Name() + path := filepath.Join(previewDir, name) + + // .part Dateien immer weg + if strings.HasSuffix(name, ".part") { + _ = os.Remove(path) + continue + } + + // optional: preview.jpg neu erzeugen lassen, wenn uralt + if name == "preview.jpg" { + if info, err := e.Info(); err == nil { + if maxAge > 0 && now.Sub(info.ModTime()) > maxAge { + _ = os.Remove(path) + } + } + continue + } + + // Nur t_*.jpg verwalten + if strings.HasPrefix(name, "t_") && strings.HasSuffix(name, ".jpg") { + info, err := e.Info() + if err != nil { + continue + } + + // alte Frames löschen + if maxAge > 0 && now.Sub(info.ModTime()) > maxAge { + _ = os.Remove(path) + continue + } + + frames = append(frames, frame{path: path, mt: info.ModTime()}) + } + } + + // Anzahl begrenzen: älteste zuerst löschen + if maxFrames > 0 && len(frames) > maxFrames { + sort.Slice(frames, func(i, j int) bool { return frames[i].mt.Before(frames[j].mt) }) + toDelete := len(frames) - maxFrames + for i := 0; i < toDelete; i++ { + _ = os.Remove(frames[i].path) + } + } +} + +func servePreviewJPEGBytes(w http.ResponseWriter, img []byte) { + w.Header().Set("Content-Type", 
"image/jpeg")
	w.Header().Set("Cache-Control", "public, max-age=31536000")
	w.Header().Set("X-Content-Type-Options", "nosniff")
	w.WriteHeader(http.StatusOK)
	_, _ = w.Write(img)
}

// servePreviewJPEGBytesNoStore writes img as a JPEG response that must never
// be cached by the client or any intermediary.
func servePreviewJPEGBytesNoStore(w http.ResponseWriter, img []byte) {
	w.Header().Set("Content-Type", "image/jpeg")
	w.Header().Set("Cache-Control", "no-store, max-age=0")
	w.Header().Set("Pragma", "no-cache")
	w.Header().Set("Expires", "0")
	w.Header().Set("X-Content-Type-Options", "nosniff")
	w.WriteHeader(http.StatusOK)
	_, _ = w.Write(img)
}

// serveLivePreviewJPEGBytes writes a live (frequently refreshed) preview JPEG
// with the strictest anti-caching headers (adds must-revalidate).
func serveLivePreviewJPEGBytes(w http.ResponseWriter, img []byte) {
	w.Header().Set("Content-Type", "image/jpeg")
	w.Header().Set("Cache-Control", "no-store, max-age=0, must-revalidate")
	w.Header().Set("Pragma", "no-cache")
	w.Header().Set("Expires", "0")
	w.Header().Set("X-Content-Type-Options", "nosniff")
	w.WriteHeader(http.StatusOK)
	_, _ = w.Write(img)
}

// servePreviewJPEGFile serves a JPEG from disk as long-lived cacheable content
// (immutable thumbnails). Range/conditional requests are handled by ServeFile.
func servePreviewJPEGFile(w http.ResponseWriter, r *http.Request, path string) {
	w.Header().Set("Content-Type", "image/jpeg")
	w.Header().Set("Cache-Control", "public, max-age=31536000")
	w.Header().Set("X-Content-Type-Options", "nosniff")
	http.ServeFile(w, r, path)
}

// extractFirstFrameJPEG runs ffmpeg to decode the first video frame of the
// file at path, scaled to 720px width, and returns it as MJPEG bytes.
// On failure the trimmed ffmpeg stderr is wrapped into the returned error.
func extractFirstFrameJPEG(path string) ([]byte, error) {
	cmd := exec.Command(
		ffmpegPath,
		"-hide_banner",
		"-loglevel", "error",
		"-i", path,
		"-frames:v", "1",
		"-vf", "scale=720:-2",
		"-q:v", "10",
		"-f", "image2pipe",
		"-vcodec", "mjpeg",
		"pipe:1",
	)

	var out bytes.Buffer
	var stderr bytes.Buffer
	cmd.Stdout = &out
	cmd.Stderr = &stderr

	if err := cmd.Run(); err != nil {
		return nil, fmt.Errorf("ffmpeg first-frame: %w (%s)", err, strings.TrimSpace(stderr.String()))
	}
	return out.Bytes(), nil
}
diff --git a/backend/preview_m3u8_rewrite.go b/backend/preview_m3u8_rewrite.go
new file mode 100644
index 0000000..3fc82d4
--- /dev/null
+++ b/backend/preview_m3u8_rewrite.go
package main

import (
	"bufio"
	"bytes"
	"net/url"
	"path"
	"regexp"
	"strings"
)

// rewriteM3U8 rewrites every relative URI in an HLS playlist so it is fetched
// back through /api/record/preview for the given job id.
// play=1 is appended so follow-up requests (segments, chunklists) keep working
// without a hover keepalive. Absolute URLs and already-rewritten URIs are
// left untouched. On a scanner error the raw input is returned unchanged.
func rewriteM3U8(raw []byte, id string) []byte {
	// Route all URIs back through /api/record/preview.
	// Important: play=1 stays attached so follow-up requests work without hover.
	base := "/api/record/preview?id=" + url.QueryEscape(id) + "&file="

	var out bytes.Buffer
	sc := bufio.NewScanner(bytes.NewReader(raw))

	// Scanner default token limit is 64K – m3u8 playlists are normally small.
	// Increase the buffer if you ever serve huge playlists.

	for sc.Scan() {
		line := sc.Text()
		trim := strings.TrimSpace(line)

		if trim == "" {
			out.WriteByte('\n')
			continue
		}

		// Comments/tags: rewrite URI="..." attributes inside tags where present
		if strings.HasPrefix(trim, "#") {
			// e.g. EXT-X-KEY:URI="..."
			line = rewriteAttrURI(line, base)
			out.WriteString(line)
			out.WriteByte('\n')
			continue
		}

		// Not a tag => a URI (segment or child playlist)
		u := trim

		// Absolute URL? leave as-is
		if strings.HasPrefix(u, "http://") || strings.HasPrefix(u, "https://") {
			out.WriteString(line)
			out.WriteByte('\n')
			continue
		}

		// Already routed through our API? leave as-is
		if strings.Contains(u, "/api/record/preview") {
			out.WriteString(line)
			out.WriteByte('\n')
			continue
		}

		// Take only the basename (ffmpeg normally writes no subdirectories)
		name := path.Base(u)

		// Append play=1 here:
		out.WriteString(base + url.QueryEscape(name) + "&play=1")
		out.WriteByte('\n')
	}

	if err := sc.Err(); err != nil {
		// If the scanner fails for any reason: return raw (better than broken output)
		return raw
	}
	return out.Bytes()
}

// rewriteAttrURI rewrites the first URI="..." attribute in a tag line to go
// through the preview endpoint (base already contains id= and &file=).
func rewriteAttrURI(line, base string) string {
	// Rewrites URI="xyz" in EXT-X-KEY / EXT-X-MAP / EXT-X-MEDIA / EXT-X-I-FRAME-STREAM-INF etc.
	// Only relative URIs are touched.
	const key = `URI="`
	i := strings.Index(line, key)
	if i < 0 {
		return line
	}

	j := strings.Index(line[i+len(key):], `"`)
	if j < 0 {
		return line
	}

	start := i + len(key)
	end := start + j
	val := line[start:end]
	valTrim := strings.TrimSpace(val)

	// absolute or already rewritten => nothing to do
	if strings.HasPrefix(valTrim, "http://") || strings.HasPrefix(valTrim, "https://") || strings.Contains(valTrim, "/api/record/preview") {
		return line
	}

	name := path.Base(valTrim)
	repl := base + url.QueryEscape(name) + "&play=1"

	return line[:start] + repl + line[end:]
}

// rewriteQuotedURI rewrites ALL URI="..." attributes in a line via regexp,
// keyed by job id. Unlike rewriteAttrURI it does not append play=1 and skips
// root-relative ("/...") URIs as well.
// NOTE(review): the regexp is compiled on every call — hoist to a package-level
// var if this ever lands on a hot path.
func rewriteQuotedURI(line, id string) string {
	re := regexp.MustCompile(`URI="([^"]+)"`)
	return re.ReplaceAllStringFunc(line, func(m string) string {
		sub := re.FindStringSubmatch(m)
		if len(sub) != 2 {
			return m
		}
		u := sub[1]
		uu := strings.TrimSpace(u)
		if uu == "" || strings.HasPrefix(uu, "http://") || strings.HasPrefix(uu, "https://") || strings.HasPrefix(uu, "/") {
			return m
		}
		repl := "/api/record/preview?id=" + url.QueryEscape(id) + "&file=" + url.QueryEscape(uu)
		return `URI="` + repl + `"`
	})
}

// rewriteM3U8ToPreviewEndpoint rewrites every non-tag line of a playlist
// (given as a string) to the preview endpoint. Simpler line-based variant of
// rewriteM3U8: no play=1, no tag-attribute handling, no absolute-URL skip.
func rewriteM3U8ToPreviewEndpoint(m3u8 string, id string) string {
	lines := strings.Split(m3u8, "\n")
	escapedID := url.QueryEscape(id)

	for i, line := range lines {
		l := strings.TrimSpace(line)
		if l == "" || strings.HasPrefix(l, "#") {
			continue
		}
		// Rewrite segment/URI lines
		lines[i] = "/api/record/preview?id=" + escapedID + "&file=" + url.QueryEscape(l)
	}

	return strings.Join(lines, "\n")
}
diff --git a/backend/preview_status_svg.go b/backend/preview_status_svg.go
new file mode 100644
index 0000000..eaf96da
--- /dev/null
+++ b/backend/preview_status_svg.go
package main

import (
	"html"
	"net/http"
	"strings"
)

// servePreviewStatusSVG renders a small status SVG (e.g. "Private"/"Offline")
// used as a placeholder when no preview image is available. label is
// HTML-escaped; a non-positive status defaults to 200.
func servePreviewStatusSVG(w http.ResponseWriter, label string, status int) {
	w.Header().Set("Content-Type", "image/svg+xml; charset=utf-8")
	w.Header().Set("Cache-Control",
"no-store") + w.Header().Set("X-Content-Type-Options", "nosniff") + + if status <= 0 { + status = http.StatusOK + } + + title := html.EscapeString(strings.TrimSpace(label)) + if title == "" { + title = "Preview" + } + + // 16:9 (passt zu deinen Cards) + svg := ` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ` + title + ` + + Preview nicht verfügbar + +` + + w.WriteHeader(status) + _, _ = w.Write([]byte(svg)) +} diff --git a/backend/record_handlers.go b/backend/record_handlers.go new file mode 100644 index 0000000..3a46733 --- /dev/null +++ b/backend/record_handlers.go @@ -0,0 +1,1611 @@ +package main + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "net/http" + "net/url" + "os" + "path" + "path/filepath" + "runtime" + "sort" + "strconv" + "strings" + "sync/atomic" + "time" +) + +type RecordRequest struct { + URL string `json:"url"` + Cookie string `json:"cookie,omitempty"` + UserAgent string `json:"userAgent,omitempty"` + Hidden bool `json:"hidden,omitempty"` +} + +type doneListResponse struct { + Items []*RecordJob `json:"items"` + TotalCount int `json:"totalCount"` + Page int `json:"page,omitempty"` + PageSize int `json:"pageSize,omitempty"` +} + +type doneMetaResp struct { + Count int `json:"count"` +} + +type durationReq struct { + Files []string `json:"files"` +} + +type durationItem struct { + File string `json:"file"` + DurationSeconds float64 `json:"durationSeconds,omitempty"` + Error string `json:"error,omitempty"` +} + +type undoDeleteToken struct { + Trash string `json:"trash"` // basename in .trash + RelDir string `json:"relDir"` // dir relativ zu doneAbs, z.B. ".", "keep/model", "model" + File string `json:"file"` // original basename, z.B. 
"HOT xyz.mp4"
}

// encodeUndoDeleteToken serializes an undo token as base64url(JSON) so it can
// be carried safely in a URL or request body.
func encodeUndoDeleteToken(t undoDeleteToken) (string, error) {
	b, err := json.Marshal(t)
	if err != nil {
		return "", err
	}
	return base64.RawURLEncoding.EncodeToString(b), nil
}

// decodeUndoDeleteToken is the inverse of encodeUndoDeleteToken.
// On any decode/unmarshal error the zero token is returned alongside the error.
func decodeUndoDeleteToken(raw string) (undoDeleteToken, error) {
	var t undoDeleteToken
	b, err := base64.RawURLEncoding.DecodeString(raw)
	if err != nil {
		return t, err
	}
	if err := json.Unmarshal(b, &t); err != nil {
		return t, err
	}
	return t, nil
}

// isSafeRelDir reports whether rel is a safe *relative* directory reference:
// non-empty, not absolute, no ".." traversal, no backslashes after cleaning.
// "." is explicitly allowed (meaning "the root itself").
func isSafeRelDir(rel string) bool {
	rel = strings.TrimSpace(rel)
	if rel == "" {
		return false
	}
	// normalize to slash for validation
	rel = filepath.ToSlash(rel)
	if strings.HasPrefix(rel, "/") {
		return false
	}
	clean := path.Clean(rel) // path.Clean => forward slashes
	if clean == "." {
		return true
	}
	if strings.HasPrefix(clean, "../") || clean == ".." {
		return false
	}
	// prevent weird traversal
	if strings.Contains(clean, `\`) {
		return false
	}
	return true
}

// isSafeBasename reports whether name is a plain file name: non-empty and
// containing no path separators (forward or backward slashes).
func isSafeBasename(name string) bool {
	name = strings.TrimSpace(name)
	if name == "" {
		return false
	}
	if strings.Contains(name, "/") || strings.Contains(name, "\\") {
		return false
	}
	return filepath.Base(name) == name
}

// recordList is the GET handler returning all visible (non-hidden) in-memory
// record jobs as JSON, newest first.
func recordList(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Nur GET erlaubt", http.StatusMethodNotAllowed)
		return
	}

	jobsMu.Lock()
	list := make([]*RecordJob, 0, len(jobs))
	for _, j := range jobs {
		// Hidden (and nil) jobs are never returned -> the UI never sees probe jobs
		if j == nil || j.Hidden {
			continue
		}
		list = append(list, j)
	}
	jobsMu.Unlock()

	// optional: newest first
	sort.Slice(list, func(i, j int) bool {
		return list[i].StartedAt.After(list[j].StartedAt)
	})

	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Cache-Control", "no-store")
	_ = json.NewEncoder(w).Encode(list)
}

// writeSSE writes data as a Server-Sent-Events payload: every line of the
// payload is prefixed with "data: " and the event is terminated by a blank line.
func writeSSE(w http.ResponseWriter, data []byte) {
	// SSE spec: prefix every payload line with "data:"
	s := strings.ReplaceAll(string(data), "\r\n", "\n")
	lines := strings.Split(s, "\n")
	for _, line := range lines {
		fmt.Fprintf(w, "data: %s\n", line)
	}
	fmt.Fprint(w, "\n")
}

// handleDoneStream is the SSE endpoint pushing "doneChanged" events to the
// client. It registers a channel with doneHub, sends an initial hello event,
// then forwards hub messages until the request context is canceled.
func handleDoneStream(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/event-stream")
	w.Header().Set("Cache-Control", "no-cache")
	w.Header().Set("Connection", "keep-alive")

	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "streaming unsupported", http.StatusInternalServerError)
		return
	}

	ch := make(chan []byte, 16)
	doneHub.add(ch)
	defer doneHub.remove(ch)

	// optional: initial ping/hello so the client is "alive" immediately
	fmt.Fprintf(w, "event: doneChanged\ndata: {\"type\":\"doneChanged\",\"seq\":%d,\"ts\":%d}\n\n",
		atomic.LoadUint64(&doneSeq), time.Now().UnixMilli())
	flusher.Flush()

	ctx := r.Context()
	for {
		select {
		case <-ctx.Done():
			return
		case b := <-ch:
			// important: set the event name -> client can addEventListener("doneChanged", ...)
			fmt.Fprintf(w, "event: doneChanged\ndata: %s\n\n", b)
			flusher.Flush()
		}
	}
}

// handleRecordVideo serves a recorded video either by job id or by file name.
// NOTE(review): the id branch below is a stub (lookup commented out) — id
// requests currently fall through to 404 further down; confirm intended.
func handleRecordVideo(w http.ResponseWriter, r *http.Request) {
	// Priority: id -> (the existing job mapping), otherwise file
	id := strings.TrimSpace(r.URL.Query().Get("id"))
	if id != "" {
		// if existing logic is available: resolve the path from the JobStore and use ServeContent
		// path := lookupPathByJobID(id)
		// ...
+ } + + file := strings.TrimSpace(r.URL.Query().Get("file")) + if file == "" && id == "" { + http.Error(w, "missing id or file", http.StatusBadRequest) + return + } + + var path string + var err error + + if file != "" { + path, err = findVideoPath(file) + if err != nil { + http.NotFound(w, r) + return + } + } else { + // TODO: wenn id verwendet wurde, path hier setzen + http.NotFound(w, r) + return + } + + f, err := openForReadShareDelete(path) + if err != nil { + http.Error(w, "open failed", http.StatusInternalServerError) + return + } + defer f.Close() + + st, err := f.Stat() + if err != nil { + http.Error(w, "stat failed", http.StatusInternalServerError) + return + } + + // ✅ wichtig für Browser/VideoJS + ext := strings.ToLower(filepath.Ext(path)) + switch ext { + case ".ts": + w.Header().Set("Content-Type", "video/mp2t") + default: + w.Header().Set("Content-Type", "video/mp4") + } + + w.Header().Set("Accept-Ranges", "bytes") + w.Header().Set("Cache-Control", "no-store") + + // ✅ Range/206/Seeking korrekt + http.ServeContent(w, r, filepath.Base(path), st.ModTime(), f) +} + +func startRecordingFromRequest(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + http.Error(w, "Nur POST erlaubt", http.StatusMethodNotAllowed) + return + } + + var req RecordRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + job, err := startRecordingInternal(req) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(job) +} + +func recordVideo(w http.ResponseWriter, r *http.Request) { + + origin := r.Header.Get("Origin") + if origin != "" { + // ✅ dev origin erlauben (oder "*" wenn’s dir egal ist) + w.Header().Set("Access-Control-Allow-Origin", origin) + w.Header().Set("Vary", "Origin") + w.Header().Set("Access-Control-Allow-Methods", "GET,HEAD,OPTIONS") + 
w.Header().Set("Access-Control-Allow-Headers", "Range") + w.Header().Set("Access-Control-Expose-Headers", "Content-Length, Content-Range, Accept-Ranges") + } + if r.Method == http.MethodOptions { + w.WriteHeader(http.StatusNoContent) + return + } + + // ✅ Wiedergabe über Dateiname (für doneDir / recordDir) + if raw := strings.TrimSpace(r.URL.Query().Get("file")); raw != "" { + // explizit decoden (zur Sicherheit) + file, err := url.QueryUnescape(raw) + if err != nil { + http.Error(w, "ungültiger file", http.StatusBadRequest) + return + } + file = strings.TrimSpace(file) + + // kein Pfad, keine Backslashes, kein Traversal + if file == "" || + strings.Contains(file, "/") || + strings.Contains(file, "\\") || + filepath.Base(file) != file { + http.Error(w, "ungültiger file", http.StatusBadRequest) + return + } + + ext := strings.ToLower(filepath.Ext(file)) + if ext != ".mp4" && ext != ".ts" { + http.Error(w, "nicht erlaubt", http.StatusForbidden) + return + } + + s := getSettings() + recordAbs, err := resolvePathRelativeToApp(s.RecordDir) + if err != nil { + http.Error(w, "recordDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) + return + } + doneAbs, err := resolvePathRelativeToApp(s.DoneDir) + if err != nil { + http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) + return + } + + // Kandidaten: erst done (inkl. 1 Level Subdir, aber ohne "keep"), + // dann keep (inkl. 
1 Level Subdir), dann recordDir + names := []string{file} + + // Falls UI noch ".ts" kennt, die Datei aber schon als ".mp4" existiert: + if ext == ".ts" { + mp4File := strings.TrimSuffix(file, ext) + ".mp4" + names = append(names, mp4File) + } + + var outPath string + for _, name := range names { + // done root + done// (skip "keep") + if p, _, ok := findFileInDirOrOneLevelSubdirs(doneAbs, name, "keep"); ok { + outPath = p + break + } + // keep root + keep// + if p, _, ok := findFileInDirOrOneLevelSubdirs(filepath.Join(doneAbs, "keep"), name, ""); ok { + outPath = p + break + } + // record root (+ optional 1 Level Subdir) + if p, _, ok := findFileInDirOrOneLevelSubdirs(recordAbs, name, ""); ok { + outPath = p + break + } + } + + if outPath == "" { + http.Error(w, "datei nicht gefunden", http.StatusNotFound) + return + } + + // TS kann der Browser nicht zuverlässig direkt -> on-demand remux nach MP4 + if strings.ToLower(filepath.Ext(outPath)) == ".ts" { + newOut, err := maybeRemuxTS(outPath) + if err != nil { + http.Error(w, "TS kann im Browser nicht abgespielt werden; Remux fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) + return + } + if strings.TrimSpace(newOut) == "" { + http.Error(w, "TS kann im Browser nicht abgespielt werden; Remux hat keine MP4 erzeugt", http.StatusInternalServerError) + return + } + outPath = newOut + + // sicherstellen, dass wirklich eine MP4 existiert + fi, err := os.Stat(outPath) + if err != nil || fi.IsDir() || fi.Size() == 0 || strings.ToLower(filepath.Ext(outPath)) != ".mp4" { + http.Error(w, "Remux-Ergebnis ungültig", http.StatusInternalServerError) + return + } + } + + // ✅ Falls Datei ".mp4" heißt, aber eigentlich TS/HTML ist -> nicht als MP4 ausliefern + if strings.ToLower(filepath.Ext(outPath)) == ".mp4" { + kind, _ := sniffVideoKind(outPath) + switch kind { + case "ts": + newOut, err := maybeRemuxTS(outPath) + if err != nil { + http.Error(w, "Datei ist TS (nur .mp4 benannt); Remux fehlgeschlagen: "+err.Error(), 
http.StatusInternalServerError) + return + } + outPath = newOut + case "html": + http.Error(w, "Server liefert HTML statt Video (Pfad/Lookup prüfen)", http.StatusInternalServerError) + return + } + } + + w.Header().Set("Cache-Control", "no-store") + + serveVideoFile(w, r, outPath) + return + + } + + // ✅ ALT: Wiedergabe über Job-ID (funktioniert nur solange Job im RAM existiert) + id := strings.TrimSpace(r.URL.Query().Get("id")) + if id == "" { + http.Error(w, "id fehlt", http.StatusBadRequest) + return + } + + jobsMu.Lock() + job, ok := jobs[id] + jobsMu.Unlock() + if !ok { + http.Error(w, "job nicht gefunden", http.StatusNotFound) + return + } + + outPath := filepath.Clean(strings.TrimSpace(job.Output)) + if outPath == "" { + http.Error(w, "output fehlt", http.StatusNotFound) + return + } + + if !filepath.IsAbs(outPath) { + abs, err := resolvePathRelativeToApp(outPath) + if err != nil { + http.Error(w, "pfad auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) + return + } + outPath = abs + } + + fi, err := os.Stat(outPath) + if err != nil || fi.IsDir() || fi.Size() == 0 { + http.Error(w, "datei nicht gefunden", http.StatusNotFound) + return + } + + // TS kann der Browser nicht zuverlässig direkt -> on-demand remux nach MP4 + if strings.ToLower(filepath.Ext(outPath)) == ".ts" { + newOut, err := maybeRemuxTS(outPath) + if err != nil { + http.Error(w, "TS Remux fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) + return + } + if strings.TrimSpace(newOut) == "" { + http.Error(w, "TS kann im Browser nicht abgespielt werden; Remux hat keine MP4 erzeugt", http.StatusInternalServerError) + return + } + outPath = newOut + + fi, err := os.Stat(outPath) + if err != nil || fi.IsDir() || fi.Size() == 0 || strings.ToLower(filepath.Ext(outPath)) != ".mp4" { + http.Error(w, "Remux-Ergebnis ungültig", http.StatusInternalServerError) + return + } + } + + serveVideoFile(w, r, outPath) +} + +func recordStatus(w http.ResponseWriter, r *http.Request) { 
	id := r.URL.Query().Get("id")
	if id == "" {
		http.Error(w, "id fehlt", http.StatusBadRequest)
		return
	}

	jobsMu.Lock()
	job, ok := jobs[id]
	jobsMu.Unlock()

	if !ok {
		http.Error(w, "job nicht gefunden", http.StatusNotFound)
		return
	}

	// NOTE(review): unlike recordStop, no Content-Type header is set here and
	// the Encode error is discarded — confirm whether this should match
	// recordStop's "application/json" response.
	json.NewEncoder(w).Encode(job)
}

// recordStop is the POST handler that stops a single running record job by id
// and echoes the job back as JSON.
func recordStop(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Nur POST", http.StatusMethodNotAllowed)
		return
	}

	id := r.URL.Query().Get("id")

	jobsMu.Lock()
	job, ok := jobs[id]
	jobsMu.Unlock()
	if !ok {
		http.Error(w, "job nicht gefunden", http.StatusNotFound)
		return
	}

	stopJobsInternal([]*RecordJob{job})

	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(job)
}

// recordDoneList is the GET handler listing finished recordings from the done
// directory. Supports includeKeep, a model filter, pagination (page/pageSize),
// sorting, all=1 and withCount=1 query parameters; .trash content is excluded.
func recordDoneList(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Nur GET erlaubt", http.StatusMethodNotAllowed)
		return
	}

	// optional: also include /done/keep/ (default: false)
	qKeep := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("includeKeep")))
	includeKeep := qKeep == "1" || qKeep == "true" || qKeep == "yes"

	// optional model filter (pagination then effectively works "per model")
	normalizeQueryModel := func(raw string) string {
		s := strings.TrimSpace(raw)
		if s == "" {
			return ""
		}
		s = strings.TrimPrefix(s, "http://")
		s = strings.TrimPrefix(s, "https://")

		// take the last URL segment, in case someone passes "…/modelname"
		if strings.Contains(s, "/") {
			parts := strings.Split(s, "/")
			for i := len(parts) - 1; i >= 0; i-- {
				p := strings.TrimSpace(parts[i])
				if p != "" {
					s = p
					break
				}
			}
		}
		// in case "host:model" is passed
		if strings.Contains(s, ":") {
			s = strings.TrimSpace(strings.Split(s, ":")[len(strings.Split(s, ":"))-1])
		}

		s = strings.TrimPrefix(s, "@")
		return strings.ToLower(strings.TrimSpace(s))
	}

	qModel := normalizeQueryModel(r.URL.Query().Get("model"))

	// optional:
Pagination (1-based). Wenn page/pageSize fehlen -> wie vorher: komplette Liste + page := 0 + pageSize := 0 + if v := strings.TrimSpace(r.URL.Query().Get("page")); v != "" { + if n, err := strconv.Atoi(v); err == nil && n > 0 { + page = n + } + } + if v := strings.TrimSpace(r.URL.Query().Get("pageSize")); v != "" { + if n, err := strconv.Atoi(v); err == nil && n > 0 { + pageSize = n + } + } + + // optional: Sort + // supported: completed_(asc|desc), model_(asc|desc), file_(asc|desc), duration_(asc|desc), size_(asc|desc) + sortMode := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("sort"))) + if sortMode == "" { + sortMode = "completed_desc" + } + + // ⚠️ Backwards-Compat: alte model_* Sorts auf file_* mappen + if sortMode == "model_asc" { + sortMode = "file_asc" + } + if sortMode == "model_desc" { + sortMode = "file_desc" + } + + // ✅ all=1 -> immer komplette Liste zurückgeben (Pagination deaktivieren) + qAll := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("all"))) + fetchAll := qAll == "1" || qAll == "true" || qAll == "yes" + if fetchAll { + page = 0 + pageSize = 0 + } + + // ✅ .trash niemals als "done item" zählen/listen + isTrashOutput := func(p string) bool { + pp := strings.ToLower(filepath.ToSlash(strings.TrimSpace(p))) + return strings.Contains(pp, "/.trash/") || strings.HasSuffix(pp, "/.trash") + } + + // --- helpers (ModelKey aus Filename/Dir ableiten) --- + + modelFromStem := func(stem string) string { + // stem: lower, ohne ext, ohne HOT + if stem == "" { + return "" + } + if m := startedAtFromFilenameRe.FindStringSubmatch(stem); m != nil { + return strings.ToLower(strings.TrimSpace(m[1])) + } + // fallback: alles vor letztem "_" (oder kompletter stem) + if i := strings.LastIndex(stem, "_"); i > 0 { + return strings.ToLower(strings.TrimSpace(stem[:i])) + } + return strings.ToLower(strings.TrimSpace(stem)) + } + + modelFromFullPath := func(full string) string { + name := strings.ToLower(filepath.Base(full)) + stem := strings.TrimSuffix(name, 
filepath.Ext(name)) + stem = strings.TrimPrefix(stem, "hot ") + mk := modelFromStem(stem) + + // fallback: wenn Dateiname nichts taugt, aus Ordner nehmen (/done//file) + if mk == "" { + parent := strings.ToLower(filepath.Base(filepath.Dir(full))) + parent = strings.TrimSpace(parent) + if parent != "" && parent != "keep" { + mk = parent + } + } + return mk + } + + isTrashPath := func(full string) bool { + p := strings.ReplaceAll(full, "\\", "/") + // match: ".../.trash/file.ext" oder ".../.trash" + return strings.Contains(p, "/.trash/") || strings.HasSuffix(p, "/.trash") + } + + // --- resolve done path --- + + s := getSettings() + doneAbs, err := resolvePathRelativeToApp(s.DoneDir) + if err != nil { + http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) + return + } + + // Wenn kein DoneDir gesetzt ist → einfach leere Liste zurückgeben + if strings.TrimSpace(doneAbs) == "" { + w.Header().Set("Content-Type", "application/json") + w.Header().Set("Cache-Control", "no-store") + _ = json.NewEncoder(w).Encode(doneListResponse{ + Items: []*RecordJob{}, + TotalCount: 0, + Page: page, + PageSize: pageSize, + }) + return + } + + type scanDir struct { + dir string + skipKeep bool // nur für doneAbs: "keep" nicht doppelt scannen + } + + dirs := []scanDir{{dir: doneAbs, skipKeep: true}} + if includeKeep { + dirs = append(dirs, scanDir{dir: filepath.Join(doneAbs, "keep"), skipKeep: false}) + } + + list := make([]*RecordJob, 0, 256) + + addFile := func(full string, fi os.FileInfo) { + // ✅ .trash niemals zählen / zurückgeben + if isTrashPath(full) { + return + } + + name := filepath.Base(full) + ext := strings.ToLower(filepath.Ext(name)) + if ext != ".mp4" && ext != ".ts" { + return + } + + // ✅ .trash aus Done-Liste ausschließen (auch für totalCount/tab counter) + if isTrashOutput(full) { + return + } + + // ✅ NEU: Model-Filter vor dem teureren Meta-Kram + if qModel != "" { + if mk := modelFromFullPath(full); mk != qModel { + return + } 
+ } + + base := strings.TrimSuffix(name, filepath.Ext(name)) + t := fi.ModTime() + + // StartedAt aus Dateiname (Fallback: ModTime) + start := t + stem := base + if strings.HasPrefix(stem, "HOT ") { + stem = strings.TrimPrefix(stem, "HOT ") + } + if m := startedAtFromFilenameRe.FindStringSubmatch(stem); m != nil { + mm, _ := strconv.Atoi(m[2]) + dd, _ := strconv.Atoi(m[3]) + yy, _ := strconv.Atoi(m[4]) + hh, _ := strconv.Atoi(m[5]) + mi, _ := strconv.Atoi(m[6]) + ss, _ := strconv.Atoi(m[7]) + start = time.Date(yy, time.Month(mm), dd, hh, mi, ss, 0, time.Local) + } + + dur := 0.0 + + // 1) meta.json aus generated//meta.json lesen (schnell) + id := stripHotPrefix(strings.TrimSuffix(filepath.Base(full), filepath.Ext(full))) + + srcURL := "" + if strings.TrimSpace(id) != "" { + if mp, err := generatedMetaFile(id); err == nil { + + if d, ok := readVideoMetaDuration(mp, fi); ok { + dur = d + } + + if u, ok := readVideoMetaSourceURL(mp, fi); ok { + srcURL = u + } + } + } + + // 2) Fallback: RAM-Cache only (immer noch schnell, kein ffprobe) + if dur <= 0 { + dur = durationSecondsCacheOnly(full, fi) + } + + // 3) KEIN ffprobe hier! (sonst wird die API wieder langsam) + + list = append(list, &RecordJob{ + ID: base, + Output: full, + SourceURL: srcURL, + Status: JobFinished, + StartedAt: start, + EndedAt: &t, + DurationSeconds: dur, + SizeBytes: fi.Size(), + }) + } + + for _, sd := range dirs { + entries, err := os.ReadDir(sd.dir) + if err != nil { + if os.IsNotExist(err) { + if sd.dir == doneAbs { + w.Header().Set("Content-Type", "application/json") + w.Header().Set("Cache-Control", "no-store") + _ = json.NewEncoder(w).Encode(doneListResponse{ + Items: []*RecordJob{}, + TotalCount: 0, + Page: page, + PageSize: pageSize, + }) + return + + } + continue + } + if sd.dir == doneAbs { + http.Error(w, "doneDir lesen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) + return + } + continue + } + + for _, e := range entries { + // Subdir: 1 Level rein (z.B. 
/done// oder /done/keep//) + if e.IsDir() { + // ✅ .trash Ordner niemals scannen + if e.Name() == ".trash" { + continue + } + + if sd.skipKeep && e.Name() == "keep" { + continue + } + + // ✅ .trash nie scannen + if strings.EqualFold(e.Name(), ".trash") { + continue + } + + sub := filepath.Join(sd.dir, e.Name()) + subEntries, err := os.ReadDir(sub) + if err != nil { + continue + } + for _, se := range subEntries { + if se.IsDir() { + continue + } + full := filepath.Join(sub, se.Name()) + fi, err := os.Stat(full) + if err != nil || fi.IsDir() || fi.Size() == 0 { + continue + } + addFile(full, fi) + } + continue + } + + full := filepath.Join(sd.dir, e.Name()) + fi, err := os.Stat(full) + if err != nil || fi.IsDir() || fi.Size() == 0 { + continue + } + addFile(full, fi) + } + } + + // helpers (Sort) + fileForSort := func(j *RecordJob) string { + f := strings.ToLower(filepath.Base(j.Output)) + // HOT Prefix aus Sortierung rausnehmen + f = strings.TrimPrefix(f, "hot ") + return f + } + durationForSort := func(j *RecordJob) (sec float64, ok bool) { + if j.DurationSeconds > 0 { + return j.DurationSeconds, true + } + return 0, false + } + + // Sortierung + sort.Slice(list, func(i, j int) bool { + a, b := list[i], list[j] + ta, tb := time.Time{}, time.Time{} + if a.EndedAt != nil { + ta = *a.EndedAt + } + if b.EndedAt != nil { + tb = *b.EndedAt + } + + switch sortMode { + case "completed_asc": + if !ta.Equal(tb) { + return ta.Before(tb) + } + return fileForSort(a) < fileForSort(b) + case "completed_desc": + if !ta.Equal(tb) { + return ta.After(tb) + } + return fileForSort(a) < fileForSort(b) + case "file_asc": + fa, fb := fileForSort(a), fileForSort(b) + if fa != fb { + return fa < fb + } + if !ta.Equal(tb) { + return ta.After(tb) + } + return fileForSort(a) < fileForSort(b) + case "file_desc": + fa, fb := fileForSort(a), fileForSort(b) + if fa != fb { + return fa > fb + } + if !ta.Equal(tb) { + return ta.After(tb) + } + return fileForSort(a) < fileForSort(b) + + case 
"duration_asc": + da, okA := durationForSort(a) + db, okB := durationForSort(b) + if okA != okB { + return okA // unbekannt nach hinten + } + if okA && okB && da != db { + return da < db + } + if !ta.Equal(tb) { + return ta.After(tb) + } + return fileForSort(a) < fileForSort(b) + case "duration_desc": + da, okA := durationForSort(a) + db, okB := durationForSort(b) + if okA != okB { + return okA + } + if okA && okB && da != db { + return da > db + } + if !ta.Equal(tb) { + return ta.After(tb) + } + return fileForSort(a) < fileForSort(b) + + case "size_asc": + if a.SizeBytes != b.SizeBytes { + return a.SizeBytes < b.SizeBytes + } + if !ta.Equal(tb) { + return ta.After(tb) + } + return fileForSort(a) < fileForSort(b) + case "size_desc": + if a.SizeBytes != b.SizeBytes { + return a.SizeBytes > b.SizeBytes + } + if !ta.Equal(tb) { + return ta.After(tb) + } + return fileForSort(a) < fileForSort(b) + default: + if !ta.Equal(tb) { + return ta.After(tb) + } + return fileForSort(a) < fileForSort(b) + } + }) + + // ✅ optional: count mitsenden + qWithCount := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("withCount"))) + withCount := qWithCount == "1" || qWithCount == "true" || qWithCount == "yes" + + // ✅ Gesamtanzahl IMMER vor Pagination merken + totalCount := len(list) + + // ✅ Pagination nur auf "items" anwenden (list bleibt für totalCount intakt) + items := list + if pageSize > 0 && !fetchAll { + if page <= 0 { + page = 1 + } + start := (page - 1) * pageSize + if start < 0 { + start = 0 + } + if start >= totalCount { + items = []*RecordJob{} + } else { + end := start + pageSize + if end > totalCount { + end = totalCount + } + items = list[start:end] + } + } + + w.Header().Set("Content-Type", "application/json") + w.Header().Set("Cache-Control", "no-store") + + // ✅ Wenn Frontend "withCount=1" nutzt: {count, items} + if withCount { + _ = json.NewEncoder(w).Encode(map[string]any{ + "count": totalCount, + "items": items, + }) + return + } + + // ✅ Standard-Response: 
// recordDeleteVideo moves a finished video (identified by its basename via the
// "file" query parameter) into a single-slot trash folder under doneDir/.trash,
// so exactly one delete can be undone. Accepts POST and DELETE (the frontend
// currently sends POST). Responds with JSON containing the undo token.
func recordDeleteVideo(w http.ResponseWriter, r *http.Request) {
	// Frontend currently uses POST (see FinishedDownloads), so allow POST + DELETE.
	if r.Method != http.MethodPost && r.Method != http.MethodDelete {
		http.Error(w, "Nur POST oder DELETE erlaubt", http.StatusMethodNotAllowed)
		return
	}

	raw := strings.TrimSpace(r.URL.Query().Get("file"))
	if raw == "" {
		http.Error(w, "file fehlt", http.StatusBadRequest)
		return
	}

	// Decode safely; reject malformed percent-encoding.
	file, err := url.QueryUnescape(raw)
	if err != nil {
		http.Error(w, "ungültiger file", http.StatusBadRequest)
		return
	}
	file = strings.TrimSpace(file)

	// Only a bare basename is allowed (no subdirectories, no path traversal).
	if file == "" ||
		strings.Contains(file, "/") ||
		strings.Contains(file, "\\") ||
		filepath.Base(file) != file {
		http.Error(w, "ungültiger file", http.StatusBadRequest)
		return
	}

	// Only video containers this app produces may be deleted.
	ext := strings.ToLower(filepath.Ext(file))
	if ext != ".mp4" && ext != ".ts" {
		http.Error(w, "nicht erlaubt", http.StatusForbidden)
		return
	}

	s := getSettings()
	doneAbs, err := resolvePathRelativeToApp(s.DoneDir)
	if err != nil {
		http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	if strings.TrimSpace(doneAbs) == "" {
		http.Error(w, "doneDir ist leer", http.StatusBadRequest)
		return
	}

	// Resolve in done + done/<sub> as well as keep + keep/<sub>.
	target, from, fi, err := resolveDoneFileByName(doneAbs, file)
	if err != nil {
		http.Error(w, "datei nicht gefunden", http.StatusNotFound)
		return
	}
	if fi != nil && fi.IsDir() {
		http.Error(w, "ist ein verzeichnis", http.StatusBadRequest)
		return
	}

	// Single-slot trash: only the *most recently* deleted file can be restored.
	trashDir := filepath.Join(doneAbs, ".trash")

	// If the single-slot trash already holds a file: remember its ID, but only
	// remove its generated assets AFTER .trash has actually been emptied.
	prevBase := ""
	prevCanonical := ""

	if b, err := os.ReadFile(filepath.Join(trashDir, "last.json")); err == nil && len(b) > 0 {
		var prev struct {
			File string `json:"file"`
		}
		if err := json.Unmarshal(b, &prev); err == nil {
			prevFile := strings.TrimSpace(prev.File)
			if prevFile != "" {
				prevBase = strings.TrimSuffix(prevFile, filepath.Ext(prevFile))
				prevCanonical = stripHotPrefix(prevBase)
			}
		}
	}

	// Empty the trash completely => older undo tokens become invalid automatically.
	// Do NOT swallow errors: if .trash cannot be emptied, this delete must not proceed.
	if err := os.RemoveAll(trashDir); err != nil {
		if runtime.GOOS == "windows" && isSharingViolation(err) {
			http.Error(w, "konnte .trash nicht leeren (Datei wird gerade verwendet). Bitte Player schließen und erneut versuchen.", http.StatusConflict)
			return
		}
		http.Error(w, "trash leeren fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}

	// The previously trashed video is now definitively gone → drop its
	// generated/meta assets for that ID.
	if prevCanonical != "" {
		removeGeneratedForID(prevCanonical)

		// Best effort: in case assets were ever created under the HOT-prefixed ID.
		if prevBase != "" && prevBase != prevCanonical {
			removeGeneratedForID(prevBase)
		}
	}

	if err := os.MkdirAll(trashDir, 0o755); err != nil {
		http.Error(w, "trash dir erstellen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}

	// Remember the original directory relative to doneAbs (including keep/<sub>)
	// so an undo can restore the file to the same place.
	origDir := filepath.Dir(target)
	relDir, err := filepath.Rel(doneAbs, origDir)
	if err != nil {
		http.Error(w, "rel dir berechnen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	relDir = filepath.ToSlash(relDir)
	if strings.TrimSpace(relDir) == "" {
		relDir = "."
	}

	// Create the undo token up front so it can be stored as the single-slot key.
	tok, err := encodeUndoDeleteToken(undoDeleteToken{
		Trash:  "", // set shortly (trashName)
		RelDir: relDir,
		File:   file,
	})
	if err != nil {
		http.Error(w, "undo token encode fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}

	// Unique trash filename with the token visible in the name.
	trashName := tok + "__" + file
	trashName = strings.ReplaceAll(trashName, string(os.PathSeparator), "_")
	dst := filepath.Join(trashDir, trashName)

	// NOTE: the token is not re-encoded with the trash name — the trash name is
	// stored separately in last.json instead.

	// Move with retry (robust against Windows file locks).
	if err := renameWithRetry(target, dst); err != nil {
		if runtime.GOOS == "windows" && isSharingViolation(err) {
			http.Error(w, "datei wird gerade verwendet (Player offen). Bitte kurz stoppen und erneut versuchen.", http.StatusConflict)
			return
		}
		http.Error(w, "trash move fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}

	// Write last.json: only this token remains valid.
	type trashMeta struct {
		Token     string `json:"token"`     // exactly the (encoded) query token
		TrashName string `json:"trashName"` // filename inside .trash
		RelDir    string `json:"relDir"`    // original directory relative to doneAbs
		File      string `json:"file"`      // original basename
		DeletedAt int64  `json:"deletedAt"`
	}

	meta := trashMeta{
		Token:     tok,
		TrashName: trashName,
		RelDir:    relDir,
		File:      file,
		DeletedAt: time.Now().Unix(),
	}

	b, _ := json.Marshal(meta)
	// Best effort; a failed write only breaks undo, not the delete itself.
	_ = os.WriteFile(filepath.Join(trashDir, "last.json"), b, 0o644)

	// Clean caches/jobs (generated assets are NOT hard-deleted so undo stays fast).
	purgeDurationCacheForPath(target)
	removeJobsByOutputBasename(file)

	notifyDoneChanged()
	notifyJobsChanged()

	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Cache-Control", "no-store")
	_ = json.NewEncoder(w).Encode(map[string]any{
		"ok":        true,
		"file":      file,
		"from":      from, // "done" | "keep"
		"undoToken": tok,  // for undo
		"trashed":   true,
	})

}
// recordRestoreVideo undoes the most recent delete: it validates the "token"
// query parameter against .trash/last.json (single-slot semantics — only the
// latest token is accepted) and moves the file back to its original directory
// under doneDir. POST only.
func recordRestoreVideo(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Nur POST erlaubt", http.StatusMethodNotAllowed)
		return
	}

	raw := strings.TrimSpace(r.URL.Query().Get("token"))
	if raw == "" {
		http.Error(w, "token fehlt", http.StatusBadRequest)
		return
	}

	// Resolve doneDir.
	s := getSettings()
	doneAbs, err := resolvePathRelativeToApp(s.DoneDir)
	if err != nil {
		http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	if strings.TrimSpace(doneAbs) == "" {
		http.Error(w, "doneDir ist leer", http.StatusBadRequest)
		return
	}

	// Single slot: read last.json and strictly validate the token against it.
	trashDir := filepath.Join(doneAbs, ".trash")
	metaPath := filepath.Join(trashDir, "last.json")

	b, err := os.ReadFile(metaPath)
	if err != nil {
		http.Error(w, "nichts zum Wiederherstellen", http.StatusNotFound)
		return
	}

	var meta struct {
		Token     string `json:"token"`
		TrashName string `json:"trashName"`
		RelDir    string `json:"relDir"`
		File      string `json:"file"`
		DeletedAt int64  `json:"deletedAt"`
	}
	if err := json.Unmarshal(b, &meta); err != nil {
		http.Error(w, "trash meta ungültig", http.StatusInternalServerError)
		return
	}
	if strings.TrimSpace(meta.Token) == "" || strings.TrimSpace(meta.TrashName) == "" || strings.TrimSpace(meta.File) == "" {
		http.Error(w, "trash meta unvollständig", http.StatusInternalServerError)
		return
	}

	// Only the most recent token is valid.
	if raw != meta.Token {
		http.Error(w, "token ungültig (nicht der letzte)", http.StatusNotFound)
		return
	}

	// Additionally decode the token (format/signature check); the restore data
	// itself still comes from last.json.
	tok, err := decodeUndoDeleteToken(raw)
	if err != nil {
		http.Error(w, "token ungültig", http.StatusBadRequest)
		return
	}

	// Safety: only use path components from meta that pass the sanity checks.
	if !isSafeBasename(meta.TrashName) || !isSafeBasename(meta.File) || !isSafeRelDir(meta.RelDir) {
		http.Error(w, "token inhalt ungültig", http.StatusBadRequest)
		return
	}

	// Extra consistency checks: token.File / token.RelDir must match meta.
	if tok.File != meta.File || tok.RelDir != meta.RelDir {
		http.Error(w, "token passt nicht zu letzter Löschung", http.StatusNotFound)
		return
	}

	ext := strings.ToLower(filepath.Ext(meta.File))
	if ext != ".mp4" && ext != ".ts" {
		http.Error(w, "nicht erlaubt", http.StatusForbidden)
		return
	}

	// Source: exactly the most recently deleted file.
	src := filepath.Join(trashDir, meta.TrashName)

	// Reconstruct the destination directory (relative to doneAbs).
	rel := meta.RelDir
	if rel == "." {
		rel = ""
	}
	dstDir := filepath.Join(doneAbs, filepath.FromSlash(rel))
	dstDirClean := filepath.Clean(dstDir)
	doneClean := filepath.Clean(doneAbs)

	// Safety: dstDir must lie inside doneAbs (case-insensitive prefix check).
	if !strings.HasPrefix(strings.ToLower(dstDirClean)+string(os.PathSeparator), strings.ToLower(doneClean)+string(os.PathSeparator)) &&
		!strings.EqualFold(dstDirClean, doneClean) {
		http.Error(w, "zielpfad ungültig", http.StatusBadRequest)
		return
	}

	if err := os.MkdirAll(dstDirClean, 0o755); err != nil {
		http.Error(w, "zielordner erstellen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}

	// May pick an alternative (e.g. "__dup") name if the original is taken.
	dst, err := uniqueDestPath(dstDirClean, meta.File)
	if err != nil {
		http.Error(w, "zielname nicht verfügbar: "+err.Error(), http.StatusConflict)
		return
	}

	if err := renameWithRetry(src, dst); err != nil {
		if runtime.GOOS == "windows" && isSharingViolation(err) {
			http.Error(w, "restore fehlgeschlagen (Datei wird gerade verwendet).", http.StatusConflict)
			return
		}
		http.Error(w, "restore fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}

	// Empty the trash so the token is definitively dead afterwards (best effort).
	_ = os.RemoveAll(trashDir)
	_ = os.MkdirAll(trashDir, 0o755)

	notifyDoneChanged()

	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Cache-Control", "no-store")
	_ = json.NewEncoder(w).Encode(map[string]any{
		"ok":           true,
		"file":         meta.File,
		"restoredFile": filepath.Base(dst), // may contain a __dup suffix
	})
}
// recordUnkeepVideo moves a video out of doneDir/keep (root or keep/<sub>)
// back into the flat doneDir root. POST only; "file" query parameter must be
// a bare basename ending in .mp4 or .ts.
func recordUnkeepVideo(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Nur POST erlaubt", http.StatusMethodNotAllowed)
		return
	}

	raw := strings.TrimSpace(r.URL.Query().Get("file"))
	if raw == "" {
		http.Error(w, "file fehlt", http.StatusBadRequest)
		return
	}

	file, err := url.QueryUnescape(raw)
	if err != nil {
		http.Error(w, "ungültiger file", http.StatusBadRequest)
		return
	}
	file = strings.TrimSpace(file)

	// Reject anything but a plain basename (no traversal).
	if !isSafeBasename(file) {
		http.Error(w, "ungültiger file", http.StatusBadRequest)
		return
	}

	ext := strings.ToLower(filepath.Ext(file))
	if ext != ".mp4" && ext != ".ts" {
		http.Error(w, "nicht erlaubt", http.StatusForbidden)
		return
	}

	s := getSettings()
	doneAbs, err := resolvePathRelativeToApp(s.DoneDir)
	if err != nil {
		http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	if strings.TrimSpace(doneAbs) == "" {
		http.Error(w, "doneDir ist leer", http.StatusBadRequest)
		return
	}

	// The source must be in keep (root or keep/<sub>).
	src, from, fi, err := resolveDoneFileByName(doneAbs, file)
	if err != nil {
		http.Error(w, "datei nicht gefunden", http.StatusNotFound)
		return
	}
	if from != "keep" {
		http.Error(w, "datei ist nicht in keep", http.StatusConflict)
		return
	}
	if fi != nil && fi.IsDir() {
		http.Error(w, "ist ein verzeichnis", http.StatusBadRequest)
		return
	}

	// Destination: back to done/ (flat, without model subdirectories).
	dstDir := doneAbs

	if err := os.MkdirAll(dstDir, 0o755); err != nil {
		http.Error(w, "done subdir erstellen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}

	// May pick an alternative name if the original is taken.
	dst, err := uniqueDestPath(dstDir, file)
	if err != nil {
		http.Error(w, "zielname nicht verfügbar: "+err.Error(), http.StatusConflict)
		return
	}

	if err := renameWithRetry(src, dst); err != nil {
		if runtime.GOOS == "windows" && isSharingViolation(err) {
			http.Error(w, "unkeep fehlgeschlagen (Datei wird gerade verwendet).", http.StatusConflict)
			return
		}
		http.Error(w, "unkeep fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}

	notifyDoneChanged()

	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Cache-Control", "no-store")
	_ = json.NewEncoder(w).Encode(map[string]any{
		"ok":      true,
		"oldFile": file,
		"newFile": filepath.Base(dst),
	})
}
// recordKeepVideo moves a finished video from doneDir (root or done/<sub>)
// into doneDir/keep/<modelKey>/ so it survives later cleanup. If the file is
// already somewhere under keep it reports alreadyKept=true and, as a best
// effort, sorts a keep-root file into its model subfolder. POST only.
func recordKeepVideo(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Nur POST erlaubt", http.StatusMethodNotAllowed)
		return
	}

	raw := strings.TrimSpace(r.URL.Query().Get("file"))
	if raw == "" {
		http.Error(w, "file fehlt", http.StatusBadRequest)
		return
	}

	file, err := url.QueryUnescape(raw)
	if err != nil {
		http.Error(w, "ungültiger file", http.StatusBadRequest)
		return
	}
	file = strings.TrimSpace(file)

	// Only a bare basename is allowed (no traversal).
	if file == "" ||
		strings.Contains(file, "/") ||
		strings.Contains(file, "\\") ||
		filepath.Base(file) != file {
		http.Error(w, "ungültiger file", http.StatusBadRequest)
		return
	}

	ext := strings.ToLower(filepath.Ext(file))
	if ext != ".mp4" && ext != ".ts" {
		http.Error(w, "nicht erlaubt", http.StatusForbidden)
		return
	}

	s := getSettings()
	doneAbs, err := resolvePathRelativeToApp(s.DoneDir)
	if err != nil {
		http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	if strings.TrimSpace(doneAbs) == "" {
		http.Error(w, "doneDir ist leer", http.StatusBadRequest)
		return
	}

	keepRoot := filepath.Join(doneAbs, "keep")
	if err := os.MkdirAll(keepRoot, 0o755); err != nil {
		http.Error(w, "keep dir erstellen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}

	// 0) If it already exists anywhere in keep (root or keep/<sub>):
	//    - if it sits in the keep root, sort it into keep/<modelKey>/ now.
	if p, _, ok := findFileInDirOrOneLevelSubdirs(keepRoot, file, ""); ok {
		// p is either in keepRoot or in keepRoot/<sub>.
		if strings.EqualFold(filepath.Clean(filepath.Dir(p)), filepath.Clean(keepRoot)) {
			// In the root => try to sort it into a model folder.
			modelKey := modelKeyFromFilenameOrPath(file, p /* srcPath */, keepRoot /* doneAbs dummy, unused here */)
			modelKey = sanitizeModelKey(modelKey)

			// Optional fallback: if the keep-root path yields nothing, derive
			// the key from the filename only.
			if modelKey == "" {
				stem := strings.TrimSuffix(file, filepath.Ext(file))
				modelKey = sanitizeModelKey(modelNameFromFilename(stem))
			}

			if modelKey != "" {
				dstDir := filepath.Join(keepRoot, modelKey)
				if err := os.MkdirAll(dstDir, 0o755); err == nil {
					dst, derr := uniqueDestPath(dstDir, file)
					if derr == nil {
						// Best-effort move; failure leaves the file in the root.
						_ = renameWithRetry(p, dst)
					}
				}
			}
		}

		w.Header().Set("Content-Type", "application/json")
		w.Header().Set("Cache-Control", "no-store")
		_ = json.NewEncoder(w).Encode(map[string]any{
			"ok":          true,
			"file":        file,
			"alreadyKept": true,
		})
		return
	}

	// 1) Source must be in done (root or done/<sub>), but NOT in keep.
	src, fi, ok := findFileInDirOrOneLevelSubdirs(doneAbs, file, "keep")
	if !ok {
		http.Error(w, "datei nicht gefunden", http.StatusNotFound)
		return
	}
	if fi == nil || fi.IsDir() {
		http.Error(w, "ist ein verzeichnis", http.StatusBadRequest)
		return
	}

	// 2) Destination: keep/<modelKey>/file (keep root when no key is found).
	modelKey := modelKeyFromFilenameOrPath(file, src, doneAbs)
	dstDir := keepRoot
	if modelKey != "" {
		dstDir = filepath.Join(keepRoot, modelKey)
	}

	if err := os.MkdirAll(dstDir, 0o755); err != nil {
		http.Error(w, "keep subdir erstellen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}

	dst, err := uniqueDestPath(dstDir, file)
	if err != nil {
		http.Error(w, "zielname nicht verfügbar: "+err.Error(), http.StatusConflict)
		return
	}

	// Rename with retry (robust against Windows file locks).
	if err := renameWithRetry(src, dst); err != nil {
		if runtime.GOOS == "windows" && isSharingViolation(err) {
			http.Error(w, "keep fehlgeschlagen (Datei wird gerade verwendet).", http.StatusConflict)
			return
		}
		http.Error(w, "keep fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}

	notifyDoneChanged()

	// ... the existing cleanup block (delete generated assets, legacy cleanup,
	// removeJobsByOutputBasename) remains unchanged ...

	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Cache-Control", "no-store")
	_ = json.NewEncoder(w).Encode(map[string]any{
		"ok":          true,
		"file":        file,
		"alreadyKept": false,
		"newFile":     filepath.Base(dst), // NEW
	})

}
// recordToggleHot toggles the "HOT " filename prefix of a finished video in
// place (within whatever directory it currently lives in — done or keep).
// Generated assets are NOT renamed; they stay under the canonical (non-HOT)
// ID. POST only.
func recordToggleHot(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Nur POST", http.StatusMethodNotAllowed)
		return
	}

	raw := strings.TrimSpace(r.URL.Query().Get("file"))
	if raw == "" {
		http.Error(w, "file fehlt", http.StatusBadRequest)
		return
	}

	file, err := url.QueryUnescape(raw)
	if err != nil {
		http.Error(w, "ungültiger file", http.StatusBadRequest)
		return
	}
	file = strings.TrimSpace(file)

	// Only a bare basename is allowed (no traversal).
	if file == "" ||
		strings.Contains(file, "/") ||
		strings.Contains(file, "\\") ||
		filepath.Base(file) != file {
		http.Error(w, "ungültiger file", http.StatusBadRequest)
		return
	}

	ext := strings.ToLower(filepath.Ext(file))
	if ext != ".mp4" && ext != ".ts" {
		http.Error(w, "nicht erlaubt", http.StatusForbidden)
		return
	}

	s := getSettings()
	doneAbs, err := resolvePathRelativeToApp(s.DoneDir)
	if err != nil {
		http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	if strings.TrimSpace(doneAbs) == "" {
		http.Error(w, "doneDir ist leer", http.StatusBadRequest)
		return
	}

	// The source may live in done/, done/<sub>, keep/ or keep/<sub>.
	src, from, fi, err := resolveDoneFileByName(doneAbs, file)
	if err != nil {
		http.Error(w, "datei nicht gefunden", http.StatusNotFound)
		return
	}
	if fi != nil && fi.IsDir() {
		http.Error(w, "ist ein verzeichnis", http.StatusBadRequest)
		return
	}

	srcDir := filepath.Dir(src) // important: toggle inside the actual directory

	// Toggle the "HOT " prefix.
	newFile := file
	if strings.HasPrefix(file, "HOT ") {
		newFile = strings.TrimPrefix(file, "HOT ")
	} else {
		newFile = "HOT " + file
	}

	dst := filepath.Join(srcDir, newFile) // toggle in the same folder (done or keep)
	if _, err := os.Stat(dst); err == nil {
		http.Error(w, "ziel existiert bereits", http.StatusConflict)
		return
	} else if !os.IsNotExist(err) {
		http.Error(w, "stat ziel fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}

	if err := renameWithRetry(src, dst); err != nil {
		if runtime.GOOS == "windows" && isSharingViolation(err) {
			http.Error(w, "rename fehlgeschlagen (Datei wird gerade abgespielt). Bitte erneut versuchen.", http.StatusConflict)
			return
		}
		http.Error(w, "rename fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}

	// NO generated-asset rename!
	// Assets stay canonical (without HOT).
	canonicalID := stripHotPrefix(strings.TrimSuffix(file, filepath.Ext(file)))

	renameJobsOutputBasename(file, newFile)

	notifyDoneChanged()
	notifyJobsChanged()

	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Cache-Control", "no-store")
	_ = json.NewEncoder(w).Encode(map[string]any{
		"ok":          true,
		"oldFile":     file,
		"newFile":     newFile,
		"canonicalID": canonicalID,
		"from":        from, // "done" | "keep"
	})
}
// resolvePathRelativeToApp turns a configured path into an absolute one.
// Rules: empty input stays empty; an already-absolute path is returned
// cleaned; a relative path is anchored at the executable's directory —
// unless that directory looks like a go-build/temp location (go run, tests),
// in which case the current working directory is used instead.
func resolvePathRelativeToApp(p string) (string, error) {
	trimmed := strings.TrimSpace(p)
	if trimmed == "" {
		return "", nil
	}

	cleaned := filepath.Clean(filepath.FromSlash(trimmed))
	if filepath.IsAbs(cleaned) {
		return cleaned, nil
	}

	// Heuristic: `go run` / tests place the binary under a temp/go-build dir;
	// anchoring relative paths there would be meaningless.
	looksTemporary := func(dir string) bool {
		low := strings.ToLower(dir)
		markers := []string{
			`\appdata\local\temp`,
			`\temp\`,
			`\tmp\`,
			`\go-build`,
			`/tmp/`,
			`/go-build`,
		}
		for _, m := range markers {
			if strings.Contains(low, m) {
				return true
			}
		}
		return false
	}

	if exe, err := os.Executable(); err == nil {
		if exeDir := filepath.Dir(exe); !looksTemporary(exeDir) {
			return filepath.Join(exeDir, cleaned), nil
		}
	}

	// Fallback: working directory (dev mode).
	wd, err := os.Getwd()
	if err != nil {
		return "", err
	}
	return filepath.Join(wd, cleaned), nil
}
strings.TrimSpace(doneAbs) == "" { + doneAbs = strings.TrimSpace(s.DoneDir) + } + if strings.TrimSpace(doneAbs) == "" { + return "" + } + + return filepath.Join(doneAbs, "keep") +} + +func getDoneDir() string { + s := getSettings() + + doneAbs, err := resolvePathRelativeToApp(s.DoneDir) + if err == nil && strings.TrimSpace(doneAbs) != "" { + return doneAbs + } + + return strings.TrimSpace(s.DoneDir) +} + +func findVideoPath(file string) (string, error) { + base := filepath.Base(file) // verhindert path traversal + + // TODO: passe diese Root-Dirs an deine echten Pfade an: + roots := []string{ + getRecordingsDir(), // z.B. downloads/output root + getDoneDir(), // ✅ NEU: fertige Dateien liegen typischerweise hier + getKeepDir(), // keep root + } + + // 1) direkt in den Roots + for _, root := range roots { + root = strings.TrimSpace(root) + if root == "" { + continue + } + p := filepath.Join(root, base) + if st, err := os.Stat(p); err == nil && !st.IsDir() { + return p, nil + } + } + + // 2) 1 Ebene Unterordner: root/*/file + for _, root := range roots { + root = strings.TrimSpace(root) + if root == "" { + continue + } + matches, _ := filepath.Glob(filepath.Join(root, "*", base)) + for _, p := range matches { + if st, err := os.Stat(p); err == nil && !st.IsDir() { + return p, nil + } + } + } + + return "", os.ErrNotExist +} + +func setNoStoreHeaders(w http.ResponseWriter) { + // verhindert Browser/Proxy Caching (wichtig für Logs/Status) + w.Header().Set("Cache-Control", "no-store, max-age=0") + w.Header().Set("Pragma", "no-cache") + w.Header().Set("Expires", "0") +} + +func findFileInDirOrOneLevelSubdirs(root string, file string, skipDirName string) (string, os.FileInfo, bool) { + // direct + p := filepath.Join(root, file) + if fi, err := os.Stat(p); err == nil && !fi.IsDir() && fi.Size() > 0 { + return p, fi, true + } + + entries, err := os.ReadDir(root) + if err != nil { + return "", nil, false + } + + for _, e := range entries { + if !e.IsDir() { + continue + } + 
// isTrashPath reports whether p points at the .trash folder itself or at
// anything inside it. Backslashes are normalized first so Windows paths
// match too.
func isTrashPath(p string) bool {
	normalized := strings.ReplaceAll(p, `\`, "/")
	if strings.HasSuffix(normalized, "/.trash") {
		return true
	}
	return strings.Contains(normalized, "/.trash/")
}
// startRecordingInternal registers and starts a new recording job for the
// given request. Under jobsMu it first deduplicates against an already
// running job with the same URL (unhiding it if the user started it
// manually), then pre-fills timestamp/output path so the UI immediately has
// model/filename details, inserts the job, and launches runJob in a goroutine.
func startRecordingInternal(req RecordRequest) (*RecordJob, error) {
	url := strings.TrimSpace(req.URL)
	if url == "" {
		return nil, errors.New("url fehlt")
	}

	// Duplicate-running guard (identical URL).
	jobsMu.Lock()
	for _, j := range jobs {
		if j != nil && j.Status == JobRunning && strings.TrimSpace(j.SourceURL) == url {
			// If a hidden auto-check job is running and the user starts it
			// manually -> make it visible immediately.
			if j.Hidden && !req.Hidden {
				j.Hidden = false
				jobsMu.Unlock()

				notifyJobsChanged()
				return j, nil
			}

			jobsMu.Unlock()
			return j, nil
		}
	}

	// Set timestamp + output already here so the UI has model/filename/details
	// right away.
	startedAt := time.Now()
	provider := detectProvider(url)

	// Best-effort username from the URL.
	username := ""
	switch provider {
	case "chaturbate":
		username = extractUsername(url)
	case "mfc":
		username = extractMFCUsername(url)
	}
	if strings.TrimSpace(username) == "" {
		username = "unknown"
	}

	// Filename (consistent with runJob: same timestamp).
	filename := fmt.Sprintf("%s_%s.ts", username, startedAt.Format("01_02_2006__15-04-05"))

	// Best effort: absolute RecordDir (fall back to the raw settings value).
	s := getSettings()
	recordDirAbs, _ := resolvePathRelativeToApp(s.RecordDir)
	recordDir := strings.TrimSpace(recordDirAbs)
	if recordDir == "" {
		recordDir = strings.TrimSpace(s.RecordDir)
	}
	outPath := filepath.Join(recordDir, filename)

	jobID := uuid.NewString()
	ctx, cancel := context.WithCancel(context.Background())

	job := &RecordJob{
		ID:        jobID,
		SourceURL: url,
		Status:    JobRunning,
		StartedAt: startedAt,
		Output:    outPath, // filled immediately
		Hidden:    req.Hidden,
		cancel:    cancel,
	}

	jobs[jobID] = job
	jobsMu.Unlock()

	// Do not broadcast hidden jobs to the UI right away.
	if !job.Hidden {
		notifyJobsChanged()
	}

	go runJob(ctx, job, req)
	return job, nil
}
relativ zur EXE auflösen) ---- + switch provider { + case "chaturbate": + if !hasChaturbateCookies(req.Cookie) { + err = errors.New("cf_clearance und session_id (oder sessionid) Cookies sind für Chaturbate erforderlich") + break + } + + s := getSettings() + recordDirAbs, rerr := resolvePathRelativeToApp(s.RecordDir) + if rerr != nil || strings.TrimSpace(recordDirAbs) == "" { + err = fmt.Errorf("recordDir auflösung fehlgeschlagen: %v", rerr) + break + } + _ = os.MkdirAll(recordDirAbs, 0o755) + + username := extractUsername(req.URL) + filename := fmt.Sprintf("%s_%s.ts", username, now.Format("01_02_2006__15-04-05")) + + // ✅ wenn Output schon beim Start gesetzt wurde, nutze ihn (falls absolut) + jobsMu.Lock() + existingOut := strings.TrimSpace(job.Output) + jobsMu.Unlock() + + outPath := existingOut + if outPath == "" || !filepath.IsAbs(outPath) { + outPath = filepath.Join(recordDirAbs, filename) + } + + // Output nur aktualisieren, wenn es sich ändert + if strings.TrimSpace(existingOut) != strings.TrimSpace(outPath) { + jobsMu.Lock() + job.Output = outPath + jobsMu.Unlock() + notifyJobsChanged() + } + + err = RecordStream(ctx, hc, "https://chaturbate.com/", username, outPath, req.Cookie, job) + + case "mfc": + s := getSettings() + recordDirAbs, rerr := resolvePathRelativeToApp(s.RecordDir) + if rerr != nil || strings.TrimSpace(recordDirAbs) == "" { + err = fmt.Errorf("recordDir auflösung fehlgeschlagen: %v", rerr) + break + } + _ = os.MkdirAll(recordDirAbs, 0o755) + + username := extractMFCUsername(req.URL) + filename := fmt.Sprintf("%s_%s.ts", username, now.Format("01_02_2006__15-04-05")) + outPath := filepath.Join(recordDirAbs, filename) + + jobsMu.Lock() + job.Output = outPath + jobsMu.Unlock() + notifyJobsChanged() + + err = RecordStreamMFC(ctx, hc, username, outPath, job) + + default: + err = errors.New("unsupported provider") + } + + // ---- Recording fertig: EndedAt/Error setzen ---- + end := time.Now() + + // Zielstatus bestimmen (finaler Status wird erst 
NACH Postwork gesetzt!) + target := JobFinished + var errText string + if err != nil { + if errors.Is(err, context.Canceled) { + target = JobStopped + } else { + target = JobFailed + errText = err.Error() + } + } + + // direkt nach provider record endet (egal ob err != nil oder nil) + stopPreview(job) + + // EndedAt + Error speichern (kurz locken) + jobsMu.Lock() + job.EndedAt = &end + if errText != "" { + job.Error = errText + } + out := strings.TrimSpace(job.Output) + jobsMu.Unlock() + notifyJobsChanged() + + // Falls Output fehlt (z.B. provider error), direkt final status setzen + if out == "" { + jobsMu.Lock() + job.Status = target + job.Phase = "" + job.Progress = 100 + job.PostWorkKey = "" + job.PostWork = nil + jobsMu.Unlock() + notifyJobsChanged() + notifyDoneChanged() + return + } + + // ✅ Postwork: remux/move/ffprobe/assets begrenzen -> in Queue + postOut := out + postTarget := target + + postKey := "postwork:" + job.ID + + // ✅ WICHTIG: + // - Status noch NICHT auf JobStopped/JobFinished setzen, sonst verschwindet er aus der Downloads-Tabelle. + // - Stattdessen Phase "postwork" + Progress hochsetzen (monoton). + // - Zusätzlich: PostWorkKey setzen + initialen Queue-Status ins Job-JSON hängen. 
+ jobsMu.Lock() + job.Phase = "postwork" + if job.Progress < 70 { + job.Progress = 70 + } + + job.PostWorkKey = postKey + // initialer Status (meist "missing", bis Enqueue done ist – wir updaten direkt danach nochmal) + { + s := postWorkQ.StatusForKey(postKey) + job.PostWork = &s + } + jobsMu.Unlock() + notifyJobsChanged() + + okQueued := postWorkQ.Enqueue(PostWorkTask{ + Key: postKey, + Added: time.Now(), + Run: func(ctx context.Context) error { + // beim Start: Queue-Status refresh (sollte jetzt "running" werden) + { + st := postWorkQ.StatusForKey(postKey) + jobsMu.Lock() + job.PostWork = &st + // optional: wenn du "queued" Progress optisch unterscheiden willst + if job.Phase == "postwork" && job.Progress < 71 { + job.Progress = 71 + } + + jobsMu.Unlock() + notifyJobsChanged() + } + + out := strings.TrimSpace(postOut) + if out == "" { + jobsMu.Lock() + job.Phase = "" + job.Progress = 100 + job.Status = postTarget + job.PostWorkKey = "" + job.PostWork = nil + jobsMu.Unlock() + notifyJobsChanged() + notifyDoneChanged() + return nil + } + + // Helper: Progress nur nach oben (gegen "rückwärts") + setPhase := func(phase string, pct int) { + jobsMu.Lock() + if pct < job.Progress { + pct = job.Progress + } + job.Phase = phase + job.Progress = pct + + // Queue-Status auch bei Phase-Wechsel aktuell halten (nice für UI) + st := postWorkQ.StatusForKey(postKey) + job.PostWork = &st + + jobsMu.Unlock() + notifyJobsChanged() + } + + // 1) Remux (nur wenn TS) + if strings.EqualFold(filepath.Ext(out), ".ts") { + setPhase("remuxing", 72) + if newOut, err2 := maybeRemuxTSForJob(job, out); err2 == nil && strings.TrimSpace(newOut) != "" { + out = strings.TrimSpace(newOut) + jobsMu.Lock() + job.Output = out + jobsMu.Unlock() + notifyJobsChanged() + } + } + + // 2) Move to done (best-effort) + setPhase("moving", 78) + if moved, err2 := moveToDoneDir(out); err2 == nil && strings.TrimSpace(moved) != "" { + out = strings.TrimSpace(moved) + jobsMu.Lock() + job.Output = out + 
jobsMu.Unlock() + notifyJobsChanged() + // ✅ erst JETZT ist done wirklich betroffen + notifyDoneChanged() + } + + // 3) Optional: kleine Downloads automatisch löschen + setPhase("postwork", 82) + if fi, serr := os.Stat(out); serr == nil && fi != nil && !fi.IsDir() { + jobsMu.Lock() + job.SizeBytes = fi.Size() + jobsMu.Unlock() + notifyJobsChanged() + + s := getSettings() + minMB := s.AutoDeleteSmallDownloadsBelowMB + if s.AutoDeleteSmallDownloads && minMB > 0 { + threshold := int64(minMB) * 1024 * 1024 + if fi.Size() > 0 && fi.Size() < threshold { + base := filepath.Base(out) + id := stripHotPrefix(strings.TrimSuffix(base, filepath.Ext(base))) + + if derr := removeWithRetry(out); derr == nil || os.IsNotExist(derr) { + removeGeneratedForID(id) + if doneAbs, rerr := resolvePathRelativeToApp(getSettings().DoneDir); rerr == nil && strings.TrimSpace(doneAbs) != "" { + _ = os.RemoveAll(filepath.Join(doneAbs, "preview", id)) + _ = os.RemoveAll(filepath.Join(doneAbs, "thumbs", id)) + } + purgeDurationCacheForPath(out) + + jobsMu.Lock() + delete(jobs, job.ID) + jobsMu.Unlock() + notifyJobsChanged() + notifyDoneChanged() + + fmt.Println("🧹 auto-deleted:", base, "size:", formatBytesSI(fi.Size())) + return nil + } else { + fmt.Println("⚠️ auto-delete failed:", derr) + } + } + } + } + + // 4) Dauer (ffprobe) + setPhase("ffprobe", 84) + { + dctx, cancel := context.WithTimeout(ctx, 6*time.Second) + if sec, derr := durationSecondsCached(dctx, out); derr == nil && sec > 0 { + jobsMu.Lock() + job.DurationSeconds = sec + jobsMu.Unlock() + notifyJobsChanged() + } + cancel() + } + + // 5) Video-Props + setPhase("probe", 86) + { + pctx, cancel := context.WithTimeout(ctx, 6*time.Second) + w, h, fps, perr := probeVideoProps(pctx, out) + cancel() + + if perr == nil { + jobsMu.Lock() + job.VideoWidth = w + job.VideoHeight = h + job.FPS = fps + jobsMu.Unlock() + notifyJobsChanged() + } + } + + // 6) Assets (thumbs.jpg + preview.mp4) + const ( + assetsStart = 86 + assetsEnd = 99 + ) + + 
setPhase("assets", assetsStart) + + lastPct := -1 + lastTick := time.Time{} + + update := func(r float64) { + if r < 0 { + r = 0 + } + if r > 1 { + r = 1 + } + + pct := assetsStart + int(math.Round(r*float64(assetsEnd-assetsStart))) + if pct < assetsStart { + pct = assetsStart + } + if pct > assetsEnd { + pct = assetsEnd + } + + if pct == lastPct { + return + } + if !lastTick.IsZero() && time.Since(lastTick) < 150*time.Millisecond { + return + } + lastPct = pct + lastTick = time.Now() + setPhase("assets", pct) + } + + if err := ensureAssetsForVideoWithProgress(out, job.SourceURL, update); err != nil { + fmt.Println("⚠️ ensureAssetsForVideo:", err) + } + setPhase("assets", assetsEnd) + + // 7) Finalize: JETZT finalen Status setzen (damit er erst dann aus Downloads verschwindet) + jobsMu.Lock() + job.Status = postTarget + job.Phase = "" + job.Progress = 100 + job.PostWorkKey = "" + job.PostWork = nil + jobsMu.Unlock() + notifyJobsChanged() + notifyDoneChanged() + + return nil + }, + }) + + if okQueued { + // ✅ direkt nach erfolgreichem Enqueue nochmal Status holen (nun "queued" + Position möglich) + st := postWorkQ.StatusForKey(postKey) + jobsMu.Lock() + job.PostWork = &st + jobsMu.Unlock() + notifyJobsChanged() + } else { + // Queue voll -> Fallback: finalisieren + jobsMu.Lock() + job.Status = postTarget + job.Phase = "" + job.Progress = 100 + job.PostWorkKey = "" + job.PostWork = nil + jobsMu.Unlock() + notifyJobsChanged() + notifyDoneChanged() + } + + return +} diff --git a/backend/record_stream_cb.go b/backend/record_stream_cb.go new file mode 100644 index 0000000..9f2637d --- /dev/null +++ b/backend/record_stream_cb.go @@ -0,0 +1,338 @@ +package main + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/grafov/m3u8" +) + +// --- DVR-ähnlicher Recorder-Ablauf --- +// Entspricht grob dem RecordStream aus dem Channel-Snippet: +func RecordStream( + ctx 
context.Context, + hc *HTTPClient, + domain string, + username string, + outputPath string, + httpCookie string, + job *RecordJob, +) error { + // 1) Seite laden + // Domain sauber zusammenbauen (mit/ohne Slash) + base := strings.TrimRight(domain, "/") + pageURL := base + "/" + username + + body, err := hc.FetchPage(ctx, pageURL, httpCookie) + if err != nil { + return fmt.Errorf("seite laden: %w", err) + } + + // 2) HLS-URL aus roomDossier extrahieren (wie DVR.ParseStream) + hlsURL, err := ParseStream(body) + if err != nil { + return fmt.Errorf("stream-parsing: %w", err) + } + + // 3) Playlist holen (wie stream.GetPlaylist im DVR) + playlist, err := FetchPlaylist(ctx, hc, hlsURL, httpCookie) + if err != nil { + return fmt.Errorf("playlist abrufen: %w", err) + } + + // ✅ Job erst jetzt sichtbar machen (Stream wirklich verfügbar) + if job != nil { + _ = publishJob(job.ID) + } + + if job != nil && strings.TrimSpace(job.PreviewDir) == "" { + assetID := assetIDForJob(job) + if strings.TrimSpace(assetID) == "" { + assetID = job.ID + } + previewDir := filepath.Join(os.TempDir(), "rec_preview", assetID) + + jobsMu.Lock() + job.PreviewDir = previewDir + jobsMu.Unlock() + + if err := startPreviewHLS(ctx, job, playlist.PlaylistURL, previewDir, httpCookie, hc.userAgent); err != nil { + fmt.Println("⚠️ preview start fehlgeschlagen:", err) + } + } + + // 4) Datei öffnen + file, err := os.Create(outputPath) + if err != nil { + return fmt.Errorf("datei erstellen: %w", err) + } + if job != nil { + _ = publishJob(job.ID) + } + + defer func() { + _ = file.Close() + }() + + // live size tracking (für UI) + var written int64 + var lastPush time.Time + var lastBytes int64 + + // 5) Segmente „watchen“ – analog zu WatchSegments + HandleSegment im DVR + err = playlist.WatchSegments(ctx, hc, httpCookie, func(b []byte, duration float64) error { + // Hier wäre im DVR ch.HandleSegment – bei dir einfach in eine Datei schreiben + if _, err := file.Write(b); err != nil { + return 
fmt.Errorf("schreibe segment: %w", err) + } + + // ✅ live size (UI) – throttled + written += int64(len(b)) + if job != nil { + now := time.Now() + if lastPush.IsZero() || now.Sub(lastPush) >= 750*time.Millisecond || (written-lastBytes) >= 2*1024*1024 { + jobsMu.Lock() + job.SizeBytes = written + jobsMu.Unlock() + notifyJobsChanged() + + lastPush = now + lastBytes = written + } + } + + // Könntest hier z.B. auch Dauer/Größe loggen, wenn du möchtest + _ = duration // aktuell unbenutzt + return nil + }) + if err != nil { + return fmt.Errorf("watch segments: %w", err) + } + + return nil +} + +// ParseStream entspricht der DVR-Variante (roomDossier → hls_source) +func ParseStream(html string) (string, error) { + matches := roomDossierRegexp.FindStringSubmatch(html) + if len(matches) == 0 { + return "", errors.New("room dossier nicht gefunden") + } + + // DVR-Style Unicode-Decode + decoded, err := strconv.Unquote( + strings.Replace(strconv.Quote(matches[1]), `\\u`, `\u`, -1), + ) + if err != nil { + return "", fmt.Errorf("Unicode-decode failed: %w", err) + } + + var rd struct { + HLSSource string `json:"hls_source"` + } + if err := json.Unmarshal([]byte(decoded), &rd); err != nil { + return "", fmt.Errorf("JSON-parse failed: %w", err) + } + if rd.HLSSource == "" { + return "", errors.New("kein HLS-Quell-URL im JSON") + } + return rd.HLSSource, nil +} + +// --- Playlist/WatchSegments wie gehabt --- +type Playlist struct { + PlaylistURL string + RootURL string + Resolution int + Framerate int +} + +type Resolution struct { + Framerate map[int]string + Width int +} + +// nutzt ebenfalls *HTTPClient +func (p *Playlist) WatchSegments( + ctx context.Context, + hc *HTTPClient, + httpCookie string, + handler func([]byte, float64) error, +) error { + var lastSeq int64 = -1 + emptyRounds := 0 + const maxEmptyRounds = 60 // statt 5 + + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + // Playlist holen + req, err := hc.NewRequest(ctx, http.MethodGet, 
p.PlaylistURL, httpCookie) + if err != nil { + return fmt.Errorf("Fehler beim Erstellen der Playlist-Request: %w", err) + } + + resp, err := hc.client.Do(req) + if err != nil { + emptyRounds++ + if emptyRounds >= maxEmptyRounds { + return errors.New("❌ Playlist nicht mehr erreichbar – Stream vermutlich offline") + } + time.Sleep(2 * time.Second) + continue + } + + playlist, listType, err := m3u8.DecodeFrom(resp.Body, true) + resp.Body.Close() + + if err != nil || listType != m3u8.MEDIA { + emptyRounds++ + if emptyRounds >= maxEmptyRounds { + return errors.New("❌ Fehlerhafte Playlist – möglicherweise offline") + } + time.Sleep(2 * time.Second) + continue + } + + media := playlist.(*m3u8.MediaPlaylist) + newSegment := false + + for _, segment := range media.Segments { + if segment == nil { + continue + } + if int64(segment.SeqId) <= lastSeq { + continue + } + + lastSeq = int64(segment.SeqId) + newSegment = true + + segmentURL := p.RootURL + segment.URI + + segReq, err := hc.NewRequest(ctx, http.MethodGet, segmentURL, httpCookie) + if err != nil { + continue + } + + segResp, err := hc.client.Do(segReq) + if err != nil { + continue + } + + data, err := io.ReadAll(segResp.Body) + segResp.Body.Close() + if err != nil || len(data) == 0 { + continue + } + + if err := handler(data, segment.Duration); err != nil { + return err + } + } + + if newSegment { + emptyRounds = 0 + } else { + emptyRounds++ + if emptyRounds >= maxEmptyRounds { + return errors.New("🛑 Keine neuen HLS-Segmente empfangen – Stream vermutlich beendet oder offline.") + } + } + + time.Sleep(1 * time.Second) + } +} + +// Cookie-Hilfsfunktion (wie ParseCookies + AddCookie im DVR) +func addCookiesFromString(req *http.Request, cookieStr string) { + if cookieStr == "" { + return + } + pairs := strings.Split(cookieStr, ";") + for _, pair := range pairs { + parts := strings.SplitN(strings.TrimSpace(pair), "=", 2) + if len(parts) != 2 { + continue + } + name := strings.TrimSpace(parts[0]) + value := 
// extractUsername derives the bare room/username from a Chaturbate URL or a
// plain name: strips scheme, "www.", the chaturbate.com host prefix, and any
// trailing path/query/fragment, then trims leftover slashes.
func extractUsername(input string) string {
	name := strings.TrimSpace(input)
	for _, prefix := range []string{"https://", "http://", "www.", "chaturbate.com/"} {
		name = strings.TrimPrefix(name, prefix)
	}

	// cut at the first path/query/fragment delimiter
	if cut := strings.IndexAny(name, "/?#"); cut >= 0 {
		name = name[:cut]
	}

	// defensively drop any remaining slashes/backslashes at the edges
	return strings.Trim(name, "/\\")
}

// hasChaturbateCookies reports whether the cookie string carries both the
// cf_clearance cookie and a session cookie ("session_id" or "sessionid",
// matched case-insensitively because parseCookieString lower-cases names).
func hasChaturbateCookies(cookieStr string) bool {
	cookies := parseCookieString(cookieStr)
	if _, ok := cookies["cf_clearance"]; !ok {
		return false
	}
	for _, key := range []string{"session_id", "sessionid"} {
		if _, ok := cookies[key]; ok {
			return true
		}
	}
	return false
}

// parseCookieString splits a "name=value; name2=value2" cookie header into a
// map keyed by the lower-cased cookie name. Pairs without "=" or with an
// empty name are skipped.
func parseCookieString(cookieStr string) map[string]string {
	result := map[string]string{}
	for _, raw := range strings.Split(cookieStr, ";") {
		kv := strings.SplitN(strings.TrimSpace(raw), "=", 2)
		if len(kv) != 2 {
			continue
		}
		key := strings.TrimSpace(kv[0])
		if key == "" {
			continue
		}
		result[strings.ToLower(key)] = strings.TrimSpace(kv[1])
	}
	return result
}

// detectProvider classifies a stream URL by its host substring.
func detectProvider(raw string) string {
	lowered := strings.ToLower(raw)
	switch {
	case strings.Contains(lowered, "chaturbate.com"):
		return "chaturbate"
	case strings.Contains(lowered, "myfreecams.com"):
		return "mfc"
	default:
		return "unknown"
	}
}
"context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/PuerkitoBio/goquery" + "github.com/grafov/m3u8" +) + +// RecordStreamMFC nimmt vorerst die URL 1:1 und ruft ffmpeg direkt darauf auf. +// In der Praxis musst du hier meist erst eine HLS-URL aus dem HTML extrahieren. +// RecordStreamMFC ist jetzt nur noch ein Wrapper um den bewährten MFC-Flow (runMFC). +func RecordStreamMFC( + ctx context.Context, + hc *HTTPClient, + username string, + outputPath string, + job *RecordJob, +) error { + mfc := NewMyFreeCams(username) + + // ✅ Statt sofort zu failen: kurz auf PUBLIC warten + const waitPublicMax = 2 * time.Minute + deadline := time.Now().Add(waitPublicMax) + + var lastSt *Status + + for { + // Context cancel / stop + if err := ctx.Err(); err != nil { + return err + } + + st, err := mfc.GetStatus() + if err == nil { + tmp := st + lastSt = &tmp + + if st == StatusPublic { + break + } + } + + if time.Now().After(deadline) { + if lastSt == nil { + return fmt.Errorf("mfc: stream wurde nicht public innerhalb %s", waitPublicMax) + } + return fmt.Errorf("mfc: stream ist nicht public nach %s (letzter Status: %s)", waitPublicMax, *lastSt) + + } + + time.Sleep(5 * time.Second) + } + + // ✅ erst jetzt die Video URL holen (weil public) + m3u8URL, err := mfc.GetVideoURL(false) + if err != nil { + return fmt.Errorf("mfc get video url: %w", err) + } + if strings.TrimSpace(m3u8URL) == "" { + return fmt.Errorf("mfc: keine m3u8 URL gefunden") + } + + // ✅ Job erst jetzt sichtbar machen (Stream wirklich verfügbar) + if job != nil { + _ = publishJob(job.ID) + } + + // ✅ Preview starten + if job != nil && job.PreviewDir == "" { + assetID := assetIDForJob(job) + if strings.TrimSpace(assetID) == "" { + assetID = job.ID + } + previewDir := filepath.Join(os.TempDir(), "rec_preview", assetID) + + job.PreviewDir = previewDir + + if err := startPreviewHLS(ctx, job, m3u8URL, previewDir, "", hc.userAgent); 
err != nil { + fmt.Println("⚠️ preview start fehlgeschlagen:", err) + job.PreviewDir = "" // rollback + } + } + + // Aufnahme starten + return handleM3U8Mode(ctx, m3u8URL, outputPath, job) +} + +type MyFreeCams struct { + Username string + Attrs map[string]string + VideoURL string +} + +func NewMyFreeCams(username string) *MyFreeCams { + return &MyFreeCams{ + Username: username, + Attrs: map[string]string{}, + } +} + +func (m *MyFreeCams) GetWebsiteURL() string { + return "https://www.myfreecams.com/#" + m.Username +} + +func (m *MyFreeCams) GetVideoURL(refresh bool) (string, error) { + if !refresh && m.VideoURL != "" { + return m.VideoURL, nil + } + + // Prüfen, ob alle benötigten Attribute vorhanden sind + if _, ok := m.Attrs["data-cam-preview-model-id-value"]; !ok { + return "", nil + } + sid := m.Attrs["data-cam-preview-server-id-value"] + midBase := m.Attrs["data-cam-preview-model-id-value"] + isWzobs := strings.ToLower(m.Attrs["data-cam-preview-is-wzobs-value"]) == "true" + + midInt, err := strconv.Atoi(midBase) + if err != nil { + return "", fmt.Errorf("model-id parse error: %w", err) + } + mid := 100000000 + midInt + a := "" + if isWzobs { + a = "a_" + } + + playlistURL := fmt.Sprintf( + "https://previews.myfreecams.com/hls/NxServer/%s/ngrp:mfc_%s%d.f4v_mobile_mhp1080_previewurl/playlist.m3u8", + sid, a, mid, + ) + + // Validieren (HTTP 200) & ggf. 
auf gewünschte Auflösung verlinken + u, err := getWantedResolutionPlaylist(playlistURL) + if err != nil { + return "", err + } + m.VideoURL = u + return m.VideoURL, nil +} + +func (m *MyFreeCams) GetStatus() (Status, error) { + // 1) share-Seite prüfen (existiert/nicht existiert) + shareURL := "https://share.myfreecams.com/" + m.Username + resp, err := http.Get(shareURL) + if err != nil { + return StatusUnknown, err + } + defer resp.Body.Close() + + if resp.StatusCode == 404 { + return StatusNotExist, nil + } + if resp.StatusCode != 200 { + return StatusUnknown, fmt.Errorf("HTTP %d", resp.StatusCode) + } + + // wir brauchen sowohl Bytes (für Suche) als auch Reader (für HTML) + bodyBytes, err := io.ReadAll(resp.Body) + if err != nil { + return StatusUnknown, err + } + + // 2) „tracking.php?“ suchen und prüfen, ob model_id vorhanden ist + start := bytes.Index(bodyBytes, []byte("https://www.myfreecams.com/php/tracking.php?")) + if start == -1 { + // ohne tracking Parameter -> behandeln wie nicht existent + return StatusNotExist, nil + } + end := bytes.IndexByte(bodyBytes[start:], '"') + if end == -1 { + return StatusUnknown, errors.New("tracking url parse failed") + } + raw := string(bodyBytes[start : start+end]) + u, err := url.Parse(raw) + if err != nil { + return StatusUnknown, fmt.Errorf("tracking url invalid: %w", err) + } + qs := u.Query() + if qs.Get("model_id") == "" { + return StatusNotExist, nil + } + + // 3) HTML parsen und
Attribute auslesen + doc, err := goquery.NewDocumentFromReader(bytes.NewReader(bodyBytes)) + if err != nil { + return StatusUnknown, err + } + + params := doc.Find(".campreview").First() + if params.Length() == 0 { + // keine campreview -> offline + return StatusOffline, nil + } + + attrs := map[string]string{} + params.Each(func(_ int, s *goquery.Selection) { + for _, a := range []string{ + "data-cam-preview-server-id-value", + "data-cam-preview-model-id-value", + "data-cam-preview-is-wzobs-value", + } { + if v, ok := s.Attr(a); ok { + attrs[a] = v + } + } + }) + m.Attrs = attrs + + // 4) Versuchen, VideoURL (Preview-HLS) zu ermitteln + uStr, err := m.GetVideoURL(true) + if err != nil { + return StatusUnknown, err + } + if uStr != "" { + return StatusPublic, nil + } + // campreview vorhanden, aber keine playable url -> „PRIVATE“ + return StatusPrivate, nil +} + +func runMFC(ctx context.Context, username string, outArg string) error { + mfc := NewMyFreeCams(username) + + st, err := mfc.GetStatus() + if err != nil { + return err + } + if st != StatusPublic { + return fmt.Errorf("Stream ist nicht öffentlich (Status: %s)", st) + } + + m3u8URL, err := mfc.GetVideoURL(false) + if err != nil { + return err + } + if m3u8URL == "" { + return errors.New("keine m3u8 URL gefunden") + } + + return handleM3U8Mode(ctx, m3u8URL, outArg, nil) +} + +func getWantedResolutionPlaylist(playlistURL string) (string, error) { + // Holt eine URL; wenn MASTER, wähle beste Variante; wenn MEDIA, gib die URL zurück. 
+ resp, err := http.Get(playlistURL) + if err != nil { + return "", err + } + defer resp.Body.Close() + if resp.StatusCode != 200 { + return "", fmt.Errorf("HTTP %d beim Abruf der m3u8", resp.StatusCode) + } + + playlist, listType, err := m3u8.DecodeFrom(resp.Body, true) + if err != nil { + return "", fmt.Errorf("m3u8 parse: %w", err) + } + if listType == m3u8.MEDIA { + return playlistURL, nil + } + + master := playlist.(*m3u8.MasterPlaylist) + var bestURI string + var bestWidth int + var bestFramerate float64 + + for _, v := range master.Variants { + if v == nil { + continue + } + // Resolution kommt als "WxH" – wir nutzen die Höhe als Vergleichswert. + w := 0 + if v.Resolution != "" { + parts := strings.Split(v.Resolution, "x") + if len(parts) == 2 { + if ww, err := strconv.Atoi(parts[1]); err == nil { + w = ww + } + } + } + fr := 30.0 + if v.FrameRate > 0 { + fr = v.FrameRate + } else if strings.Contains(v.Name, "FPS:60") { + fr = 60 + } + if w > bestWidth || (w == bestWidth && fr > bestFramerate) { + bestWidth = w + bestFramerate = fr + bestURI = v.URI + } + } + if bestURI == "" { + return "", errors.New("Master-Playlist ohne gültige Varianten") + } + + // Absolutieren + root := playlistURL[:strings.LastIndex(playlistURL, "/")+1] + if strings.HasPrefix(bestURI, "http://") || strings.HasPrefix(bestURI, "https://") { + return bestURI, nil + } + return root + bestURI, nil +} + +func handleM3U8Mode(ctx context.Context, m3u8URL, outFile string, job *RecordJob) error { + // Validierung + u, err := url.Parse(m3u8URL) + if err != nil || (u.Scheme != "http" && u.Scheme != "https") { + return fmt.Errorf("ungültige URL: %q", m3u8URL) + } + + // HTTP-Check MIT Context + req, err := http.NewRequestWithContext(ctx, "GET", m3u8URL, nil) + if err != nil { + return err + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + io.Copy(io.Discard, resp.Body) + resp.Body.Close() + + if resp.StatusCode != 200 { + return fmt.Errorf("HTTP %d beim Abruf der 
m3u8", resp.StatusCode) + } + + if strings.TrimSpace(outFile) == "" { + return errors.New("output file path leer") + } + + // ffmpeg mit Context (STOP FUNKTIONIERT HIER!) + cmd := exec.CommandContext( + ctx, + ffmpegPath, + "-y", + "-hide_banner", + "-nostats", + "-loglevel", "warning", + "-i", m3u8URL, + "-c", "copy", + outFile, + ) + + var stderr bytes.Buffer + cmd.Stdout = io.Discard + cmd.Stderr = &stderr + + // ✅ live size polling während ffmpeg läuft + stopStat := make(chan struct{}) + + if job != nil { + go func() { + t := time.NewTicker(1 * time.Second) + defer t.Stop() + + var last int64 + for { + select { + case <-ctx.Done(): + return + case <-stopStat: + return + case <-t.C: + fi, err := os.Stat(outFile) + if err != nil { + continue + } + sz := fi.Size() + if sz > 0 && sz != last { + jobsMu.Lock() + job.SizeBytes = sz + jobsMu.Unlock() + notifyJobsChanged() + last = sz + } + } + } + }() + } + + // ✅ WICHTIG: ffmpeg wirklich laufen lassen + err = cmd.Run() + + close(stopStat) + + if err != nil { + msg := strings.TrimSpace(stderr.String()) + if msg != "" { + return fmt.Errorf("ffmpeg m3u8 failed: %w: %s", err, msg) + } + return fmt.Errorf("ffmpeg m3u8 failed: %w", err) + } + + return nil +} + +/* ─────────────────────────────── + Kleine Helper für MFC + ─────────────────────────────── */ + +func extractMFCUsername(input string) string { + s := strings.TrimSpace(input) + if s == "" { + return "" + } + + // 1) URL mit Fragment (#username) + if u, err := url.Parse(s); err == nil && u.Fragment != "" { + return strings.Trim(strings.TrimSpace(u.Fragment), "/") + } + + // 2) URL Pfad: letztes Segment nehmen + if u, err := url.Parse(s); err == nil && u.Host != "" { + p := strings.Trim(u.Path, "/") + if p == "" { + return "" + } + parts := strings.Split(p, "/") + return strings.TrimSpace(parts[len(parts)-1]) + } + + // 3) Fallback: raw + return s +} diff --git a/backend/routes.go b/backend/routes.go new file mode 100644 index 0000000..3396fc8 --- /dev/null +++ 
b/backend/routes.go @@ -0,0 +1,104 @@ +package main + +import ( + "fmt" + "net/http" +) + +// routes.go (package main) +func registerRoutes(mux *http.ServeMux, auth *AuthManager) *ModelStore { + // -------------------------- + // 1) Public Auth Endpoints + // -------------------------- + mux.HandleFunc("/api/auth/login", authLoginHandler(auth)) + mux.HandleFunc("/api/auth/logout", authLogoutHandler(auth)) + mux.HandleFunc("/api/auth/me", authMeHandler(auth)) + + // 2FA (Authenticator/TOTP) + mux.HandleFunc("/api/auth/2fa/setup", auth2FASetupHandler(auth)) + mux.HandleFunc("/api/auth/2fa/enable", auth2FAEnableHandler(auth)) + // mux.HandleFunc("/api/auth/2fa/disable", auth2FADisableHandler(auth)) + + // -------------------------- + // 2) Protected API Mux + // -------------------------- + api := http.NewServeMux() + + api.HandleFunc("/api/cookies", cookiesHandler) + + api.HandleFunc("/api/record/done/stream", handleDoneStream) + api.HandleFunc("/api/perf/stream", perfStreamHandler) + api.HandleFunc("/api/status/disk", diskStatusHandler) + + api.HandleFunc("/api/autostart/state", autostartStateHandler) + api.HandleFunc("/api/autostart/state/stream", autostartStateStreamHandler) + api.HandleFunc("/api/autostart/pause", autostartPauseQuickHandler) + api.HandleFunc("/api/autostart/resume", autostartResumeHandler) + + api.HandleFunc("/api/settings", recordSettingsHandler) + api.HandleFunc("/api/settings/browse", settingsBrowse) + api.HandleFunc("/api/settings/cleanup", settingsCleanupHandler) + + api.HandleFunc("/api/record", startRecordingFromRequest) + api.HandleFunc("/api/record/status", recordStatus) + api.HandleFunc("/api/record/stop", recordStop) + api.HandleFunc("/api/record/preview", recordPreview) + api.HandleFunc("/api/record/list", recordList) + api.HandleFunc("/api/record/stream", recordStream) + api.HandleFunc("/api/record/video", recordVideo) + api.HandleFunc("/api/record/done", recordDoneList) + api.HandleFunc("/api/record/delete", recordDeleteVideo) + 
api.HandleFunc("/api/record/toggle-hot", recordToggleHot) + api.HandleFunc("/api/record/keep", recordKeepVideo) + api.HandleFunc("/api/record/unkeep", recordUnkeepVideo) + api.HandleFunc("/api/record/restore", recordRestoreVideo) + + api.HandleFunc("/api/chaturbate/online", chaturbateOnlineHandler) + api.HandleFunc("/api/chaturbate/biocontext", chaturbateBioContextHandler) + + api.HandleFunc("/api/generated/teaser", generatedTeaser) + api.HandleFunc("/api/generated/cover", generatedCover) + api.HandleFunc("/api/generated/coverinfo/list", generatedCoverInfoList) + + // Tasks + api.HandleFunc("/api/tasks/generate-assets", tasksGenerateAssets) + + // -------------------------- + // 3) ModelStore + // -------------------------- + modelsPath, _ := resolvePathRelativeToApp("data/models_store.db") + fmt.Println("📦 Models DB:", modelsPath) + + store := NewModelStore(modelsPath) + if err := store.Load(); err != nil { + fmt.Println("⚠️ models load:", err) + } + + setCoverModelStore(store) + RegisterModelAPI(api, store) + setChaturbateOnlineModelStore(store) + + // -------------------------- + // 4) Mount Protected API + // -------------------------- + // /api/auth/* ist schon public am root mux und gewinnt als längeres Pattern. 
+ mux.Handle("/api/", requireAuth(auth, api, false)) + + // -------------------------- + // 5) Mount Protected SPA (/) + // -------------------------- + frontend, ok := makeFrontendHandler() + if ok && frontend != nil { + // allowPaths: login + assets müssen öffentlich sein, sonst Redirect-Loop + mux.Handle("/", requireAuth(auth, frontend, true, + "/login", + "/assets/", + "/favicon.ico", + "/manifest.webmanifest", + "/robots.txt", + "/service-worker.js", + )) + } + + return store +} diff --git a/backend/serve_video.go b/backend/serve_video.go new file mode 100644 index 0000000..b8368df --- /dev/null +++ b/backend/serve_video.go @@ -0,0 +1,218 @@ +package main + +import ( + "bytes" + "context" + "fmt" + "net/http" + "os" + "path/filepath" + "strings" + "time" +) + +func serveVideoFile(w http.ResponseWriter, r *http.Request, path string) { + f, err := openForReadShareDelete(path) + if err != nil { + http.Error(w, "datei öffnen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) + return + } + defer f.Close() + + fi, err := f.Stat() + if err != nil || fi.IsDir() || fi.Size() == 0 { + http.Error(w, "datei nicht gefunden", http.StatusNotFound) + return + } + + w.Header().Set("Cache-Control", "no-store") + w.Header().Set("Accept-Ranges", "bytes") + w.Header().Set("X-Content-Type-Options", "nosniff") + + ext := strings.ToLower(filepath.Ext(path)) + switch ext { + case ".ts": + w.Header().Set("Content-Type", "video/mp2t") + default: + w.Header().Set("Content-Type", "video/mp4") + } + + // ServeContent unterstützt Range Requests (wichtig für Video) + http.ServeContent(w, r, filepath.Base(path), fi.ModTime(), f) +} + +func sniffVideoKind(path string) (string, error) { + f, err := openForReadShareDelete(path) + if err != nil { + return "", err + } + defer f.Close() + + buf := make([]byte, 64) + n, _ := f.Read(buf) + buf = buf[:n] + + // HTML? 
+ trim := bytes.TrimSpace(buf) + if len(trim) >= 1 && trim[0] == '<' { + return "html", nil + } + + // MPEG-TS: 0x47 sync byte + if len(buf) >= 1 && buf[0] == 0x47 { + return "ts", nil + } + + // MP4: "ftyp" typischerweise bei Offset 4 + if len(buf) >= 8 && string(buf[4:8]) == "ftyp" { + return "mp4", nil + } + + return "unknown", nil +} + +func maybeRemuxTS(path string) (string, error) { + path = strings.TrimSpace(path) + if path == "" { + return "", nil + } + + if !strings.EqualFold(filepath.Ext(path), ".ts") { + return "", nil + } + + mp4 := strings.TrimSuffix(path, filepath.Ext(path)) + ".mp4" + + // remux (ohne neu encoden) + if err := remuxTSToMP4(path, mp4); err != nil { + return "", err + } + + _ = os.Remove(path) // TS entfernen, wenn MP4 ok + return mp4, nil +} + +func maybeRemuxTSForJob(job *RecordJob, path string) (string, error) { + path = strings.TrimSpace(path) + if path == "" { + return "", nil + } + + if !strings.EqualFold(filepath.Ext(path), ".ts") { + return "", nil + } + + mp4 := strings.TrimSuffix(path, filepath.Ext(path)) + ".mp4" + + // input size für fallback + var inSize int64 + if fi, err := os.Stat(path); err == nil && !fi.IsDir() { + inSize = fi.Size() + } + + // duration (für sauberen progress) + var durSec float64 + { + durCtx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + durSec, _ = durationSecondsCached(durCtx, path) + cancel() + } + + const base = 10 + const span = 60 // 10..69 (70 startet "moving") + + lastProgress := base + lastTick := time.Now().Add(-time.Second) + + onRatio := func(r float64) { + if r < 0 { + r = 0 + } + if r > 1 { + r = 1 + } + p := base + int(r*float64(span)) + if p >= 70 { + p = 69 + } + + if p <= lastProgress { + return + } + // leicht throttlen + if time.Since(lastTick) < 150*time.Millisecond && p < 79 { + return + } + lastProgress = p + lastTick = time.Now() + setJobPhase(job, "remuxing", p) + } + + remuxCtx, cancel := context.WithTimeout(context.Background(), 30*time.Minute) + 
defer cancel() + + if err := remuxTSToMP4WithProgress(remuxCtx, path, mp4, durSec, inSize, onRatio); err != nil { + return "", err + } + + _ = os.Remove(path) // TS entfernen, wenn MP4 ok + setJobPhase(job, "remuxing", 69) // ✅ Remux finished (nie rückwärts) + return mp4, nil + +} + +func moveToDoneDir(src string) (string, error) { + src = strings.TrimSpace(src) + if src == "" { + return "", fmt.Errorf("src empty") + } + + s := getSettings() + + doneAbs, err := resolvePathRelativeToApp(s.DoneDir) + if err != nil || strings.TrimSpace(doneAbs) == "" { + // fallback + doneAbs = strings.TrimSpace(s.DoneDir) + } + if strings.TrimSpace(doneAbs) == "" { + return "", fmt.Errorf("doneDir empty") + } + + // Quelle normalisieren/abs machen (best effort) + srcAbs := filepath.Clean(src) + if !filepath.IsAbs(srcAbs) { + if abs, rerr := resolvePathRelativeToApp(srcAbs); rerr == nil && strings.TrimSpace(abs) != "" { + srcAbs = abs + } + } + + fi, err := os.Stat(srcAbs) + if err != nil || fi.IsDir() { + return "", fmt.Errorf("src not found: %v", err) + } + + file := filepath.Base(srcAbs) + + // Zielordner: immer done/ (keine model-subdirs) + dstDir := doneAbs + + if err := os.MkdirAll(dstDir, 0o755); err != nil { + return "", err + } + + // Bei Kollisionen eindeutigen Namen wählen + dst, err := uniqueDestPath(dstDir, file) + if err != nil { + return "", err + } + + // Robust verschieben (Windows / Locks / Cross-device) + if err := renameWithRetry(srcAbs, dst); err != nil { + return "", err + } + + // Duration-Cache invalidieren (du nutzt das ja) + purgeDurationCacheForPath(srcAbs) + + return dst, nil +} diff --git a/backend/server.go b/backend/server.go new file mode 100644 index 0000000..6835ac5 --- /dev/null +++ b/backend/server.go @@ -0,0 +1,55 @@ +package main + +import ( + "fmt" + "net/http" + "os" +) + +// --- main --- +func main() { + loadSettings() + + fixKeepRootFilesIntoModelSubdirs() + postWorkQ.StartWorkers(1) + startPostWorkStatusRefresher() + + go 
startGeneratedGarbageCollector() + + mux := http.NewServeMux() + + // ✅ AuthManager erstellen (Beispiel) + // Du brauchst hier typischerweise: + // - ein Secret/Key (Cookie signen / Sessions) + // - Username+Pass Hash oder config + // - optional 2FA store + auth, err := NewAuthManager() + if err != nil { + fmt.Println("❌ auth init:", err) + os.Exit(1) + } + + if err != nil { + fmt.Println("❌ auth init:", err) + os.Exit(1) + } + + store := registerRoutes(mux, auth) + + go startChaturbateOnlinePoller(store) + go startChaturbateAutoStartWorker(store) + go startMyFreeCamsAutoStartWorker(store) + go startDiskSpaceGuard() + + if _, err := ensureCoversDir(); err != nil { + fmt.Println("⚠️ covers dir:", err) + } + + fmt.Println("🌐 HTTP-API aktiv: http://localhost:9999") + + handler := withCORS(mux) + if err := http.ListenAndServe(":9999", handler); err != nil { + fmt.Println("❌ HTTP-Server Fehler:", err) + os.Exit(1) + } +} diff --git a/backend/tasks_assets.go b/backend/tasks_assets.go new file mode 100644 index 0000000..1aeab8a --- /dev/null +++ b/backend/tasks_assets.go @@ -0,0 +1,364 @@ +package main + +import ( + "context" + "fmt" + "net/http" + "os" + "path/filepath" + "strings" + "sync" + "time" +) + +// --------------------------- +// Tasks: Missing Assets erzeugen +// --------------------------- + +type AssetsTaskState struct { + Running bool `json:"running"` + Total int `json:"total"` + Done int `json:"done"` + GeneratedThumbs int `json:"generatedThumbs"` + GeneratedPreviews int `json:"generatedPreviews"` + Skipped int `json:"skipped"` + StartedAt time.Time `json:"startedAt"` + FinishedAt *time.Time `json:"finishedAt,omitempty"` + Error string `json:"error,omitempty"` +} + +var assetsTaskMu sync.Mutex +var assetsTaskState AssetsTaskState +var assetsTaskCancel context.CancelFunc + +func tasksGenerateAssets(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case http.MethodGet: + assetsTaskMu.Lock() + st := assetsTaskState + assetsTaskMu.Unlock() + 
writeJSON(w, http.StatusOK, st) + return + + case http.MethodPost: + assetsTaskMu.Lock() + if assetsTaskState.Running { + st := assetsTaskState + assetsTaskMu.Unlock() + writeJSON(w, http.StatusOK, st) + return + } + + // ✅ cancelbaren Context erzeugen + ctx, cancel := context.WithCancel(context.Background()) + assetsTaskCancel = cancel + + assetsTaskState = AssetsTaskState{ + Running: true, + StartedAt: time.Now(), + } + st := assetsTaskState + assetsTaskMu.Unlock() + + go runGenerateMissingAssets(ctx) + + writeJSON(w, http.StatusOK, st) + return + + case http.MethodDelete: + assetsTaskMu.Lock() + cancel := assetsTaskCancel + running := assetsTaskState.Running + assetsTaskMu.Unlock() + + if !running || cancel == nil { + // nichts zu stoppen + w.WriteHeader(http.StatusNoContent) + return + } + + cancel() + + // optional: sofortiges Feedback in state.error + assetsTaskMu.Lock() + if assetsTaskState.Running { + assetsTaskState.Error = "abgebrochen" + } + st := assetsTaskState + assetsTaskMu.Unlock() + + writeJSON(w, http.StatusOK, st) + return + + default: + http.Error(w, "Nur GET/POST", http.StatusMethodNotAllowed) + return + } +} + +func runGenerateMissingAssets(ctx context.Context) { + finishWithErr := func(err error) { + now := time.Now() + assetsTaskMu.Lock() + assetsTaskState.Running = false + assetsTaskState.FinishedAt = &now + if err != nil { + assetsTaskState.Error = err.Error() + } + assetsTaskMu.Unlock() + } + + defer func() { + assetsTaskMu.Lock() + assetsTaskCancel = nil + assetsTaskMu.Unlock() + }() + + s := getSettings() + doneAbs, err := resolvePathRelativeToApp(s.DoneDir) + if err != nil || strings.TrimSpace(doneAbs) == "" { + finishWithErr(fmt.Errorf("doneDir auflösung fehlgeschlagen: %v", err)) + return + } + + type item struct { + name string + path string + } + + // .trash niemals verarbeiten + isTrashPath := func(full string) bool { + p := strings.ToLower(strings.ReplaceAll(full, "\\", "/")) + return strings.Contains(p, "/.trash/") || 
strings.HasSuffix(p, "/.trash") + } + + seen := map[string]struct{}{} + items := make([]item, 0, 512) + + addIfVideo := func(full string) { + if isTrashPath(full) { + return + } + + name := filepath.Base(full) + low := strings.ToLower(name) + if strings.Contains(low, ".part") || strings.Contains(low, ".tmp") { + return + } + ext := strings.ToLower(filepath.Ext(name)) + if ext != ".mp4" && ext != ".ts" { + return + } + + // Dedupe + if _, ok := seen[full]; ok { + return + } + seen[full] = struct{}{} + items = append(items, item{name: name, path: full}) + } + + scanOneLevel := func(dir string) { + ents, err := os.ReadDir(dir) + if err != nil { + return + } + for _, e := range ents { + // .trash-Ordner nie scannen + if e.IsDir() && strings.EqualFold(e.Name(), ".trash") { + continue + } + + full := filepath.Join(dir, e.Name()) + if e.IsDir() { + sub, err := os.ReadDir(full) + if err != nil { + continue + } + for _, se := range sub { + if se.IsDir() { + continue + } + addIfVideo(filepath.Join(full, se.Name())) + } + continue + } + addIfVideo(full) + } + } + + // ✅ done + done// + done/keep + done/keep// + scanOneLevel(doneAbs) + scanOneLevel(filepath.Join(doneAbs, "keep")) + + assetsTaskMu.Lock() + assetsTaskState.Total = len(items) + assetsTaskState.Done = 0 + assetsTaskState.GeneratedThumbs = 0 + assetsTaskState.GeneratedPreviews = 0 + assetsTaskState.Skipped = 0 + assetsTaskState.Error = "" + assetsTaskMu.Unlock() + + for i, it := range items { + if err := ctx.Err(); err != nil { + finishWithErr(err) + return + } + + base := strings.TrimSuffix(it.name, filepath.Ext(it.name)) + id := stripHotPrefix(base) + if strings.TrimSpace(id) == "" { + assetsTaskMu.Lock() + assetsTaskState.Done = i + 1 + assetsTaskMu.Unlock() + continue + } + + assetDir, derr := ensureGeneratedDir(id) + if derr != nil { + assetsTaskMu.Lock() + assetsTaskState.Error = "mindestens ein Eintrag konnte nicht verarbeitet werden (siehe Logs)" + assetsTaskState.Done = i + 1 + assetsTaskMu.Unlock() + 
fmt.Println("⚠️ ensureGeneratedDir:", derr) + continue + } + + thumbPath := filepath.Join(assetDir, "thumbs.jpg") + previewPath := filepath.Join(assetDir, "preview.mp4") + metaPath := filepath.Join(assetDir, "meta.json") + + thumbOK := func() bool { + fi, err := os.Stat(thumbPath) + return err == nil && !fi.IsDir() && fi.Size() > 0 + }() + previewOK := func() bool { + fi, err := os.Stat(previewPath) + return err == nil && !fi.IsDir() && fi.Size() > 0 + }() + + // Datei-Info (für Meta-Validierung) + vfi, verr := os.Stat(it.path) + if verr != nil || vfi.IsDir() || vfi.Size() <= 0 { + assetsTaskMu.Lock() + assetsTaskState.Done = i + 1 + assetsTaskMu.Unlock() + continue + } + + // ✅ SourceURL best-effort: aus bestehender meta.json, wenn vorhanden/valide + sourceURL := "" + if u, ok := readVideoMetaSourceURL(metaPath, vfi); ok { + sourceURL = u + } + + // ✅ Dauer zuerst aus meta.json, sonst 1× ffprobe & meta.json schreiben + durSec := 0.0 + metaOK := false + + if d, ok := readVideoMetaDuration(metaPath, vfi); ok { + durSec = d + metaOK = true + + // meta ist valide (Duration ok), aber falls wir (irgendwoher) eine SourceURL hätten + // und sie in meta noch fehlt -> meta anreichern ohne ffprobe. + if strings.TrimSpace(sourceURL) != "" { + if u, ok := readVideoMetaSourceURL(metaPath, vfi); !ok || strings.TrimSpace(u) == "" { + _ = writeVideoMetaDuration(metaPath, vfi, durSec, sourceURL) + } + } + } else { + dctx, cancel := context.WithTimeout(ctx, 6*time.Second) + d, derr := durationSecondsCached(dctx, it.path) + cancel() + + if derr == nil && d > 0 { + durSec = d + // ✅ HIER: nicht writeVideoMeta(metaPath, fi, dur, sourceURL) !! 
+ // sondern Duration-only writer nutzen + _ = writeVideoMetaDuration(metaPath, vfi, durSec, sourceURL) + metaOK = true + } + } + + if thumbOK && previewOK && metaOK { + assetsTaskMu.Lock() + assetsTaskState.Skipped++ + assetsTaskState.Done = i + 1 + assetsTaskMu.Unlock() + continue + } + + // ---------------- + // Thumbs + // ---------------- + if !thumbOK { + genCtx, cancel := context.WithTimeout(ctx, 45*time.Second) + if err := thumbSem.Acquire(genCtx); err != nil { + cancel() + finishWithErr(err) + return + } + cancel() // Timeout-Context freigeben, Semaphore bleibt gehalten + defer thumbSem.Release() + + t := 0.0 + if durSec > 0 { + t = durSec * 0.5 + } + + img, e1 := extractFrameAtTimeJPEG(it.path, t) + if e1 != nil || len(img) == 0 { + img, e1 = extractLastFrameJPEG(it.path) + if e1 != nil || len(img) == 0 { + img, e1 = extractFirstFrameJPEG(it.path) + } + } + + // Release wurde defer’t, aber wir wollen pro Iteration releasen: + thumbSem.Release() + + if e1 == nil && len(img) > 0 { + if err := atomicWriteFile(thumbPath, img); err == nil { + assetsTaskMu.Lock() + assetsTaskState.GeneratedThumbs++ + assetsTaskMu.Unlock() + } else { + fmt.Println("⚠️ thumb write:", err) + } + } + } + + // ---------------- + // Preview + // ---------------- + if !previewOK { + genCtx, cancel := context.WithTimeout(ctx, 3*time.Minute) + if err := genSem.Acquire(genCtx); err != nil { + cancel() + finishWithErr(err) + return + } + + err := generateTeaserClipsMP4(genCtx, it.path, previewPath, 1.0, 18) + + genSem.Release() + cancel() + + if err == nil { + assetsTaskMu.Lock() + assetsTaskState.GeneratedPreviews++ + assetsTaskMu.Unlock() + } else { + fmt.Println("⚠️ preview clips:", err) + } + } + + assetsTaskMu.Lock() + assetsTaskState.Done = i + 1 + assetsTaskMu.Unlock() + } + + finishWithErr(nil) +} diff --git a/backend/teaser_preview_ffmpeg.go b/backend/teaser_preview_ffmpeg.go new file mode 100644 index 0000000..614bb16 --- /dev/null +++ b/backend/teaser_preview_ffmpeg.go @@ -0,0 
+1,422 @@ +package main + +import ( + "bufio" + "bytes" + "context" + "fmt" + "math" + "os" + "os/exec" + "strconv" + "strings" + "time" +) + +// Minimale Segmentdauer, damit ffmpeg nicht mit zu kurzen Schnipseln zickt. +const minSegmentDuration = 0.50 // Sekunden + +type TeaserPreviewOptions struct { + Segments int + SegmentDuration float64 + + Width int + Preset string + CRF int + + // wird von uns "hart" auf true gesetzt (Audio ist NICHT optional) + Audio bool + AudioBitrate string + + UseVsync2 bool +} + +// stepSizeAndOffset verteilt die Startpunkte über das Video. +// Rückgabe: stepSize, offset (beide in Sekunden). +func (o TeaserPreviewOptions) stepSizeAndOffset(dur float64) (float64, float64) { + if dur <= 0 { + return 0, 0 + } + + n := o.Segments + if n < 1 { + n = 1 + } + + segDur := o.SegmentDuration + if segDur <= 0 { + segDur = 1 + } + if segDur < minSegmentDuration { + segDur = minSegmentDuration + } + + // letzter sinnvoller Start (kleiner Sicherheitsabstand) + maxStart := dur - 0.05 - segDur + if maxStart < 0 { + maxStart = 0 + } + + // 1 Segment -> Mitte + if n == 1 { + return 0, maxStart * 0.5 + } + + // kleine Ränder, damit nicht immer ganz am Anfang/Ende + margin := 0.05 * maxStart + if margin < 0 { + margin = 0 + } + span := maxStart - 2*margin + if span < 0 { + span = maxStart + margin = 0 + } + + step := 0.0 + if n > 1 { + step = span / float64(n-1) + } + return step, margin +} + +func generateTeaserClipsMP4(ctx context.Context, srcPath, outPath string, clipLenSec float64, maxClips int) error { + return generateTeaserClipsMP4WithProgress(ctx, srcPath, outPath, clipLenSec, maxClips, nil) +} + +func generateTeaserClipsMP4WithProgress( + ctx context.Context, + srcPath, outPath string, + clipLenSec float64, + maxClips int, + onRatio func(r float64), +) error { + // kompatible Defaults aus deiner Signatur -> Options + opts := TeaserPreviewOptions{ + Segments: maxClips, + SegmentDuration: clipLenSec, + + // stash-like Defaults + Width: 640, + 
Preset: "veryfast", + CRF: 21, + Audio: true, + AudioBitrate: "128k", + UseVsync2: false, + } + return generateTeaserPreviewMP4WithProgress(ctx, srcPath, outPath, opts, onRatio) +} + +func generateTeaserChunkMP4(ctx context.Context, src, out string, start, dur float64, opts TeaserPreviewOptions) error { + + // ✅ Audio ist Pflicht (nicht optional) + opts.Audio = true + + tmp := strings.TrimSuffix(out, ".mp4") + ".part.mp4" + segDur := dur + if segDur < minSegmentDuration { + segDur = minSegmentDuration + } + + args := []string{ + "-y", "-hide_banner", "-loglevel", "error", + } + args = append(args, ffmpegInputTol...) + args = append(args, + "-ss", fmt.Sprintf("%.3f", start), + "-t", fmt.Sprintf("%.3f", segDur), + "-i", src, + "-map", "0:v:0", + "-c:v", "libx264", + "-pix_fmt", "yuv420p", + "-profile:v", "high", + "-level", "4.2", + "-preset", opts.Preset, + "-crf", strconv.Itoa(opts.CRF), + "-threads", "4", + ) + + if opts.UseVsync2 { + args = append(args, "-vsync", "2") + } + + if opts.Audio { + args = append(args, + "-map", "0:a:0", // Audio Pflicht + "-c:a", "aac", + "-b:a", opts.AudioBitrate, + "-ac", "2", + "-shortest", + ) + } else { + args = append(args, "-an") + } + + args = append(args, "-movflags", "+faststart", tmp) + + cmd := exec.CommandContext(ctx, ffmpegPath, args...) 
+ var stderr bytes.Buffer + cmd.Stderr = &stderr + if err := cmd.Run(); err != nil { + _ = os.Remove(tmp) + return fmt.Errorf("ffmpeg teaser chunk failed: %v (%s)", err, strings.TrimSpace(stderr.String())) + } + _ = os.Remove(out) + return os.Rename(tmp, out) +} + +func generateTeaserPreviewMP4WithProgress( + ctx context.Context, + srcPath, outPath string, + opts TeaserPreviewOptions, + onRatio func(r float64), +) error { + + // ✅ Audio ist Pflicht (nicht optional) + opts.Audio = true + + // Defaults + if opts.SegmentDuration <= 0 { + opts.SegmentDuration = 1 + } + if opts.Segments <= 0 { + opts.Segments = 18 + } + if opts.Width <= 0 { + opts.Width = 640 + } + if opts.Preset == "" { + opts.Preset = "veryfast" + } + if opts.CRF <= 0 { + opts.CRF = 21 + } + if opts.AudioBitrate == "" { + opts.AudioBitrate = "128k" + } + segDur := opts.SegmentDuration + if segDur < minSegmentDuration { + segDur = minSegmentDuration + } + + // Dauer holen (einmalig; wird gecached) + dur, _ := durationSecondsCached(ctx, srcPath) + + // Kurzvideo-Fallback wie "die andere": + // Wenn Video kürzer als Segments*SegmentDuration -> Single Preview über komplette Dauer + if dur > 0 && dur < segDur*float64(opts.Segments) { + // als 1 Segment behandeln, Duration = dur + opts.Segments = 1 + segDur = dur + } + + // Wenn Dauer unbekannt/zu klein: ab 0 ein Stück + if !(dur > 0) { + if onRatio != nil { + onRatio(0) + } + // hier könntest du auch segDur verwenden; ich nehme min(8, segDur) ähnlich wie vorher + err := generateTeaserChunkMP4(ctx, srcPath, outPath, 0, math.Min(8, segDur), opts) + if onRatio != nil { + onRatio(1) + } + return err + } + + // Startpunkte wie "die andere": offset + i*stepSize + stepSize, offset := opts.stepSizeAndOffset(dur) + + starts := make([]float64, 0, opts.Segments) + for i := 0; i < opts.Segments; i++ { + t := offset + float64(i)*stepSize + + // clamp: sicherstellen, dass wir nicht über Ende hinaus trimmen + maxStart := math.Max(0, dur-0.05-segDur) + if t < 0 { + t = 0 
+ } + if t > maxStart { + t = maxStart + } + if t < 0.05 { + t = 0.05 + } + starts = append(starts, t) + } + + expectedOutSec := float64(len(starts)) * segDur + tmp := strings.TrimSuffix(outPath, ".mp4") + ".part.mp4" + + args := []string{ + "-y", + "-nostats", + "-progress", "pipe:1", + "-hide_banner", + "-loglevel", "error", + } + + // Inputs: pro Segment eigener -ss/-t/-i (wie bei dir) + for _, t := range starts { + args = append(args, ffmpegInputTol...) + args = append(args, + "-ss", fmt.Sprintf("%.3f", t), + "-t", fmt.Sprintf("%.3f", segDur), + "-i", srcPath, + ) + } + + // filter_complex bauen + var fc strings.Builder + for i := range starts { + // stash-like: ScaleWidth(640), pix_fmt yuv420p, profile high/level 4.2 später in output args + fmt.Fprintf(&fc, + "[%d:v]scale=%d:-2,setsar=1,setpts=PTS-STARTPTS[v%d];", + i, opts.Width, i, + ) + + if opts.Audio { + // dein “concat-safe” Audio normalisieren (gute Idee) + fmt.Fprintf(&fc, + "[%d:a]aresample=48000,aformat=channel_layouts=stereo,asetpts=PTS-STARTPTS[a%d];", + i, i, + ) + } + } + + // interleaved concat inputs + for i := range starts { + if opts.Audio { + fmt.Fprintf(&fc, "[v%d][a%d]", i, i) + } else { + fmt.Fprintf(&fc, "[v%d]", i) + } + } + + if opts.Audio { + fmt.Fprintf(&fc, "concat=n=%d:v=1:a=1[v][a]", len(starts)) + } else { + fmt.Fprintf(&fc, "concat=n=%d:v=1:a=0[v]", len(starts)) + } + + args = append(args, "-filter_complex", fc.String()) + + // map outputs + args = append(args, "-map", "[v]") + if opts.Audio { + args = append(args, "-map", "[a]") + } + + // Video encode (stash-like) + args = append(args, + "-c:v", "libx264", + "-pix_fmt", "yuv420p", + "-profile:v", "high", + "-level", "4.2", + "-preset", opts.Preset, + "-crf", strconv.Itoa(opts.CRF), + "-threads", "4", + ) + + if opts.UseVsync2 { + args = append(args, "-vsync", "2") + } + + // Audio encode optional (stash-like 128k), plus dein -ac 2 + if opts.Audio { + args = append(args, + "-c:a", "aac", + "-b:a", opts.AudioBitrate, + "-ac", 
"2", + "-shortest", + ) + } + + args = append(args, "-movflags", "+faststart", tmp) + + cmd := exec.CommandContext(ctx, ffmpegPath, args...) + + stdout, err := cmd.StdoutPipe() + if err != nil { + return err + } + + var stderr bytes.Buffer + cmd.Stderr = &stderr + + if err := cmd.Start(); err != nil { + return err + } + + sc := bufio.NewScanner(stdout) + sc.Buffer(make([]byte, 0, 64*1024), 1024*1024) + + var lastSent float64 + var lastAt time.Time + + send := func(outSec float64, force bool) { + if onRatio == nil { + return + } + if expectedOutSec > 0 && outSec > 0 { + r := outSec / expectedOutSec + if r < 0 { + r = 0 + } + if r > 1 { + r = 1 + } + if r-lastSent < 0.01 && !force { + return + } + if !lastAt.IsZero() && time.Since(lastAt) < 150*time.Millisecond && !force { + return + } + lastSent = r + lastAt = time.Now() + onRatio(r) + return + } + if force { + onRatio(1) + } + } + + var outSec float64 + + for sc.Scan() { + line := strings.TrimSpace(sc.Text()) + if line == "" { + continue + } + parts := strings.SplitN(line, "=", 2) + if len(parts) != 2 { + continue + } + k, v := parts[0], parts[1] + + switch k { + case "out_time_ms": + if n, perr := strconv.ParseInt(strings.TrimSpace(v), 10, 64); perr == nil && n > 0 { + outSec = float64(n) / 1_000_000.0 + send(outSec, false) + } + case "out_time": + if s := parseFFmpegOutTime(v); s > 0 { + outSec = s + send(outSec, false) + } + case "progress": + if strings.TrimSpace(v) == "end" { + send(outSec, true) + } + } + } + + if err := cmd.Wait(); err != nil { + _ = os.Remove(tmp) + return fmt.Errorf("ffmpeg teaser preview failed: %v (%s)", err, strings.TrimSpace(stderr.String())) + } + + _ = os.Remove(outPath) + return os.Rename(tmp, outPath) +}