// backend\record_handlers.go
//
// HTTP handlers for recording jobs: job listing/status/stop, SSE "done" change
// notifications, on-demand video playback (with optional seek + transcode via
// ffmpeg), and the cached "done" directory index with filtering/sorting/paging.
package main

import (
	"context"
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"syscall"
	"time"
)

// RecordRequest is the JSON body for starting a new recording.
type RecordRequest struct {
	URL       string `json:"url"`
	Cookie    string `json:"cookie,omitempty"`
	UserAgent string `json:"userAgent,omitempty"`
	Hidden    bool   `json:"hidden,omitempty"`
}

// doneListResponse is the paged response shape for the done-file listing.
type doneListResponse struct {
	Items      []*RecordJob `json:"items"`
	TotalCount int          `json:"totalCount"`
	Page       int          `json:"page,omitempty"`
	PageSize   int          `json:"pageSize,omitempty"`
}

// doneMetaResp carries a bare item count.
type doneMetaResp struct {
	Count int `json:"count"`
}

// durationReq asks for durations of a set of files.
type durationReq struct {
	Files []string `json:"files"`
}

// durationItem is one per-file duration result (or error).
type durationItem struct {
	File            string  `json:"file"`
	DurationSeconds float64 `json:"durationSeconds,omitempty"`
	Error           string  `json:"error,omitempty"`
}

// undoDeleteToken identifies a trashed file so a delete can be undone.
type undoDeleteToken struct {
	Trash  string `json:"trash"`  // basename in .trash
	RelDir string `json:"relDir"` // dir relative to doneAbs, e.g. ".", "keep/model", "model"
	File   string `json:"file"`   // original basename, e.g. "HOT xyz.mp4"
}

// encodeUndoDeleteToken serializes a token as URL-safe base64 JSON.
func encodeUndoDeleteToken(t undoDeleteToken) (string, error) {
	b, err := json.Marshal(t)
	if err != nil {
		return "", err
	}
	return base64.RawURLEncoding.EncodeToString(b), nil
}

// decodeUndoDeleteToken is the inverse of encodeUndoDeleteToken.
func decodeUndoDeleteToken(raw string) (undoDeleteToken, error) {
	var t undoDeleteToken
	b, err := base64.RawURLEncoding.DecodeString(raw)
	if err != nil {
		return t, err
	}
	if err := json.Unmarshal(b, &t); err != nil {
		return t, err
	}
	return t, nil
}

// isSafeRelDir reports whether rel is a non-escaping relative directory
// (no leading slash, no ".." traversal, no backslashes).
func isSafeRelDir(rel string) bool {
	rel = strings.TrimSpace(rel)
	if rel == "" {
		return false
	}
	// normalize to slash for validation
	rel = filepath.ToSlash(rel)
	if strings.HasPrefix(rel, "/") {
		return false
	}
	clean := path.Clean(rel) // path.Clean => forward slashes
	if clean == "." {
		return true
	}
	if strings.HasPrefix(clean, "../") || clean == ".." {
		return false
	}
	// prevent weird traversal
	if strings.Contains(clean, `\`) {
		return false
	}
	return true
}

// isSafeBasename reports whether name is a plain file name without any
// path separators or traversal components.
func isSafeBasename(name string) bool {
	name = strings.TrimSpace(name)
	if name == "" {
		return false
	}
	if strings.Contains(name, "/") || strings.Contains(name, "\\") {
		return false
	}
	return filepath.Base(name) == name
}

// recordList (GET) returns all visible in-memory jobs, newest first.
func recordList(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Nur GET erlaubt", http.StatusMethodNotAllowed)
		return
	}
	jobsMu.Lock()
	list := make([]*RecordJob, 0, len(jobs))
	for _, j := range jobs {
		// ✅ NEW: do not emit hidden (and nil) jobs -> the UI never sees probe jobs
		if j == nil || j.Hidden {
			continue
		}
		list = append(list, j)
	}
	jobsMu.Unlock()
	// optional: newest first
	sort.Slice(list, func(i, j int) bool { return list[i].StartedAt.After(list[j].StartedAt) })
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Cache-Control", "no-store")
	_ = json.NewEncoder(w).Encode(list)
}

// writeSSE writes one Server-Sent-Events message, splitting the payload into
// "data:" lines as the SSE framing requires.
func writeSSE(w http.ResponseWriter, data []byte) {
	// SSE spec: prefix every line with "data:"
	s := strings.ReplaceAll(string(data), "\r\n", "\n")
	lines := strings.Split(s, "\n")
	for _, line := range lines {
		fmt.Fprintf(w, "data: %s\n", line)
	}
	fmt.Fprint(w, "\n")
}

// handleDoneStream streams "doneChanged" SSE events to the client until the
// request context is cancelled. Events are fanned out through doneHub.
func handleDoneStream(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/event-stream")
	w.Header().Set("Cache-Control", "no-cache")
	w.Header().Set("Connection", "keep-alive")
	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "streaming unsupported", http.StatusInternalServerError)
		return
	}
	ch := make(chan []byte, 16)
	doneHub.add(ch)
	defer doneHub.remove(ch)
	// optional: initial ping/hello so the client is immediately "alive"
	fmt.Fprintf(w, "event: doneChanged\ndata: {\"type\":\"doneChanged\",\"seq\":%d,\"ts\":%d}\n\n", atomic.LoadUint64(&doneSeq), time.Now().UnixMilli())
	flusher.Flush()
	ctx := r.Context()
	for {
		select {
		case <-ctx.Done():
			return
		case b := <-ch:
			// important: set the event name -> client can addEventListener("doneChanged", ...)
			fmt.Fprintf(w, "event: doneChanged\ndata: %s\n\n", b)
			flusher.Flush()
		}
	}
}

// handleRecordVideo is a thin route alias for recordVideo.
func handleRecordVideo(w http.ResponseWriter, r *http.Request) {
	recordVideo(w, r)
}

// startRecordingFromRequest (POST) decodes a RecordRequest and starts a job
// via startRecordingInternal, echoing the created job as JSON.
func startRecordingFromRequest(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Nur POST erlaubt", http.StatusMethodNotAllowed)
		return
	}
	var req RecordRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	job, err := startRecordingInternal(req)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(job)
}

// ---- track if headers/body were already written ----
// (Go methods must be at package scope)

// rwTrack wraps an http.ResponseWriter and records whether any header/body
// bytes have been written, so late errors can be suppressed.
type rwTrack struct {
	http.ResponseWriter
	wrote bool
}

// WriteHeader forwards the first status write and ignores the rest.
func (t *rwTrack) WriteHeader(statusCode int) {
	if t.wrote {
		return
	}
	t.wrote = true
	t.ResponseWriter.WriteHeader(statusCode)
}

// Write marks the response as started and forwards the bytes.
func (t *rwTrack) Write(p []byte) (int, error) {
	if !t.wrote {
		t.wrote = true
	}
	return t.ResponseWriter.Write(p)
}

// recordVideo serves a recorded video identified by ?file= or ?id=, with
// optional seek (?start/?t/?progress), resolution transcode (?resolution/
// ?quality), live pipe streaming (?stream=1), and on-demand TS->MP4 remux.
func recordVideo(w http.ResponseWriter, r *http.Request) {
	// ---- wrap writer to detect "already wrote" ----
	tw := &rwTrack{ResponseWriter: w}
	w = tw
	writeErr := func(code int, msg string) {
		// Once headers/body are out we must NOT call http.Error anymore,
		// otherwise we get "superfluous response.WriteHeader".
		if tw.wrote {
			fmt.Println("[recordVideo] late error (headers already sent):", code, msg)
			return
		}
		http.Error(w, msg, code) // uses WriteHeader+Write -> tw.wrote becomes true automatically
	}
	writeStatus := func(code int) {
		if tw.wrote {
			return
		}
		w.WriteHeader(code) // goes through rwTrack.WriteHeader
	}
	// ---- CORS ----
	origin := r.Header.Get("Origin")
	if origin != "" {
		w.Header().Set("Access-Control-Allow-Origin", origin)
		w.Header().Set("Vary", "Origin")
		w.Header().Set("Access-Control-Allow-Methods", "GET,HEAD,OPTIONS")
		// Important: browsers often send If-Range / If-Modified-Since / If-None-Match
		// with video range requests. If those are not allowed the preflight fails
		// -> VideoJS sees a "NETWORK error".
		w.Header().Set("Access-Control-Allow-Headers", "Range, If-Range, If-Modified-Since, If-None-Match")
		w.Header().Set("Access-Control-Expose-Headers", "Content-Length, Content-Range, Accept-Ranges, ETag, Last-Modified, X-Transcode-Offset-Seconds")
		w.Header().Set("Access-Control-Allow-Credentials", "true")
	}
	if r.Method == http.MethodOptions {
		writeStatus(http.StatusNoContent)
		return
	}
	// ---- query normalize ----
	// New: resolution=LOW|MEDIUM|HIGH|ORIGINAL
	res := strings.TrimSpace(r.URL.Query().Get("resolution"))
	// Backwards compat: old frontends may still send quality
	if res == "" {
		res = strings.TrimSpace(r.URL.Query().Get("quality"))
	}
	// Normalize: auto/original => empty (== "ORIGINAL" profile)
	if strings.EqualFold(res, "auto") || strings.EqualFold(res, "original") {
		res = ""
	}
	// Validate (when set)
	if res != "" {
		if _, ok := profileFromResolution(res); !ok {
			writeErr(http.StatusBadRequest, "ungültige resolution")
			return
		}
	}
	rawProgress := strings.TrimSpace(r.URL.Query().Get("progress"))
	if rawProgress == "" {
		rawProgress = strings.TrimSpace(r.URL.Query().Get("p"))
	}
	// ---- startSec parse (seek position in seconds) ----
	startSec := 0
	startFrac := -1.0 // if 0..1 => progress fraction (currentProgress)
	raw := strings.TrimSpace(r.URL.Query().Get("start"))
	if raw == "" {
		raw = strings.TrimSpace(r.URL.Query().Get("t"))
	}
	parseFracOrSeconds := func(s string) {
		s = strings.TrimSpace(s)
		if s == "" {
			return
		}
		// allow "hh:mm:ss" / "mm:ss"
		if strings.Contains(s, ":") {
			parts := strings.Split(s, ":")
			ok := true
			vals := make([]int, 0, len(parts))
			for _, p := range parts {
				p = strings.TrimSpace(p)
				n, err := strconv.Atoi(p)
				if err != nil || n < 0 {
					ok = false
					break
				}
				vals = append(vals, n)
			}
			if ok {
				if len(vals) == 2 {
					startSec = vals[0]*60 + vals[1]
					return
				} else if len(vals) == 3 {
					startSec = vals[0]*3600 + vals[1]*60 + vals[2]
					return
				}
			}
			return
		}
		// number: seconds OR fraction
		f, err := strconv.ParseFloat(s, 64)
		if err != nil {
			return
		}
		if f <= 0 {
			return
		}
		// < 1.0 => treat as fraction (currentProgress)
		if f > 0 && f < 1.0 {
			startFrac = f
			return
		}
		// >= 1.0 => treat as seconds (floor)
		startSec = int(f)
	}
	parseFracOrSeconds(raw)
	// optional explicit progress overrides fraction
	if rawProgress != "" {
		f, err := strconv.ParseFloat(strings.TrimSpace(rawProgress), 64)
		if err == nil && f > 0 && f < 1.0 {
			startFrac = f
		}
	}
	if startSec < 0 {
		startSec = 0
	}
	// ---- resolve outPath from file or id ----
	resolveOutPath := func() (string, bool) {
		// ✅ playback via file name (for doneDir / recordDir)
		if rawFile := strings.TrimSpace(r.URL.Query().Get("file")); rawFile != "" {
			file, err := url.QueryUnescape(rawFile)
			if err != nil {
				writeErr(http.StatusBadRequest, "ungültiger file")
				return "", false
			}
			file = strings.TrimSpace(file)
			// no path, no backslashes, no traversal
			if file == "" || strings.Contains(file, "/") || strings.Contains(file, "\\") || filepath.Base(file) != file {
				writeErr(http.StatusBadRequest, "ungültiger file")
				return "", false
			}
			ext := strings.ToLower(filepath.Ext(file))
			if ext != ".mp4" && ext != ".ts" {
				writeErr(http.StatusForbidden, "nicht erlaubt")
				return "", false
			}
			s := getSettings()
			recordAbs, err := resolvePathRelativeToApp(s.RecordDir)
			if err != nil {
				writeErr(http.StatusInternalServerError, "recordDir auflösung fehlgeschlagen: "+err.Error())
				return "", false
			}
			doneAbs, err := resolvePathRelativeToApp(s.DoneDir)
			if err != nil {
				writeErr(http.StatusInternalServerError, "doneDir auflösung fehlgeschlagen: "+err.Error())
				return "", false
			}
			// Candidates: first done (incl. 1 level of subdirs, but without "keep"),
			// then keep (incl. 1 level of subdirs), then recordDir
			names := []string{file}
			if ext == ".ts" {
				names = append(names, strings.TrimSuffix(file, ext)+".mp4")
			}
			var outPath string
			for _, name := range names {
				if p, _, ok := findFileInDirOrOneLevelSubdirs(doneAbs, name, "keep"); ok {
					outPath = p
					break
				}
				if p, _, ok := findFileInDirOrOneLevelSubdirs(filepath.Join(doneAbs, "keep"), name, ""); ok {
					outPath = p
					break
				}
				if p, _, ok := findFileInDirOrOneLevelSubdirs(recordAbs, name, ""); ok {
					outPath = p
					break
				}
			}
			if outPath == "" {
				writeErr(http.StatusNotFound, "datei nicht gefunden")
				return "", false
			}
			return filepath.Clean(strings.TrimSpace(outPath)), true
		}
		// ✅ OLD: playback via job ID (only works while the job exists in RAM)
		id := strings.TrimSpace(r.URL.Query().Get("id"))
		if id == "" {
			writeErr(http.StatusBadRequest, "id fehlt")
			return "", false
		}
		jobsMu.Lock()
		job, ok := jobs[id]
		jobsMu.Unlock()
		if !ok {
			writeErr(http.StatusNotFound, "job nicht gefunden")
			return "", false
		}
		outPath := filepath.Clean(strings.TrimSpace(job.Output))
		if outPath == "" {
			writeErr(http.StatusNotFound, "output fehlt")
			return "", false
		}
		if !filepath.IsAbs(outPath) {
			abs, err := resolvePathRelativeToApp(outPath)
			if err != nil {
				writeErr(http.StatusInternalServerError, "pfad auflösung fehlgeschlagen: "+err.Error())
				return "", false
			}
			outPath = abs
		}
		fi, err := os.Stat(outPath)
		if err != nil || fi.IsDir() || fi.Size() == 0 {
			writeErr(http.StatusNotFound, "datei nicht gefunden")
			return "", false
		}
		return outPath, true
	}
	outPath, ok := resolveOutPath()
	if !ok {
		return
	}
	// ---- convert progress fraction to seconds (if needed) ----
	if startSec == 0 && startFrac > 0 && startFrac < 1.0 {
		// ffprobe duration (cached)
		if err := ensureFFprobeAvailable(); err == nil {
			pctx, cancel := context.WithTimeout(r.Context(), 5*time.Second)
			dur, derr := getVideoDurationSecondsCached(pctx, outPath)
			cancel()
			if derr == nil && dur > 0 {
				startSec = int(startFrac * dur)
			}
		}
	}
	// sanitize + optional bucket align (like GOP-ish seeking)
	if startSec < 0 {
		startSec = 0
	}
	startSec = (startSec / 2) * 2
	// ---- TS -> MP4 (on-demand remux) ----
	if strings.ToLower(filepath.Ext(outPath)) == ".ts" {
		newOut, err := maybeRemuxTS(outPath)
		if err != nil {
			writeErr(http.StatusInternalServerError, "TS Remux fehlgeschlagen: "+err.Error())
			return
		}
		if strings.TrimSpace(newOut) == "" {
			writeErr(http.StatusInternalServerError, "TS kann im Browser nicht abgespielt werden; Remux hat keine MP4 erzeugt")
			return
		}
		outPath = filepath.Clean(strings.TrimSpace(newOut))
		fi, err := os.Stat(outPath)
		if err != nil || fi.IsDir() || fi.Size() == 0 || strings.ToLower(filepath.Ext(outPath)) != ".mp4" {
			writeErr(http.StatusInternalServerError, "Remux-Ergebnis ungültig")
			return
		}
	}
	// ✅ If the file is named ".mp4" but is actually TS/HTML -> do not serve it as MP4
	if strings.ToLower(filepath.Ext(outPath)) == ".mp4" {
		kind, _ := sniffVideoKind(outPath)
		switch kind {
		case "ts":
			newOut, err := maybeRemuxTS(outPath)
			if err != nil {
				writeErr(http.StatusInternalServerError, "Datei ist TS (nur .mp4 benannt); Remux fehlgeschlagen: "+err.Error())
				return
			}
			// NOTE(review): unlike the .ts branch above, newOut is not checked for
			// "" / existence here before use — confirm maybeRemuxTS guarantees a path on nil error.
			outPath = filepath.Clean(strings.TrimSpace(newOut))
		case "html":
			writeErr(http.StatusInternalServerError, "Server liefert HTML statt Video (Pfad/Lookup prüfen)")
			return
		}
	}
	// ---- Quality / Transcode handling ----
	w.Header().Set("Cache-Control", "no-store")
	stream := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("stream")))
	wantStream := stream == "1" || stream == "true" || stream == "yes"
	// ✅ When quality is set:
	if res != "" {
		prof, _ := profileFromResolution(res)
		// ✅ if the source is already <= target height: serve ORIGINAL
		// BUT ONLY when we neither seek nor want to stream.
		if prof.Height > 0 && startSec == 0 && !wantStream {
			if err := ensureFFprobeAvailable(); err == nil {
				pctx, cancel := context.WithTimeout(r.Context(), 5*time.Second)
				defer cancel()
				if srcH, err := getVideoHeightCached(pctx, outPath); err == nil && srcH > 0 {
					if srcH <= prof.Height+8 {
						serveVideoFile(w, r, outPath)
						return
					}
				}
			}
		}
		// ✅ 1) Seek (startSec>0): default = transcode a segment file and then serve it
		// normally (range-capable). stream=1 can still force the "live pipe".
		if startSec > 0 && !wantStream {
			segPath, terr := maybeTranscodeForRequest(r.Context(), outPath, res, startSec)
			if terr != nil {
				writeErr(http.StatusInternalServerError, "transcode failed: "+terr.Error())
				return
			}
			// ✅ set the offset ONLY when we really serve from startSec (segment)
			w.Header().Set("X-Transcode-Offset-Seconds", strconv.Itoa(startSec))
			serveVideoFile(w, r, segPath)
			return
		}
		// ✅ 2) stream=1 OR startSec>0 with stream=true: pipe-stream
		if wantStream || startSec > 0 {
			if startSec > 0 {
				// ✅ set the offset ONLY when we really serve from startSec (stream)
				w.Header().Set("X-Transcode-Offset-Seconds", strconv.Itoa(startSec))
			}
			if err := serveTranscodedStreamAt(r.Context(), w, outPath, prof, startSec); err != nil {
				if errors.Is(err, context.Canceled) {
					return
				}
				writeErr(http.StatusInternalServerError, "transcode stream failed: "+err.Error())
				return
			}
			return
		}
		// ✅ 3) startSec==0: full-file cache transcode (as before)
		if startSec == 0 {
			segPath, terr := maybeTranscodeForRequest(r.Context(), outPath, res, 0)
			if terr != nil {
				writeErr(http.StatusInternalServerError, "transcode failed: "+terr.Error())
				return
			}
			serveVideoFile(w, r, segPath)
			return
		}
	}
	// ✅ Full-file cache transcode only when startSec == 0
	// NOTE(review): this block is unreachable — every path inside the
	// `if res != ""` block above returns, so res is always "" here.
	if res != "" && startSec == 0 {
		var terr error
		outPath, terr = maybeTranscodeForRequest(r.Context(), outPath, res, startSec)
		if terr != nil {
			writeErr(http.StatusInternalServerError, "transcode failed: "+terr.Error())
			return
		}
	}
	serveVideoFile(w, r, outPath)
}

// flushWriter flushes the HTTP response after every write so streamed
// ffmpeg output reaches the client promptly.
type flushWriter struct {
	w http.ResponseWriter
	f http.Flusher
}

// Write forwards to the response writer and flushes when a Flusher is present.
func (fw flushWriter) Write(p []byte) (int, error) {
	n, err := fw.w.Write(p)
	if fw.f != nil {
		fw.f.Flush()
	}
	return n, err
}

// isClientDisconnectErr reports whether err looks like the client going away
// (cancelled context, closed connection, Windows socket resets, etc.).
func isClientDisconnectErr(err error) bool {
	if err == nil {
		return false
	}
	if errors.Is(err, context.Canceled) || errors.Is(err, net.ErrClosed) || errors.Is(err, io.ErrClosedPipe) {
		return true
	}
	// Windows / net/http typical cases
	var op *net.OpError
	if errors.As(err, &op) {
		// op.Err can be syscall.Errno(10054/10053/...)
		if se, ok := op.Err.(syscall.Errno); ok {
			switch int(se) {
			case 10054, 10053, 10058: // WSAECONNRESET, WSAECONNABORTED, WSAESHUTDOWN
				return true
			}
		}
	}
	msg := strings.ToLower(err.Error())
	if strings.Contains(msg, "broken pipe") || strings.Contains(msg, "connection reset") || strings.Contains(msg, "forcibly closed") || strings.Contains(msg, "wsasend") || strings.Contains(msg, "wsarecv") {
		return true
	}
	return false
}

// serveTranscodedStream streams a transcode of inPath from the beginning.
func serveTranscodedStream(ctx context.Context, w http.ResponseWriter, inPath string, prof TranscodeProfile) error {
	return serveTranscodedStreamAt(ctx, w, inPath, prof, 0)
}

// serveTranscodedStreamAt pipes ffmpeg output for inPath (seeked to startSec)
// straight to the client as a non-range-capable MP4 stream. Client
// disconnects and context cancellation are treated as success.
func serveTranscodedStreamAt(ctx context.Context, w http.ResponseWriter, inPath string, prof TranscodeProfile, startSec int) error {
	if err := ensureFFmpegAvailable(); err != nil {
		return err
	}
	// ffmpeg args (with -ss before -i)
	args := buildFFmpegStreamArgsAt(inPath, prof, startSec)
	cmd := exec.CommandContext(ctx, "ffmpeg", args...)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return err
	}
	if err := cmd.Start(); err != nil {
		return err
	}
	// stderr MUST be drained, otherwise ffmpeg can block
	go func() {
		_, _ = io.ReadAll(stderr)
		_ = cmd.Wait()
	}()
	w.Header().Set("Cache-Control", "no-store")
	w.Header().Set("Content-Type", "video/mp4")
	w.Header().Set("Accept-Ranges", "none")
	w.WriteHeader(http.StatusOK)
	// flush continuously
	var out io.Writer = w
	if f, ok := w.(http.Flusher); ok {
		out = flushWriter{w: w, f: f}
	}
	_, copyErr := io.Copy(out, stdout)
	// client aborted -> not an error
	if copyErr != nil {
		if isClientDisconnectErr(copyErr) {
			return nil
		}
	}
	// If the request context is gone: also ok (quality switch, seek, tab closed)
	if ctx.Err() != nil && errors.Is(ctx.Err(), context.Canceled) {
		return nil
	}
	return copyErr
}

// buildFFmpegStreamArgsAt extends buildFFmpegStreamArgs with a "-ss startSec"
// input seek inserted directly before "-i".
func buildFFmpegStreamArgsAt(inPath string, prof TranscodeProfile, startSec int) []string {
	args := buildFFmpegStreamArgs(inPath, prof)
	if startSec <= 0 {
		return args
	}
	// Insert "-ss " before "-i"
	out := make([]string, 0, len(args)+2)
	inserted := false
	for i := 0; i < len(args); i++ {
		if !inserted && args[i] == "-i" {
			out = append(out, "-ss", strconv.Itoa(startSec))
			inserted = true
		}
		out = append(out, args[i])
	}
	// Fallback: if "-i" is not found, prepend it
	if !inserted {
		return append([]string{"-ss", strconv.Itoa(startSec)}, args...)
	}
	return out
}

// recordStatus returns the in-memory job for ?id= as JSON.
// NOTE(review): unlike recordStop, no Content-Type header is set here and the
// Encode error is unchecked — consider aligning with the other handlers.
func recordStatus(w http.ResponseWriter, r *http.Request) {
	id := r.URL.Query().Get("id")
	if id == "" {
		http.Error(w, "id fehlt", http.StatusBadRequest)
		return
	}
	jobsMu.Lock()
	job, ok := jobs[id]
	jobsMu.Unlock()
	if !ok {
		http.Error(w, "job nicht gefunden", http.StatusNotFound)
		return
	}
	json.NewEncoder(w).Encode(job)
}

// recordStop (POST) stops the job identified by ?id= and echoes it as JSON.
func recordStop(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Nur POST", http.StatusMethodNotAllowed)
		return
	}
	id := r.URL.Query().Get("id")
	jobsMu.Lock()
	job, ok := jobs[id]
	jobsMu.Unlock()
	if !ok {
		http.Error(w, "job nicht gefunden", http.StatusNotFound)
		return
	}
	stopJobsInternal([]*RecordJob{job})
	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(job)
}

// buildDoneIndex scans doneAbs (one subdir level deep, skipping ".trash",
// scanning "keep" separately) and returns the item list plus pre-sorted
// index slices keyed by "<includeKeep 0|1>|<sortMode>".
// NOTE(review): recordDoneList defines a local closure with the same name
// that shadows this function — verify which one is actually in use.
func buildDoneIndex(doneAbs string) ([]doneIndexItem, map[string][]int) {
	items := make([]doneIndexItem, 0, 2048)
	sortedIdx := make(map[string][]int)
	isTrashPath := func(full string) bool {
		p := strings.ToLower(filepath.ToSlash(strings.TrimSpace(full)))
		return strings.Contains(p, "/.trash/") || strings.HasSuffix(p, "/.trash")
	}
	addFile := func(full string, fi os.FileInfo) {
		if fi == nil || fi.IsDir() || fi.Size() == 0 {
			return
		}
		if isTrashPath(full) {
			return
		}
		name := filepath.Base(full)
		ext := strings.ToLower(filepath.Ext(name))
		if ext != ".mp4" && ext != ".ts" {
			return
		}
		// keep?
		p := strings.ToLower(filepath.ToSlash(full))
		fromKeep := strings.Contains(p, "/keep/")
		// started/ended
		t := fi.ModTime()
		start := t
		base := strings.TrimSuffix(name, filepath.Ext(name))
		stem := strings.TrimPrefix(base, "HOT ")
		if m := startedAtFromFilenameRe.FindStringSubmatch(stem); m != nil {
			mm, _ := strconv.Atoi(m[2])
			dd, _ := strconv.Atoi(m[3])
			yy, _ := strconv.Atoi(m[4])
			hh, _ := strconv.Atoi(m[5])
			mi, _ := strconv.Atoi(m[6])
			ss, _ := strconv.Atoi(m[7])
			start = time.Date(yy, time.Month(mm), dd, hh, mi, ss, 0, time.Local)
		}
		// modelKey (lower) – uses the existing project logic
		mk := strings.ToLower(strings.TrimSpace(modelKeyFromFilenameOrPath(name, full, doneAbs)))
		if mk == "" {
			// fallback: parent dir (skip keep)
			parent := strings.ToLower(strings.TrimSpace(filepath.Base(filepath.Dir(full))))
			if parent != "" && parent != "keep" {
				mk = parent
			}
		}
		// fileSort (strip the hot prefix)
		fs := strings.ToLower(name)
		fs = strings.TrimPrefix(fs, "hot ")
		// duration + srcURL (meta.json first, then cache-only)
		dur := 0.0
		srcURL := ""
		id := stripHotPrefix(strings.TrimSuffix(filepath.Base(full), filepath.Ext(full)))
		if strings.TrimSpace(id) != "" {
			if mp, err := generatedMetaFile(id); err == nil {
				if d, ok := readVideoMetaDuration(mp, fi); ok {
					dur = d
				}
				if u, ok := readVideoMetaSourceURL(mp, fi); ok {
					srcURL = u
				}
			}
		}
		if dur <= 0 {
			dur = durationSecondsCacheOnly(full, fi)
		}
		ended := t
		items = append(items, doneIndexItem{
			job: &RecordJob{
				ID:              base,
				Output:          full,
				SourceURL:       srcURL,
				Status:          JobFinished,
				StartedAt:       start,
				EndedAt:         &ended,
				DurationSeconds: dur,
				SizeBytes:       fi.Size(),
			},
			endedAt:  ended,
			fileSort: fs,
			fromKeep: fromKeep,
			modelKey: mk,
		})
	}
	scanDir := func(dir string, skipKeep bool) {
		entries, err := os.ReadDir(dir)
		if err != nil {
			return
		}
		for _, e := range entries {
			if e.IsDir() {
				if strings.EqualFold(e.Name(), ".trash") {
					continue
				}
				if skipKeep && e.Name() == "keep" {
					continue
				}
				sub := filepath.Join(dir, e.Name())
				subs, err := os.ReadDir(sub)
				if err != nil {
					continue
				}
				for _, se := range subs {
					if se.IsDir() {
						continue
					}
					full := filepath.Join(sub, se.Name())
					fi, err := os.Stat(full)
					if err != nil {
						continue
					}
					addFile(full, fi)
				}
				continue
			}
			full := filepath.Join(dir, e.Name())
			fi, err := os.Stat(full)
			if err != nil {
				continue
			}
			addFile(full, fi)
		}
	}
	// done (without keep)
	scanDir(doneAbs, true)
	// keep (optionally in the index so includeKeep is fast)
	scanDir(filepath.Join(doneAbs, "keep"), false)
	// Pre-sort for the common case: includeKeep true/false and the sort modes
	// (only used when NO model filter is applied)
	mkSorted := func(includeKeep bool, sortMode string) []int {
		idx := make([]int, 0, len(items))
		for i := range items {
			if !includeKeep && items[i].fromKeep {
				continue
			}
			idx = append(idx, i)
		}
		durationForSort := func(it doneIndexItem) (float64, bool) {
			if it.job.DurationSeconds > 0 {
				return it.job.DurationSeconds, true
			}
			return 0, false
		}
		sort.Slice(idx, func(a, b int) bool {
			A := items[idx[a]]
			B := items[idx[b]]
			ta, tb := A.endedAt, B.endedAt
			switch sortMode {
			case "completed_asc":
				if !ta.Equal(tb) {
					return ta.Before(tb)
				}
				return A.fileSort < B.fileSort
			case "completed_desc":
				if !ta.Equal(tb) {
					return ta.After(tb)
				}
				return A.fileSort < B.fileSort
			case "file_asc":
				if A.fileSort != B.fileSort {
					return A.fileSort < B.fileSort
				}
				if !ta.Equal(tb) {
					return ta.After(tb)
				}
				return A.fileSort < B.fileSort
			case "file_desc":
				if A.fileSort != B.fileSort {
					return A.fileSort > B.fileSort
				}
				if !ta.Equal(tb) {
					return ta.After(tb)
				}
				return A.fileSort < B.fileSort
			case "duration_asc":
				da, okA := durationForSort(A)
				db, okB := durationForSort(B)
				if okA != okB {
					return okA
				}
				if okA && okB && da != db {
					return da < db
				}
				if !ta.Equal(tb) {
					return ta.After(tb)
				}
				return A.fileSort < B.fileSort
			case "duration_desc":
				da, okA := durationForSort(A)
				db, okB := durationForSort(B)
				if okA != okB {
					return okA
				}
				if okA && okB && da != db {
					return da > db
				}
				if !ta.Equal(tb) {
					return ta.After(tb)
				}
				return A.fileSort < B.fileSort
			case "size_asc":
				if A.job.SizeBytes != B.job.SizeBytes {
					return A.job.SizeBytes < B.job.SizeBytes
				}
				if !ta.Equal(tb) {
					return ta.After(tb)
				}
				return A.fileSort < B.fileSort
			case "size_desc":
				if A.job.SizeBytes != B.job.SizeBytes {
					return A.job.SizeBytes > B.job.SizeBytes
				}
				if !ta.Equal(tb) {
					return ta.After(tb)
				}
				return A.fileSort < B.fileSort
			default:
				if !ta.Equal(tb) {
					return ta.After(tb)
				}
				return A.fileSort < B.fileSort
			}
		})
		return idx
	}
	modes := []string{
		"completed_desc", "completed_asc",
		"file_asc", "file_desc",
		"duration_asc", "duration_desc",
		"size_asc", "size_desc",
	}
	for _, m := range modes {
		sortedIdx["0|"+m] = mkSorted(false, m)
		sortedIdx["1|"+m] = mkSorted(true, m)
	}
	return items, sortedIdx
}

// ⬆️ Add to the import block (if not already there):
// import "sync"

// doneIndexItem is one cached done-file entry plus its pre-computed sort keys.
type doneIndexItem struct {
	job      *RecordJob
	endedAt  time.Time
	fileSort string
	fromKeep bool
	modelKey string // lower
}

// doneIndexCache caches the scanned done index; invalidated by doneSeq or TTL.
type doneIndexCache struct {
	mu        sync.Mutex
	builtAt   time.Time
	seq       uint64
	doneAbs   string
	items     []doneIndexItem
	sortedIdx map[string][]int // key: "<includeKeep 0|1>|<sortMode>"
}

var doneCache doneIndexCache

// recordDoneList (GET) lists finished recordings from the done directory with
// optional keep inclusion, model filter, sorting, and pagination, backed by
// doneCache.
func recordDoneList(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Nur GET erlaubt", http.StatusMethodNotAllowed)
		return
	}
	// ✅ optional: also include /done/keep/ (default: false)
	qKeep := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("includeKeep")))
	includeKeep := qKeep == "1" || qKeep == "true" || qKeep == "yes"
	// ✅ NEW: optional model filter (pagination is then sensibly "per model")
	normalizeQueryModel := func(raw string) string {
		s := strings.TrimSpace(raw)
		if s == "" {
			return ""
		}
		s = strings.TrimPrefix(s, "http://")
		s = strings.TrimPrefix(s, "https://")
		// last URL segment, in case someone passes "…/modelname"
		if strings.Contains(s, "/") {
			parts := strings.Split(s, "/")
			for i := len(parts) - 1; i >= 0; i-- {
				p := strings.TrimSpace(parts[i])
				if p != "" {
					s = p
					break
				}
			}
		}
		// in case "host:model" is passed
		if strings.Contains(s, ":") {
			parts := strings.Split(s, ":")
			s = strings.TrimSpace(parts[len(parts)-1])
		}
		s = strings.TrimPrefix(s, "@")
		return strings.ToLower(strings.TrimSpace(s))
	}
	qModel := normalizeQueryModel(r.URL.Query().Get("model"))
	// optional: pagination (1-based). If page/pageSize are missing -> as before: full list
	page := 0
	pageSize := 0
	if v := strings.TrimSpace(r.URL.Query().Get("page")); v != "" {
		if n, err := strconv.Atoi(v); err == nil && n > 0 {
			page = n
		}
	}
	if v := strings.TrimSpace(r.URL.Query().Get("pageSize")); v != "" {
		if n, err := strconv.Atoi(v); err == nil && n > 0 {
			pageSize = n
		}
	}
	// optional: sort
	// supported: completed_(asc|desc), model_(asc|desc), file_(asc|desc), duration_(asc|desc), size_(asc|desc)
	sortMode := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("sort")))
	if sortMode == "" {
		sortMode = "completed_desc"
	}
	// ⚠️ Backwards compat: map old model_* sorts onto file_*
	if sortMode == "model_asc" {
		sortMode = "file_asc"
	}
	if sortMode == "model_desc" {
		sortMode = "file_desc"
	}
	// ✅ all=1 -> always return the full list (disable pagination)
	qAll := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("all")))
	fetchAll := qAll == "1" || qAll == "true" || qAll == "yes"
	if fetchAll {
		page = 0
		pageSize = 0
	}
	// ✅ optional: include the count
	qWithCount := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("withCount")))
	withCount := qWithCount == "1" || qWithCount == "true" || qWithCount == "yes"
	// ✅ never count/list .trash as a "done item"
	isTrashOutput := func(p string) bool {
		pp := strings.ToLower(filepath.ToSlash(strings.TrimSpace(p)))
		return strings.Contains(pp, "/.trash/") || strings.HasSuffix(pp, "/.trash")
	}
	isTrashPath := func(full string) bool {
		p := strings.ReplaceAll(full, "\\", "/")
		return strings.Contains(p, "/.trash/") || strings.HasSuffix(p, "/.trash")
	}
	// --- helpers (derive ModelKey from filename/dir) ---
	modelFromStem := func(stem string) string {
		// stem: lower, without ext, without HOT
		if stem == "" {
			return ""
		}
		if m := startedAtFromFilenameRe.FindStringSubmatch(stem); m != nil {
			return strings.ToLower(strings.TrimSpace(m[1]))
		}
		// fallback: everything before the last "_" (or the whole stem)
		if i := strings.LastIndex(stem, "_"); i > 0 {
			return strings.ToLower(strings.TrimSpace(stem[:i]))
		}
		return strings.ToLower(strings.TrimSpace(stem))
	}
	modelFromFullPath := func(full string) string {
		name := strings.ToLower(filepath.Base(full))
		stem := strings.TrimSuffix(name, filepath.Ext(name))
		stem = strings.TrimPrefix(stem, "hot ")
		mk := modelFromStem(stem)
		// fallback: if the filename is useless, take the folder (/done/<model>/file)
		if mk == "" {
			parent := strings.ToLower(filepath.Base(filepath.Dir(full)))
			parent = strings.TrimSpace(parent)
			if parent != "" && parent != "keep" {
				mk = parent
			}
		}
		return mk
	}
	// helpers (sort)
	fileForSortName := func(filename string) string {
		f := strings.ToLower(filename)
		f = strings.TrimPrefix(f, "hot ")
		return f
	}
	durationForSort := func(j *RecordJob) (sec float64, ok bool) {
		if j.DurationSeconds > 0 {
			return j.DurationSeconds, true
		}
		return 0, false
	}
	compareIdx := func(items []doneIndexItem, sortMode string, ia, ib int) bool {
		a := items[ia]
		b := items[ib]
		ta, tb := a.endedAt, b.endedAt
		switch sortMode {
		case "completed_asc":
			if !ta.Equal(tb) {
				return ta.Before(tb)
			}
			return a.fileSort < b.fileSort
		case "completed_desc":
			if !ta.Equal(tb) {
				return ta.After(tb)
			}
			return a.fileSort < b.fileSort
		case "file_asc":
			if a.fileSort != b.fileSort {
				return a.fileSort < b.fileSort
			}
			if !ta.Equal(tb) {
				return ta.After(tb)
			}
			return a.fileSort < b.fileSort
		case "file_desc":
			if a.fileSort != b.fileSort {
				return a.fileSort > b.fileSort
			}
			if !ta.Equal(tb) {
				return ta.After(tb)
			}
			return a.fileSort < b.fileSort
		case "duration_asc":
			da, okA := durationForSort(a.job)
			db, okB := durationForSort(b.job)
			if okA != okB {
				return okA // unknown durations go last
			}
			if okA && okB && da != db {
				return da < db
			}
			if !ta.Equal(tb) {
				return ta.After(tb)
			}
			return a.fileSort < b.fileSort
		case "duration_desc":
			da, okA := durationForSort(a.job)
			db, okB := durationForSort(b.job)
			if okA != okB {
				return okA
			}
			if okA && okB && da != db {
				return da > db
			}
			if !ta.Equal(tb) {
				return ta.After(tb)
			}
			return a.fileSort < b.fileSort
		case "size_asc":
			if a.job.SizeBytes != b.job.SizeBytes {
				return a.job.SizeBytes < b.job.SizeBytes
			}
			if !ta.Equal(tb) {
				return ta.After(tb)
			}
			return a.fileSort < b.fileSort
		case "size_desc":
			if a.job.SizeBytes != b.job.SizeBytes {
				return a.job.SizeBytes > b.job.SizeBytes
			}
			if !ta.Equal(tb) {
				return ta.After(tb)
			}
			return a.fileSort < b.fileSort
		default:
			if !ta.Equal(tb) {
				return ta.After(tb)
			}
			return a.fileSort < b.fileSort
		}
	}
	// --- resolve done path ---
	s := getSettings()
	doneAbs, err := resolvePathRelativeToApp(s.DoneDir)
	if err != nil {
		http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	// If no DoneDir is configured → simply return an empty list
	if strings.TrimSpace(doneAbs) == "" {
		w.Header().Set("Content-Type", "application/json")
		w.Header().Set("Cache-Control", "no-store")
		_ = json.NewEncoder(w).Encode(doneListResponse{
			Items:      []*RecordJob{},
			TotalCount: 0,
			Page:       page,
			PageSize:   pageSize,
		})
		return
	}
	// --------- Cache rebuild (only on doneSeq change or TTL) ---------
	// NOTE(review): this closure shadows the package-level buildDoneIndex;
	// unlike that one it skips the cache-only duration fallback on purpose
	// (meta is generated lazily per page below).
	buildDoneIndex := func(doneAbs string) ([]doneIndexItem, map[string][]int) {
		items := make([]doneIndexItem, 0, 2048)
		addFile := func(full string, fi os.FileInfo, fromKeep bool) {
			if fi == nil || fi.IsDir() || fi.Size() == 0 {
				return
			}
			// ✅ never count / return .trash
			if isTrashPath(full) || isTrashOutput(full) {
				return
			}
			name := filepath.Base(full)
			ext := strings.ToLower(filepath.Ext(name))
			if ext != ".mp4" && ext != ".ts" {
				return
			}
			base := strings.TrimSuffix(name, filepath.Ext(name))
			t := fi.ModTime()
			// StartedAt from the filename (fallback: ModTime)
			start := t
			stem := base
			if strings.HasPrefix(stem, "HOT ") {
				stem = strings.TrimPrefix(stem, "HOT ")
			}
			if m := startedAtFromFilenameRe.FindStringSubmatch(stem); m != nil {
				mm, _ := strconv.Atoi(m[2])
				dd, _ := strconv.Atoi(m[3])
				yy, _ := strconv.Atoi(m[4])
				hh, _ := strconv.Atoi(m[5])
				mi, _ := strconv.Atoi(m[6])
				ss, _ := strconv.Atoi(m[7])
				start = time.Date(yy, time.Month(mm), dd, hh, mi, ss, 0, time.Local)
			}
			dur := 0.0
			srcURL := ""
			// 1) read meta.json from generated/<id>/meta.json (fast)
			id := stripHotPrefix(strings.TrimSuffix(filepath.Base(full), filepath.Ext(full)))
			if strings.TrimSpace(id) != "" {
				if mp, err := generatedMetaFile(id); err == nil {
					if d, ok := readVideoMetaDuration(mp, fi); ok {
						dur = d
					}
					if u, ok := readVideoMetaSourceURL(mp, fi); ok {
						srcURL = u
					}
				}
			}
			// ✅ No cache-only fallback here.
			// If meta is missing, dur stays 0 for now and is produced on delivery
			// (pagination) via ensureVideoMetaForFileBestEffort.
			ended := t
			mk := modelFromFullPath(full)
			fs := fileForSortName(name)
			items = append(items, doneIndexItem{
				job: &RecordJob{
					ID:              base,
					Output:          full,
					SourceURL:       srcURL,
					Status:          JobFinished,
					StartedAt:       start,
					EndedAt:         &ended,
					DurationSeconds: dur,
					SizeBytes:       fi.Size(),
				},
				endedAt:  ended,
				fileSort: fs,
				fromKeep: fromKeep,
				modelKey: mk,
			})
		}
		// scan one level: doneAbs + doneAbs/<dir>/*
		scanRoot := func(root string, fromKeep bool, skipKeepDir bool) {
			entries, err := os.ReadDir(root)
			if err != nil {
				return
			}
			for _, e := range entries {
				if e.IsDir() {
					// ✅ never scan the .trash folder
					if strings.EqualFold(e.Name(), ".trash") {
						continue
					}
					// ✅ do not scan keep twice (when root==doneAbs)
					if skipKeepDir && e.Name() == "keep" {
						continue
					}
					sub := filepath.Join(root, e.Name())
					subEntries, err := os.ReadDir(sub)
					if err != nil {
						continue
					}
					for _, se := range subEntries {
						if se.IsDir() {
							continue
						}
						full := filepath.Join(sub, se.Name())
						fi, err := se.Info()
						if err != nil {
							// fallback
							fi2, err2 := os.Stat(full)
							if err2 != nil {
								continue
							}
							fi = fi2
						}
						addFile(full, fi, fromKeep)
					}
					continue
				}
				full := filepath.Join(root, e.Name())
				fi, err := e.Info()
				if err != nil {
					fi2, err2 := os.Stat(full)
					if err2 != nil {
						continue
					}
					fi = fi2
				}
				addFile(full, fi, fromKeep)
			}
		}
		// doneAbs without keep
		scanRoot(doneAbs, false, true)
		// keep (if it exists)
		scanRoot(filepath.Join(doneAbs, "keep"), true, false)
		// pre-sorted indices: includeKeep 0/1 and per sortMode
		sorted := make(map[string][]int)
		buildSorted := func(inc bool, mode string) []int {
			idx := make([]int, 0, len(items))
			for i := range items {
				if !inc && items[i].fromKeep {
					continue
				}
				idx = append(idx, i)
			}
			sort.Slice(idx, func(a, b int) bool { return compareIdx(items, mode, idx[a], idx[b]) })
			return idx
		}
		modes := []string{
			"completed_desc", "completed_asc",
			"file_asc", "file_desc",
			"duration_asc", "duration_desc",
			"size_asc", "size_desc",
		}
		for _, m := range modes {
			sorted["0|"+m] = buildSorted(false, m)
			sorted["1|"+m] = buildSorted(true, m)
		}
		return items, sorted
	}
	// rebuild when doneSeq changed or TTL expired
	curSeq := atomic.LoadUint64(&doneSeq)
	now := time.Now()
	doneCache.mu.Lock()
	needRebuild := doneCache.seq != curSeq || doneCache.doneAbs != doneAbs || now.Sub(doneCache.builtAt) > 30*time.Second
	if needRebuild {
		// If doneAbs does not exist: put empty data in the cache
		if _, err := os.Stat(doneAbs); err != nil && os.IsNotExist(err) {
			doneCache.items = nil
			doneCache.sortedIdx = map[string][]int{
				"0|completed_desc": {},
				"1|completed_desc": {},
			}
			doneCache.seq = curSeq
			doneCache.doneAbs = doneAbs
			doneCache.builtAt = now
		} else {
			items, sorted := buildDoneIndex(doneAbs)
			doneCache.items = items
			doneCache.sortedIdx = sorted
			doneCache.seq = curSeq
			doneCache.doneAbs = doneAbs
			doneCache.builtAt = now
		}
	}
	items := doneCache.items
	sortedAll := doneCache.sortedIdx
	doneCache.mu.Unlock()
	// --------- Request-specific selection (model filter, includeKeep, sort, paging) ---------
	incKey := "0"
	if includeKeep {
		incKey = "1"
	}
	// idx contains indices into items
	var idx []int
	if qModel == "" {
		idx = sortedAll[incKey+"|"+sortMode]
		if idx == nil {
			// fallback
			idx = sortedAll[incKey+"|completed_desc"]
			if idx == nil {
				idx = make([]int, 0)
			}
		}
	} else {
		// Model filter: only a subset, then sort
		idx = make([]int, 0, 256)
		for i := range items {
			if !includeKeep && items[i].fromKeep {
				continue
			}
			if items[i].modelKey == qModel {
				idx = append(idx, i)
			}
		}
		sort.Slice(idx, func(a, b int) bool { return compareIdx(items, sortMode, idx[a], idx[b]) })
	}
	totalCount := len(idx)
	// Apply pagination (only on idx)
	start := 0
	end := totalCount
	if pageSize > 0 && !fetchAll {
		if page <= 0 {
			page = 1
		}
		start = (page - 1) * pageSize
		if start < 0 {
			start = 0
		}
		if start >= totalCount {
			start = totalCount
		}
		end = start + pageSize
		if end > totalCount {
			end = totalCount
		}
	}
	// Build response jobs
	out := make([]*RecordJob, 0, max(0, end-start))
	for _, ii := range idx[start:end] {
		base := items[ii].job
		if base == nil {
			continue
		}
		// ✅ make a copy (important: no race/mutation on the cache object)
		c := *base
		// ✅ Meta always from meta.json (generate it if missing)
		// Short per-item timeout so one page cannot "hang".
		pctx, cancel := context.WithTimeout(r.Context(), 3*time.Second)
		m, ok := ensureVideoMetaForFileBestEffort(pctx, c.Output, c.SourceURL)
		cancel()
		// If meta is ok: ALWAYS set the fields from it
		if ok && m != nil {
			c.Meta = m
			c.DurationSeconds = m.DurationSeconds
			c.SizeBytes = m.FileSize
			c.VideoWidth = m.VideoWidth
			c.VideoHeight = m.VideoHeight
			c.FPS = m.FPS
			// SourceURL: if the job's is empty, take it from meta
			if strings.TrimSpace(c.SourceURL) == "" && strings.TrimSpace(m.SourceURL) != "" {
				c.SourceURL = strings.TrimSpace(m.SourceURL)
			}
		} else {
			// If no meta can be built at all: at least set the size correctly
			if fi, err := os.Stat(c.Output); err == nil && fi != nil && !fi.IsDir() && fi.Size() > 0 {
				c.SizeBytes = fi.Size()
			}
		}
		out = append(out, &c)
	}
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Cache-Control", "no-store")
	// ✅ When the frontend uses "withCount=1": {count, items}
	if withCount {
		_ = json.NewEncoder(w).Encode(map[string]any{
			"count": totalCount,
			"items": out,
		})
		return
	}
	// ✅
Standard-Response: immer auch totalCount mitsenden _ = json.NewEncoder(w).Encode(doneListResponse{ Items: out, TotalCount: totalCount, Page: page, PageSize: pageSize, }) } // mini helper, falls du keinen hast func max(a, b int) int { if a > b { return a } return b } func recordDeleteVideo(w http.ResponseWriter, r *http.Request) { // Frontend nutzt aktuell POST (siehe FinishedDownloads), daher erlauben wir POST + DELETE if r.Method != http.MethodPost && r.Method != http.MethodDelete { http.Error(w, "Nur POST oder DELETE erlaubt", http.StatusMethodNotAllowed) return } raw := strings.TrimSpace(r.URL.Query().Get("file")) if raw == "" { http.Error(w, "file fehlt", http.StatusBadRequest) return } // sicher decoden file, err := url.QueryUnescape(raw) if err != nil { http.Error(w, "ungültiger file", http.StatusBadRequest) return } file = strings.TrimSpace(file) // ✅ nur Basename erlauben (keine Unterordner, kein Traversal) if file == "" || strings.Contains(file, "/") || strings.Contains(file, "\\") || filepath.Base(file) != file { http.Error(w, "ungültiger file", http.StatusBadRequest) return } ext := strings.ToLower(filepath.Ext(file)) if ext != ".mp4" && ext != ".ts" { http.Error(w, "nicht erlaubt", http.StatusForbidden) return } s := getSettings() doneAbs, err := resolvePathRelativeToApp(s.DoneDir) if err != nil { http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) return } if strings.TrimSpace(doneAbs) == "" { http.Error(w, "doneDir ist leer", http.StatusBadRequest) return } // ✅ done + done/ sowie keep + keep/ target, from, fi, err := resolveDoneFileByName(doneAbs, file) if err != nil { http.Error(w, "datei nicht gefunden", http.StatusNotFound) return } if fi != nil && fi.IsDir() { http.Error(w, "ist ein verzeichnis", http.StatusBadRequest) return } // ✅ Single-slot Trash: immer nur die *zuletzt* gelöschte Datei erlauben trashDir := filepath.Join(doneAbs, ".trash") // ✅ Wenn im Single-slot Trash schon was liegt: ID merken, 
// aber generated erst löschen, NACHDEM .trash wirklich erfolgreich geleert wurde. prevBase := "" prevCanonical := "" if b, err := os.ReadFile(filepath.Join(trashDir, "last.json")); err == nil && len(b) > 0 { var prev struct { File string `json:"file"` } if err := json.Unmarshal(b, &prev); err == nil { prevFile := strings.TrimSpace(prev.File) if prevFile != "" { prevBase = strings.TrimSuffix(prevFile, filepath.Ext(prevFile)) prevCanonical = stripHotPrefix(prevBase) } } } // Trash komplett leeren => ältere Undos sind automatisch ungültig // ⚠️ Fehler NICHT schlucken: wenn .trash nicht leerbar ist, darf der neue Delete nicht weiterlaufen. if err := os.RemoveAll(trashDir); err != nil { if runtime.GOOS == "windows" && isSharingViolation(err) { http.Error(w, "konnte .trash nicht leeren (Datei wird gerade verwendet). Bitte Player schließen und erneut versuchen.", http.StatusConflict) return } http.Error(w, "trash leeren fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) return } // ✅ Jetzt ist das alte Trash-Video wirklich endgültig weg → generated/meta// entfernen. if prevCanonical != "" { removeGeneratedForID(prevCanonical) // Best-effort: falls irgendwo mal Assets mit HOT-ID entstanden sind if prevBase != "" && prevBase != prevCanonical { removeGeneratedForID(prevBase) } } // ✅ NEU: auch Transcode-Cache zum endgültig gelöschten Video entfernen if prevCanonical != "" { removeTranscodesForID(doneAbs, prevCanonical) // Best-effort (falls irgendwo doch mal abweichende IDs genutzt wurden) if prevBase != "" && prevBase != prevCanonical { removeTranscodesForID(doneAbs, stripHotPrefix(prevBase)) } } if err := os.MkdirAll(trashDir, 0o755); err != nil { http.Error(w, "trash dir erstellen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) return } // Original-Dir relativ zu doneAbs merken (inkl. 
keep/ oder ) origDir := filepath.Dir(target) relDir, err := filepath.Rel(doneAbs, origDir) if err != nil { http.Error(w, "rel dir berechnen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) return } relDir = filepath.ToSlash(relDir) if strings.TrimSpace(relDir) == "" { relDir = "." } // ✅ Undo-Token jetzt schon erzeugen, damit wir es als "Single-slot key" speichern können tok, err := encodeUndoDeleteToken(undoDeleteToken{ Trash: "", // setzen wir gleich (trashName) RelDir: relDir, // hast du oben schon berechnet File: file, }) if err != nil { http.Error(w, "undo token encode fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) return } trashName := tok + "__" + file // eindeutig + Token sichtbar in filename trashName = strings.ReplaceAll(trashName, string(os.PathSeparator), "_") dst := filepath.Join(trashDir, trashName) // ✅ Token muss auch wissen, wie der Trashname heißt // (wir encoden den Token nicht neu — wir speichern Trashname separat in last.json) // move mit retry (Windows file-lock robust) if err := renameWithRetry(target, dst); err != nil { if runtime.GOOS == "windows" && isSharingViolation(err) { http.Error(w, "datei wird gerade verwendet (Player offen). 
Bitte kurz stoppen und erneut versuchen.", http.StatusConflict) return } http.Error(w, "trash move fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) return } // ✅ last.json schreiben: nur dieser Token ist gültig type trashMeta struct { Token string `json:"token"` // exakt der Query-Token (encoded) TrashName string `json:"trashName"` // Dateiname in .trash RelDir string `json:"relDir"` // ursprünglicher Ordner relativ zu doneAbs File string `json:"file"` // originaler Name (basename) DeletedAt int64 `json:"deletedAt"` } meta := trashMeta{ Token: tok, TrashName: trashName, RelDir: relDir, File: file, DeletedAt: time.Now().Unix(), } b, _ := json.Marshal(meta) _ = os.WriteFile(filepath.Join(trashDir, "last.json"), b, 0o644) // Cache/Jobs aufräumen (Assets NICHT hart löschen => Undo bleibt “schnell” möglich) purgeDurationCacheForPath(target) removeJobsByOutputBasename(file) notifyDoneChanged() notifyJobsChanged() w.Header().Set("Content-Type", "application/json") w.Header().Set("Cache-Control", "no-store") _ = json.NewEncoder(w).Encode(map[string]any{ "ok": true, "file": file, "from": from, // "done" | "keep" "undoToken": tok, // ✅ für Undo "trashed": true, }) } func recordRestoreVideo(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPost { http.Error(w, "Nur POST erlaubt", http.StatusMethodNotAllowed) return } raw := strings.TrimSpace(r.URL.Query().Get("token")) if raw == "" { http.Error(w, "token fehlt", http.StatusBadRequest) return } // ✅ doneDir auflösen s := getSettings() doneAbs, err := resolvePathRelativeToApp(s.DoneDir) if err != nil { http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) return } if strings.TrimSpace(doneAbs) == "" { http.Error(w, "doneDir ist leer", http.StatusBadRequest) return } // ✅ Single-slot: last.json lesen und Token strikt validieren trashDir := filepath.Join(doneAbs, ".trash") metaPath := filepath.Join(trashDir, "last.json") b, err := os.ReadFile(metaPath) if 
err != nil { http.Error(w, "nichts zum Wiederherstellen", http.StatusNotFound) return } var meta struct { Token string `json:"token"` TrashName string `json:"trashName"` RelDir string `json:"relDir"` File string `json:"file"` DeletedAt int64 `json:"deletedAt"` } if err := json.Unmarshal(b, &meta); err != nil { http.Error(w, "trash meta ungültig", http.StatusInternalServerError) return } if strings.TrimSpace(meta.Token) == "" || strings.TrimSpace(meta.TrashName) == "" || strings.TrimSpace(meta.File) == "" { http.Error(w, "trash meta unvollständig", http.StatusInternalServerError) return } // ✅ Nur der letzte Token ist gültig if raw != meta.Token { http.Error(w, "token ungültig (nicht der letzte)", http.StatusNotFound) return } // ✅ Token zusätzlich decoden (Format/Signatur prüfen, aber Restore-Daten kommen aus last.json) tok, err := decodeUndoDeleteToken(raw) if err != nil { http.Error(w, "token ungültig", http.StatusBadRequest) return } // ✅ Safety: nur sichere Pfad-Bestandteile aus meta verwenden if !isSafeBasename(meta.TrashName) || !isSafeBasename(meta.File) || !isSafeRelDir(meta.RelDir) { http.Error(w, "token inhalt ungültig", http.StatusBadRequest) return } // ✅ Extra Konsistenzchecks: token.File / token.RelDir müssen zu meta passen (optional aber sinnvoll) if tok.File != meta.File || tok.RelDir != meta.RelDir { http.Error(w, "token passt nicht zu letzter Löschung", http.StatusNotFound) return } ext := strings.ToLower(filepath.Ext(meta.File)) if ext != ".mp4" && ext != ".ts" { http.Error(w, "nicht erlaubt", http.StatusForbidden) return } // Quelle: exakt die zuletzt gelöschte Datei src := filepath.Join(trashDir, meta.TrashName) // Zielordner rekonstruieren (relativ zu doneAbs) rel := meta.RelDir if rel == "." 
{ rel = "" } dstDir := filepath.Join(doneAbs, filepath.FromSlash(rel)) dstDirClean := filepath.Clean(dstDir) doneClean := filepath.Clean(doneAbs) // safety: dstDir muss innerhalb doneAbs liegen if !strings.HasPrefix(strings.ToLower(dstDirClean)+string(os.PathSeparator), strings.ToLower(doneClean)+string(os.PathSeparator)) && !strings.EqualFold(dstDirClean, doneClean) { http.Error(w, "zielpfad ungültig", http.StatusBadRequest) return } if err := os.MkdirAll(dstDirClean, 0o755); err != nil { http.Error(w, "zielordner erstellen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) return } dst, err := uniqueDestPath(dstDirClean, meta.File) if err != nil { http.Error(w, "zielname nicht verfügbar: "+err.Error(), http.StatusConflict) return } if err := renameWithRetry(src, dst); err != nil { if runtime.GOOS == "windows" && isSharingViolation(err) { http.Error(w, "restore fehlgeschlagen (Datei wird gerade verwendet).", http.StatusConflict) return } http.Error(w, "restore fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) return } // ✅ Optional: Trash leeren, damit Token danach definitiv tot ist _ = os.RemoveAll(trashDir) _ = os.MkdirAll(trashDir, 0o755) notifyDoneChanged() w.Header().Set("Content-Type", "application/json") w.Header().Set("Cache-Control", "no-store") _ = json.NewEncoder(w).Encode(map[string]any{ "ok": true, "file": meta.File, "restoredFile": filepath.Base(dst), // kann __dup enthalten }) } func recordUnkeepVideo(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPost { http.Error(w, "Nur POST erlaubt", http.StatusMethodNotAllowed) return } raw := strings.TrimSpace(r.URL.Query().Get("file")) if raw == "" { http.Error(w, "file fehlt", http.StatusBadRequest) return } file, err := url.QueryUnescape(raw) if err != nil { http.Error(w, "ungültiger file", http.StatusBadRequest) return } file = strings.TrimSpace(file) if !isSafeBasename(file) { http.Error(w, "ungültiger file", http.StatusBadRequest) return } ext := 
strings.ToLower(filepath.Ext(file)) if ext != ".mp4" && ext != ".ts" { http.Error(w, "nicht erlaubt", http.StatusForbidden) return } s := getSettings() doneAbs, err := resolvePathRelativeToApp(s.DoneDir) if err != nil { http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) return } if strings.TrimSpace(doneAbs) == "" { http.Error(w, "doneDir ist leer", http.StatusBadRequest) return } // Quelle muss in keep (root oder keep/) liegen src, from, fi, err := resolveDoneFileByName(doneAbs, file) if err != nil { http.Error(w, "datei nicht gefunden", http.StatusNotFound) return } if from != "keep" { http.Error(w, "datei ist nicht in keep", http.StatusConflict) return } if fi != nil && fi.IsDir() { http.Error(w, "ist ein verzeichnis", http.StatusBadRequest) return } // Ziel: zurück nach done/ (flach, ohne model-subdirs) dstDir := doneAbs if err := os.MkdirAll(dstDir, 0o755); err != nil { http.Error(w, "done subdir erstellen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) return } dst, err := uniqueDestPath(dstDir, file) if err != nil { http.Error(w, "zielname nicht verfügbar: "+err.Error(), http.StatusConflict) return } if err := renameWithRetry(src, dst); err != nil { if runtime.GOOS == "windows" && isSharingViolation(err) { http.Error(w, "unkeep fehlgeschlagen (Datei wird gerade verwendet).", http.StatusConflict) return } http.Error(w, "unkeep fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) return } notifyDoneChanged() w.Header().Set("Content-Type", "application/json") w.Header().Set("Cache-Control", "no-store") _ = json.NewEncoder(w).Encode(map[string]any{ "ok": true, "oldFile": file, "newFile": filepath.Base(dst), }) } func recordKeepVideo(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPost { http.Error(w, "Nur POST erlaubt", http.StatusMethodNotAllowed) return } raw := strings.TrimSpace(r.URL.Query().Get("file")) if raw == "" { http.Error(w, "file fehlt", http.StatusBadRequest) 
return } file, err := url.QueryUnescape(raw) if err != nil { http.Error(w, "ungültiger file", http.StatusBadRequest) return } file = strings.TrimSpace(file) // ✅ nur Basename erlauben if file == "" || strings.Contains(file, "/") || strings.Contains(file, "\\") || filepath.Base(file) != file { http.Error(w, "ungültiger file", http.StatusBadRequest) return } ext := strings.ToLower(filepath.Ext(file)) if ext != ".mp4" && ext != ".ts" { http.Error(w, "nicht erlaubt", http.StatusForbidden) return } s := getSettings() doneAbs, err := resolvePathRelativeToApp(s.DoneDir) if err != nil { http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) return } if strings.TrimSpace(doneAbs) == "" { http.Error(w, "doneDir ist leer", http.StatusBadRequest) return } keepRoot := filepath.Join(doneAbs, "keep") if err := os.MkdirAll(keepRoot, 0o755); err != nil { http.Error(w, "keep dir erstellen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) return } // ✅ 0) Wenn schon irgendwo in keep (root oder keep/) existiert: // - wenn im keep-root: jetzt nach keep// nachziehen if p, _, ok := findFileInDirOrOneLevelSubdirs(keepRoot, file, ""); ok { // p liegt entweder in keepRoot oder keepRoot/ if strings.EqualFold(filepath.Clean(filepath.Dir(p)), filepath.Clean(keepRoot)) { // im Root => versuchen einzusortieren modelKey := modelKeyFromFilenameOrPath(file, p /* srcPath */, keepRoot /* doneAbs dummy, wird nicht genutzt */) modelKey = sanitizeModelKey(modelKey) // Optionaler Fallback: wenn wir aus dem keep-root Pfad nix ziehen können, nur aus Filename: if modelKey == "" { stem := strings.TrimSuffix(file, filepath.Ext(file)) modelKey = sanitizeModelKey(modelNameFromFilename(stem)) } if modelKey != "" { dstDir := filepath.Join(keepRoot, modelKey) if err := os.MkdirAll(dstDir, 0o755); err == nil { dst, derr := uniqueDestPath(dstDir, file) if derr == nil { // best-effort move _ = renameWithRetry(p, dst) } } } } w.Header().Set("Content-Type", 
"application/json") w.Header().Set("Cache-Control", "no-store") _ = json.NewEncoder(w).Encode(map[string]any{ "ok": true, "file": file, "alreadyKept": true, }) return } // ✅ 1) Quelle in done (root oder done/), aber NICHT aus keep src, fi, ok := findFileInDirOrOneLevelSubdirs(doneAbs, file, "keep") if !ok { http.Error(w, "datei nicht gefunden", http.StatusNotFound) return } if fi == nil || fi.IsDir() { http.Error(w, "ist ein verzeichnis", http.StatusBadRequest) return } // ✅ 2) Ziel: keep//file modelKey := modelKeyFromFilenameOrPath(file, src, doneAbs) dstDir := keepRoot if modelKey != "" { dstDir = filepath.Join(keepRoot, modelKey) } if err := os.MkdirAll(dstDir, 0o755); err != nil { http.Error(w, "keep subdir erstellen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) return } dst, err := uniqueDestPath(dstDir, file) if err != nil { http.Error(w, "zielname nicht verfügbar: "+err.Error(), http.StatusConflict) return } // rename mit retry (Windows file-lock) if err := renameWithRetry(src, dst); err != nil { if runtime.GOOS == "windows" && isSharingViolation(err) { http.Error(w, "keep fehlgeschlagen (Datei wird gerade verwendet).", http.StatusConflict) return } http.Error(w, "keep fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) return } notifyDoneChanged() // ... dein bestehender Cleanup-Block (generated Assets löschen, legacy cleanup, removeJobsByOutputBasename) bleibt unverändert ... 
w.Header().Set("Content-Type", "application/json") w.Header().Set("Cache-Control", "no-store") _ = json.NewEncoder(w).Encode(map[string]any{ "ok": true, "file": file, "alreadyKept": false, "newFile": filepath.Base(dst), // ✅ NEU }) } func recordToggleHot(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPost { http.Error(w, "Nur POST", http.StatusMethodNotAllowed) return } raw := strings.TrimSpace(r.URL.Query().Get("file")) if raw == "" { http.Error(w, "file fehlt", http.StatusBadRequest) return } file, err := url.QueryUnescape(raw) if err != nil { http.Error(w, "ungültiger file", http.StatusBadRequest) return } file = strings.TrimSpace(file) // ✅ nur Basename erlauben if file == "" || strings.Contains(file, "/") || strings.Contains(file, "\\") || filepath.Base(file) != file { http.Error(w, "ungültiger file", http.StatusBadRequest) return } ext := strings.ToLower(filepath.Ext(file)) if ext != ".mp4" && ext != ".ts" { http.Error(w, "nicht erlaubt", http.StatusForbidden) return } s := getSettings() doneAbs, err := resolvePathRelativeToApp(s.DoneDir) if err != nil { http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) return } if strings.TrimSpace(doneAbs) == "" { http.Error(w, "doneDir ist leer", http.StatusBadRequest) return } // ✅ Quelle kann in done/, done/, keep/, keep/ liegen src, from, fi, err := resolveDoneFileByName(doneAbs, file) if err != nil { http.Error(w, "datei nicht gefunden", http.StatusNotFound) return } if fi != nil && fi.IsDir() { http.Error(w, "ist ein verzeichnis", http.StatusBadRequest) return } srcDir := filepath.Dir(src) // ✅ wichtig: toggeln im tatsächlichen Ordner // toggle: HOT Prefix newFile := file if strings.HasPrefix(file, "HOT ") { newFile = strings.TrimPrefix(file, "HOT ") } else { newFile = "HOT " + file } dst := filepath.Join(srcDir, newFile) // ✅ im selben Ordner toggeln (done oder keep) if _, err := os.Stat(dst); err == nil { http.Error(w, "ziel existiert bereits", 
http.StatusConflict) return } else if !os.IsNotExist(err) { http.Error(w, "stat ziel fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) return } if err := renameWithRetry(src, dst); err != nil { if runtime.GOOS == "windows" && isSharingViolation(err) { http.Error(w, "rename fehlgeschlagen (Datei wird gerade abgespielt). Bitte erneut versuchen.", http.StatusConflict) return } http.Error(w, "rename fehlgeschlagen: "+err.Error(), http.StatusInternalServerError) return } // ✅ KEIN generated-rename! // Assets bleiben canonical (ohne HOT) canonicalID := stripHotPrefix(strings.TrimSuffix(file, filepath.Ext(file))) renameJobsOutputBasename(file, newFile) notifyDoneChanged() notifyJobsChanged() w.Header().Set("Content-Type", "application/json") w.Header().Set("Cache-Control", "no-store") _ = json.NewEncoder(w).Encode(map[string]any{ "ok": true, "oldFile": file, "newFile": newFile, "canonicalID": canonicalID, "from": from, // "done" | "keep" }) }