nsfwapp/backend/record_handlers.go
2026-02-24 18:30:30 +01:00

2467 lines
63 KiB
Go
Raw Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

// backend\record_handlers.go
package main
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"reflect"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
)
// RecordRequest is the JSON body accepted when starting a recording.
type RecordRequest struct {
	URL       string `json:"url"`                 // stream/page URL to record
	Cookie    string `json:"cookie,omitempty"`    // optional cookie string passed along to the recorder — TODO confirm against startRecordingInternal
	UserAgent string `json:"userAgent,omitempty"` // optional user-agent override — TODO confirm usage
	Hidden    bool   `json:"hidden,omitempty"`    // hidden jobs are filtered out of recordList (see recordList)
}
// doneListResponse is the (optionally paginated) payload of the done-list
// endpoint.
type doneListResponse struct {
	Items      []*RecordJob `json:"items"`
	TotalCount int          `json:"totalCount"`         // total matches before pagination
	Page       int          `json:"page,omitempty"`     // 1-based; omitted when the full list is returned
	PageSize   int          `json:"pageSize,omitempty"` // omitted when the full list is returned
}
// previewSpriteMetaResp describes the on-disk preview sprite for a video.
// Exists is always explicit; the remaining fields are filled best-effort
// from meta.json (see previewSpriteTruthForID).
type previewSpriteMetaResp struct {
	Exists      bool    `json:"exists"`
	Path        string  `json:"path,omitempty"`        // API path: /api/preview-sprite/<id>
	Count       int     `json:"count,omitempty"`       // number of sprite frames
	Cols        int     `json:"cols,omitempty"`        // sprite sheet columns
	Rows        int     `json:"rows,omitempty"`        // sprite sheet rows
	StepSeconds float64 `json:"stepSeconds,omitempty"` // time step between frames (from stepSeconds/step/intervalSeconds)
}
// doneMetaFileResp is the file-mode response of recordDoneMeta
// (?file=<basename>): per-file metadata plus the preview-sprite truth.
type doneMetaFileResp struct {
	File            string                `json:"file"`       // basename of the (possibly remuxed) file
	MetaExists      bool                  `json:"metaExists"` // true when generated meta.json exists and is non-empty
	DurationSeconds float64               `json:"durationSeconds,omitempty"`
	Width           int                   `json:"width,omitempty"`
	Height          int                   `json:"height,omitempty"`
	FPS             float64               `json:"fps,omitempty"`
	SourceURL       string                `json:"sourceUrl,omitempty"`
	PreviewSprite   previewSpriteMetaResp `json:"previewSprite"` // always present, Exists explicit
	Error           string                `json:"error,omitempty"`
}
// doneMetaResp is the count-mode response of recordDoneMeta.
type doneMetaResp struct {
	Count int `json:"count"` // number of done items matching the query filters
}
// durationReq is presumably the request body of a batch duration endpoint
// defined elsewhere in this file — confirm against its handler.
type durationReq struct {
	Files []string `json:"files"` // basenames to resolve durations for
}
// durationItem is one per-file result for a batch duration request.
type durationItem struct {
	File            string  `json:"file"`
	DurationSeconds float64 `json:"durationSeconds,omitempty"`
	Error           string  `json:"error,omitempty"` // per-file failure, response still 200 presumably — confirm at the handler
}
// undoDeleteToken describes a soft-deleted file so it can be restored later:
// where it now lives inside .trash, which directory it came from, and its
// original basename.
type undoDeleteToken struct {
	Trash  string `json:"trash"`  // basename inside .trash
	RelDir string `json:"relDir"` // dir relative to doneAbs, e.g. ".", "keep/model", "model"
	File   string `json:"file"`   // original basename, e.g. "HOT xyz.mp4"
}

// encodeUndoDeleteToken serializes t as JSON wrapped in unpadded URL-safe
// base64, suitable for use inside a query parameter.
func encodeUndoDeleteToken(t undoDeleteToken) (string, error) {
	payload, err := json.Marshal(t)
	if err != nil {
		return "", err
	}
	return base64.RawURLEncoding.EncodeToString(payload), nil
}

// decodeUndoDeleteToken is the inverse of encodeUndoDeleteToken. On failure
// it returns the zero token together with the first error encountered.
func decodeUndoDeleteToken(raw string) (undoDeleteToken, error) {
	var tok undoDeleteToken
	payload, err := base64.RawURLEncoding.DecodeString(raw)
	if err != nil {
		return tok, err
	}
	err = json.Unmarshal(payload, &tok)
	return tok, err
}
// isSafeRelDir reports whether rel is acceptable as a directory path
// relative to the done root: non-empty, not absolute, no traversal above
// the root, and no literal backslashes surviving normalization.
func isSafeRelDir(rel string) bool {
	trimmed := strings.TrimSpace(rel)
	if trimmed == "" {
		return false
	}
	// Validate on forward slashes regardless of the host OS separator.
	slashed := filepath.ToSlash(trimmed)
	if strings.HasPrefix(slashed, "/") {
		return false
	}
	cleaned := path.Clean(slashed) // path.Clean keeps forward slashes
	switch {
	case cleaned == ".":
		return true
	case cleaned == ".." || strings.HasPrefix(cleaned, "../"):
		// would escape the root
		return false
	case strings.Contains(cleaned, `\`):
		// raw backslash survived (non-Windows host) — reject odd traversal attempts
		return false
	}
	return true
}
// isSafeBasename reports whether name is a plain file basename: non-empty,
// no path separators, not a traversal component, and identical to its own
// filepath.Base. Used to validate user-supplied file names before joining
// them onto a directory.
func isSafeBasename(name string) bool {
	name = strings.TrimSpace(name)
	if name == "" {
		return false
	}
	// Bug fix: "." and ".." previously passed (filepath.Base(".") == "." and
	// filepath.Base("..") == ".."), letting a caller alias or escape the
	// target directory. Neither is ever a legitimate file basename.
	if name == "." || name == ".." {
		return false
	}
	if strings.Contains(name, "/") || strings.Contains(name, "\\") {
		return false
	}
	return filepath.Base(name) == name
}
// intFromAny coerces a loosely-typed JSON value into an int. Accepted
// inputs: every Go integer type, float32/float64 (truncated toward zero),
// json.Number (integer first, then float fallback) and numeric strings.
// The bool result reports whether the coercion succeeded.
func intFromAny(v any) (int, bool) {
	switch x := v.(type) {
	case json.Number:
		if i, err := x.Int64(); err == nil {
			return int(i), true
		}
		if f, err := x.Float64(); err == nil {
			return int(f), true
		}
		return 0, false
	case string:
		trimmed := strings.TrimSpace(x)
		if trimmed == "" {
			return 0, false
		}
		i, err := strconv.Atoi(trimmed)
		if err != nil {
			return 0, false
		}
		return i, true
	case float32:
		return int(x), true
	case float64:
		return int(x), true
	case int:
		return x, true
	case int8:
		return int(x), true
	case int16:
		return int(x), true
	case int32:
		return int(x), true
	case int64:
		return int(x), true
	case uint:
		return int(x), true
	case uint8:
		return int(x), true
	case uint16:
		return int(x), true
	case uint32:
		return int(x), true
	case uint64:
		return int(x), true
	default:
		return 0, false
	}
}
// floatFromAny coerces a loosely-typed JSON value into a float64. Accepted
// inputs: float32/float64, every Go integer type, json.Number and numeric
// strings. The bool result reports whether the coercion succeeded.
func floatFromAny(v any) (float64, bool) {
	switch x := v.(type) {
	case json.Number:
		f, err := x.Float64()
		if err != nil {
			return 0, false
		}
		return f, true
	case string:
		trimmed := strings.TrimSpace(x)
		if trimmed == "" {
			return 0, false
		}
		f, err := strconv.ParseFloat(trimmed, 64)
		if err != nil {
			return 0, false
		}
		return f, true
	case float64:
		return x, true
	case float32:
		return float64(x), true
	case int:
		return float64(x), true
	case int8:
		return float64(x), true
	case int16:
		return float64(x), true
	case int32:
		return float64(x), true
	case int64:
		return float64(x), true
	case uint:
		return float64(x), true
	case uint8:
		return float64(x), true
	case uint16:
		return float64(x), true
	case uint32:
		return float64(x), true
	case uint64:
		return float64(x), true
	default:
		return 0, false
	}
}
// previewSpriteMetaFileInfo is the subset of preview-sprite data parsed out
// of a generated meta.json by readPreviewSpriteMetaFromMetaFile.
type previewSpriteMetaFileInfo struct {
	Count       int     // number of sprite frames
	Cols        int     // sprite sheet columns
	Rows        int     // sprite sheet rows
	StepSeconds float64 // time step between frames (stepSeconds/step/intervalSeconds in the file)
}
// readPreviewSpriteMetaFromMetaFile parses the "previewSprite" object out of
// the meta.json at metaPath. Several historical key spellings are accepted
// (count/frames/imageCount and stepSeconds/step/intervalSeconds); the first
// positive match wins. The bool result is true only when the section carries
// at least a frame count or a complete cols×rows grid.
func readPreviewSpriteMetaFromMetaFile(metaPath string) (previewSpriteMetaFileInfo, bool) {
	var info previewSpriteMetaFileInfo
	raw, err := os.ReadFile(metaPath)
	if err != nil || len(raw) == 0 {
		return info, false
	}
	var doc map[string]any
	dec := json.NewDecoder(strings.NewReader(string(raw)))
	dec.UseNumber() // keep numbers as json.Number for lossless coercion
	if err := dec.Decode(&doc); err != nil {
		return info, false
	}
	sprite, ok := doc["previewSprite"].(map[string]any)
	if !ok || sprite == nil {
		return info, false
	}
	for _, key := range []string{"count", "frames", "imageCount"} {
		if n, ok := intFromAny(sprite[key]); ok && n > 0 {
			info.Count = n
			break
		}
	}
	if n, ok := intFromAny(sprite["cols"]); ok && n > 0 {
		info.Cols = n
	}
	if n, ok := intFromAny(sprite["rows"]); ok && n > 0 {
		info.Rows = n
	}
	for _, key := range []string{"stepSeconds", "step", "intervalSeconds"} {
		if f, ok := floatFromAny(sprite[key]); ok && f > 0 {
			info.StepSeconds = f
			break
		}
	}
	// valid when at least a count or a full grid is present
	valid := info.Count > 0 || (info.Cols > 0 && info.Rows > 0)
	return info, valid
}
// previewSpriteTruthForID returns the authoritative preview-sprite state for
// a video id. Exists is true only when preview-sprite.webp next to the
// generated meta.json is a real, non-empty file; the grid metadata is then
// filled best-effort from meta.json. Ids containing path separators are
// rejected outright.
func previewSpriteTruthForID(id string) previewSpriteMetaResp {
	resp := previewSpriteMetaResp{Exists: false}
	id = strings.TrimSpace(id)
	if id == "" || strings.ContainsAny(id, `/\`) {
		return resp
	}
	metaPath, err := generatedMetaFile(id)
	if err != nil || strings.TrimSpace(metaPath) == "" {
		return resp
	}
	spritePath := filepath.Join(filepath.Dir(metaPath), "preview-sprite.webp")
	st, err := os.Stat(spritePath)
	if err != nil || st == nil || st.IsDir() || st.Size() <= 0 {
		return resp
	}
	// A real sprite file exists on disk.
	resp.Exists = true
	resp.Path = "/api/preview-sprite/" + url.PathEscape(id)
	// Grid/count details from meta.json are best-effort; absence is fine.
	meta, ok := readPreviewSpriteMetaFromMetaFile(metaPath)
	if !ok {
		return resp
	}
	if meta.Count > 0 {
		resp.Count = meta.Count
	}
	if meta.Cols > 0 {
		resp.Cols = meta.Cols
	}
	if meta.Rows > 0 {
		resp.Rows = meta.Rows
	}
	if meta.StepSeconds > 0 {
		resp.StepSeconds = meta.StepSeconds
	}
	return resp
}
// applyPreviewSpriteTruthToDoneMetaResp stamps the on-disk preview-sprite
// truth for id into resp.PreviewSprite. Nil-safe no-op when resp is nil.
func applyPreviewSpriteTruthToDoneMetaResp(id string, resp *doneMetaFileResp) {
	if resp == nil {
		return
	}
	resp.PreviewSprite = previewSpriteTruthForID(id)
}
// metaJSONToMap decodes JSON object bytes into a map, preserving numbers as
// json.Number. Empty input or any decode failure yields an empty (non-nil)
// map.
func metaJSONToMap(b []byte) map[string]any {
	if len(b) == 0 {
		return map[string]any{}
	}
	var m map[string]any
	dec := json.NewDecoder(strings.NewReader(string(b)))
	dec.UseNumber()
	if err := dec.Decode(&m); err != nil || m == nil {
		return map[string]any{}
	}
	return m
}

// metaMapFromAny normalizes an arbitrarily-typed meta payload into a
// map[string]any. Accepted inputs: nil (empty map), map[string]any (shallow
// copy, so callers may freely mutate the result), JSON text as string /
// []byte / json.RawMessage, and — best effort — any other value via a JSON
// round trip. Failures never return nil, always an empty map.
func metaMapFromAny(v any) map[string]any {
	switch x := v.(type) {
	case nil:
		return map[string]any{}
	case map[string]any:
		// Shallow copy keeps the caller's map isolated from later mutation.
		cp := make(map[string]any, len(x))
		for k, val := range x {
			cp[k] = val
		}
		return cp
	case string:
		return metaJSONToMap([]byte(strings.TrimSpace(x)))
	case []byte:
		return metaJSONToMap(x)
	case json.RawMessage:
		return metaJSONToMap(x)
	default:
		// Best effort: rehydrate an unknown type through JSON.
		b, err := json.Marshal(x)
		if err != nil {
			return map[string]any{}
		}
		return metaJSONToMap(b)
	}
}
// setStructFieldJSONMap assigns the map m into a struct field of unknown
// static type, via reflection. Supported targets: interface/any (the map is
// stored directly), string (JSON text), []byte / json.RawMessage (JSON
// bytes); any other field type is filled by json.Unmarshal into a freshly
// allocated value of that type. Invalid or unsettable fields are ignored.
func setStructFieldJSONMap(fv reflect.Value, m map[string]any) {
	if !fv.IsValid() || !fv.CanSet() {
		return
	}
	// Serialize once up front (used by the string / []byte / fallback paths).
	b, err := json.Marshal(m)
	if err != nil {
		return
	}
	switch fv.Kind() {
	case reflect.Interface:
		// interface{} / any -> store the map itself
		fv.Set(reflect.ValueOf(m))
		return
	case reflect.String:
		fv.SetString(string(b))
		return
	case reflect.Slice:
		// []byte / json.RawMessage
		if fv.Type().Elem().Kind() == reflect.Uint8 {
			fv.SetBytes(b)
			return
		}
	}
	// Fallback: unmarshal into the field's concrete type. Errors are
	// swallowed deliberately (best-effort; the field is left untouched).
	ptr := reflect.New(fv.Type())
	if err := json.Unmarshal(b, ptr.Interface()); err == nil {
		fv.Set(ptr.Elem())
	}
}
// applyPreviewSpriteTruthToRecordJobMeta overwrites the "previewSprite"
// entry inside a job's Meta payload with the on-disk truth for the job's
// output file and removes the legacy scrubber keys. The Meta field is
// located via reflection so this works regardless of how Meta is declared
// (map, string, []byte, raw JSON, struct). No-op when j is nil, Output is
// empty, or RecordJob has no settable Meta field.
func applyPreviewSpriteTruthToRecordJobMeta(j *RecordJob) {
	if j == nil {
		return
	}
	// Derive the id from Output (canonical: no "HOT" prefix, no extension).
	outPath := strings.TrimSpace(j.Output)
	if outPath == "" {
		return
	}
	base := filepath.Base(outPath)
	id := stripHotPrefix(strings.TrimSuffix(base, filepath.Ext(base)))
	id = strings.TrimSpace(id)
	ps := previewSpriteTruthForID(id)
	// Access the "Meta" field via reflection (robust against its type).
	rv := reflect.ValueOf(j)
	if rv.Kind() != reflect.Pointer || rv.IsNil() {
		return
	}
	sv := rv.Elem()
	if !sv.IsValid() || sv.Kind() != reflect.Struct {
		return
	}
	fv := sv.FieldByName("Meta")
	if !fv.IsValid() || !fv.CanSet() {
		// RecordJob has no (settable) Meta field -> nothing to do
		return
	}
	var raw any
	switch fv.Kind() {
	case reflect.Interface:
		if fv.IsNil() {
			raw = nil
		} else {
			raw = fv.Interface()
		}
	default:
		raw = fv.Interface()
	}
	meta := metaMapFromAny(raw)
	if meta == nil {
		meta = map[string]any{}
	}
	// Remove legacy/fallback scrubber keys (if present).
	delete(meta, "previewScrubberPath")
	delete(meta, "previewScrubberCount")
	// Hard-overwrite previewSprite with the real on-disk truth.
	psMap := map[string]any{
		"exists": ps.Exists,
	}
	if ps.Exists {
		psMap["path"] = ps.Path
		if ps.Count > 0 {
			psMap["count"] = ps.Count
		}
		if ps.Cols > 0 {
			psMap["cols"] = ps.Cols
		}
		if ps.Rows > 0 {
			psMap["rows"] = ps.Rows
		}
		if ps.StepSeconds > 0 {
			psMap["stepSeconds"] = ps.StepSeconds
		}
	}
	meta["previewSprite"] = psMap
	setStructFieldJSONMap(fv, meta)
}
// recordList is the GET list endpoint: it returns every in-memory job that
// is neither nil nor hidden, ordered newest StartedAt first.
func recordList(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Nur GET erlaubt", http.StatusMethodNotAllowed)
		return
	}
	jobsMu.Lock()
	visible := make([]*RecordJob, 0, len(jobs))
	for _, job := range jobs {
		// Hidden probe jobs (and nil slots) stay invisible to the UI.
		if job == nil || job.Hidden {
			continue
		}
		visible = append(visible, job)
	}
	jobsMu.Unlock()
	// Newest first.
	sort.Slice(visible, func(a, b int) bool {
		return visible[a].StartedAt.After(visible[b].StartedAt)
	})
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Cache-Control", "no-store")
	_ = json.NewEncoder(w).Encode(visible)
}
// writeSSE frames an arbitrary payload as a Server-Sent-Events data block:
// every line is prefixed with "data: " and the block is terminated by a
// blank line. Per the SSE event-stream format, "\r\n", "\n" and a lone "\r"
// all count as line terminators, so all three are normalized to "\n" before
// splitting — previously a bare "\r" inside the payload would corrupt the
// event framing.
func writeSSE(w http.ResponseWriter, data []byte) {
	s := strings.ReplaceAll(string(data), "\r\n", "\n")
	s = strings.ReplaceAll(s, "\r", "\n")
	for _, line := range strings.Split(s, "\n") {
		fmt.Fprintf(w, "data: %s\n", line)
	}
	fmt.Fprint(w, "\n")
}
// handleDoneStream serves a Server-Sent-Events endpoint that notifies
// clients whenever the done list changes. Real changes are delivered as
// "doneChanged" events; the initial hello and the periodic keepalives are
// sent as SSE comment frames (": ...") so they trigger no event listeners
// on the client. The connection lives until the request context is
// cancelled or the hub closes this client's channel.
func handleDoneStream(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/event-stream; charset=utf-8")
	w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
	w.Header().Set("Connection", "keep-alive")
	// important for nginx / reverse proxies: disable response buffering
	w.Header().Set("X-Accel-Buffering", "no")
	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "streaming unsupported", http.StatusInternalServerError)
		return
	}
	// one buffered channel per connected client
	ch := make(chan []byte, 32)
	doneHub.add(ch)
	defer doneHub.remove(ch)
	// NO doneChanged as hello — comment frame only
	fmt.Fprintf(w, ": hello seq=%d ts=%d\n\n", atomic.LoadUint64(&doneSeq), time.Now().UnixMilli())
	flusher.Flush()
	ctx := r.Context()
	ping := time.NewTicker(15 * time.Second)
	defer ping.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ping.C:
			// keepalive as SSE comment (does not fire addEventListener("doneChanged"))
			fmt.Fprintf(w, ": ping ts=%d\n\n", time.Now().UnixMilli())
			flusher.Flush()
		case b, ok := <-ch:
			if !ok {
				return
			}
			// only real changes are emitted as doneChanged events
			fmt.Fprintf(w, "event: doneChanged\n")
			fmt.Fprintf(w, "data: %s\n\n", b)
			flusher.Flush()
		}
	}
}
// handleRecordVideo is a thin route alias that delegates to recordVideo.
func handleRecordVideo(w http.ResponseWriter, r *http.Request) {
	recordVideo(w, r)
}
// startRecordingFromRequest is the POST start endpoint: it decodes a
// RecordRequest from the request body, starts the recording, and echoes the
// new job back as JSON. Both decode and start failures map to HTTP 400.
func startRecordingFromRequest(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Nur POST erlaubt", http.StatusMethodNotAllowed)
		return
	}
	var payload RecordRequest
	dec := json.NewDecoder(r.Body)
	if err := dec.Decode(&payload); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	created, err := startRecordingInternal(payload)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(created)
}
// rwTrack wraps an http.ResponseWriter and remembers whether headers or any
// body bytes were already written, so late error paths can avoid Go's
// "superfluous response.WriteHeader" log.
// (Go methods must live at package scope, hence the top-level type.)
type rwTrack struct {
	http.ResponseWriter
	wrote bool
}

// WriteHeader forwards the status code only the first time; repeats are
// silently dropped.
func (t *rwTrack) WriteHeader(statusCode int) {
	if !t.wrote {
		t.wrote = true
		t.ResponseWriter.WriteHeader(statusCode)
	}
}

// Write marks the response as started and forwards the payload.
func (t *rwTrack) Write(p []byte) (int, error) {
	t.wrote = true
	return t.ResponseWriter.Write(p)
}
// ensureMetaJSONForPlayback creates generated/meta/<id>/meta.json when it is
// missing. Best-effort: any failure (ffprobe missing, IO error, timeout, …)
// must never prevent playback, so errors are swallowed after cleanup.
func ensureMetaJSONForPlayback(ctx context.Context, videoPath string) {
	// only mp4 (after the TS remux everything playable is mp4)
	if strings.ToLower(filepath.Ext(videoPath)) != ".mp4" {
		return
	}
	// id: basename without extension and without the "HOT" prefix
	base := strings.TrimSuffix(filepath.Base(videoPath), filepath.Ext(videoPath))
	id := strings.TrimSpace(stripHotPrefix(base))
	if id == "" {
		return
	}
	metaPath, err := generatedMetaFile(id)
	if err != nil || strings.TrimSpace(metaPath) == "" {
		return
	}
	// already present and non-empty? nothing to do
	if fi, err := os.Stat(metaPath); err == nil && fi != nil && !fi.IsDir() && fi.Size() > 0 {
		return
	}
	// stat the video for later checks / the meta payload
	vfi, err := os.Stat(videoPath)
	if err != nil || vfi == nil || vfi.IsDir() || vfi.Size() == 0 {
		return
	}
	// short timeout: playback must not block "forever" on probing
	pctx, cancel := context.WithTimeout(ctx, 4*time.Second)
	defer cancel()
	// duration (best effort)
	dur := 0.0
	if d, derr := durationSecondsCached(pctx, videoPath); derr == nil && d > 0 {
		dur = d
	}
	// Width/height/fps/source URL are intentionally left at their zero
	// values here; other code paths fill them when available.
	// make sure the target directory exists
	_ = os.MkdirAll(filepath.Dir(metaPath), 0o755)
	// NOTE(review): the JSON field names must stay in sync with the
	// readVideoMeta(...) / readVideoMetaDuration(...) parsers — confirm there.
	type videoMeta struct {
		DurationSeconds float64 `json:"durationSeconds,omitempty"`
		Width           int     `json:"width,omitempty"`
		Height          int     `json:"height,omitempty"`
		FPS             float64 `json:"fps,omitempty"`
		SourceURL       string  `json:"sourceUrl,omitempty"`
		// optional: file info / updatedAt
		UpdatedAtUnix int64 `json:"updatedAtUnix,omitempty"`
		FileSizeBytes int64 `json:"fileSizeBytes,omitempty"`
	}
	m := videoMeta{
		DurationSeconds: dur,
		UpdatedAtUnix:   time.Now().Unix(),
		FileSizeBytes:   vfi.Size(),
	}
	b, err := json.MarshalIndent(m, "", " ")
	if err != nil {
		return
	}
	// Write atomically (tmp + rename) so concurrent readers never observe a
	// half-written JSON file. Bug fix: only rename when the tmp write
	// succeeded — previously the WriteFile error was ignored and a failed or
	// partial tmp write could still be promoted to meta.json.
	tmp := metaPath + ".tmp"
	if werr := os.WriteFile(tmp, b, 0o644); werr != nil {
		_ = os.Remove(tmp)
		return
	}
	_ = os.Rename(tmp, metaPath)
}
// recordVideo streams a recorded video to the client. The file is resolved
// either via ?file=<basename> (searched in done/, done/keep/ and recordDir,
// each including one level of subdirectories) or via ?id=<job id> (only
// works while the job is still in RAM). TS files — and mp4-named files that
// are actually TS — are remuxed to MP4 on demand, meta.json is created
// best-effort, and the result is served range-capable via serveVideoFile.
func recordVideo(w http.ResponseWriter, r *http.Request) {
	// ---- wrap writer to detect "already wrote" ----
	tw := &rwTrack{ResponseWriter: w}
	w = tw
	writeErr := func(code int, msg string) {
		// Once headers/body went out we must NOT call http.Error again,
		// otherwise Go logs "superfluous response.WriteHeader".
		if tw.wrote {
			fmt.Println("[recordVideo] late error (headers already sent):", code, msg)
			return
		}
		http.Error(w, msg, code) // uses WriteHeader+Write -> tw.wrote flips automatically
	}
	writeStatus := func(code int) {
		if tw.wrote {
			return
		}
		w.WriteHeader(code) // goes through rwTrack.WriteHeader
	}
	// ---- CORS ----
	origin := r.Header.Get("Origin")
	if origin != "" {
		w.Header().Set("Access-Control-Allow-Origin", origin)
		w.Header().Set("Vary", "Origin")
		w.Header().Set("Access-Control-Allow-Methods", "GET,HEAD,OPTIONS")
		// Browsers often send If-Range / If-Modified-Since / If-None-Match on
		// video range requests. If those are not allowed the preflight fails
		// and VideoJS reports a "NETWORK error".
		w.Header().Set("Access-Control-Allow-Headers", "Range, If-Range, If-Modified-Since, If-None-Match")
		w.Header().Set("Access-Control-Expose-Headers", "Content-Length, Content-Range, Accept-Ranges, ETag, Last-Modified")
		w.Header().Set("Access-Control-Allow-Credentials", "true")
	}
	if r.Method == http.MethodOptions {
		writeStatus(http.StatusNoContent)
		return
	}
	// ---- resolve outPath from file or id ----
	resolveOutPath := func() (string, bool) {
		// playback by file name (for doneDir / recordDir)
		if rawFile := strings.TrimSpace(r.URL.Query().Get("file")); rawFile != "" {
			file, err := url.QueryUnescape(rawFile)
			if err != nil {
				writeErr(http.StatusBadRequest, "ungültiger file")
				return "", false
			}
			file = strings.TrimSpace(file)
			// no paths, no backslashes, no traversal
			if file == "" ||
				strings.Contains(file, "/") ||
				strings.Contains(file, "\\") ||
				filepath.Base(file) != file {
				writeErr(http.StatusBadRequest, "ungültiger file")
				return "", false
			}
			ext := strings.ToLower(filepath.Ext(file))
			if ext != ".mp4" && ext != ".ts" {
				writeErr(http.StatusForbidden, "nicht erlaubt")
				return "", false
			}
			s := getSettings()
			recordAbs, err := resolvePathRelativeToApp(s.RecordDir)
			if err != nil {
				writeErr(http.StatusInternalServerError, "recordDir auflösung fehlgeschlagen: "+err.Error())
				return "", false
			}
			doneAbs, err := resolvePathRelativeToApp(s.DoneDir)
			if err != nil {
				writeErr(http.StatusInternalServerError, "doneDir auflösung fehlgeschlagen: "+err.Error())
				return "", false
			}
			// Candidates: first done/ (incl. 1 level of subdirs, but without
			// "keep"), then keep/ (incl. 1 level of subdirs), then recordDir.
			// A .ts request also tries the corresponding .mp4 name.
			names := []string{file}
			if ext == ".ts" {
				names = append(names, strings.TrimSuffix(file, ext)+".mp4")
			}
			var outPath string
			for _, name := range names {
				if p, _, ok := findFileInDirOrOneLevelSubdirs(doneAbs, name, "keep"); ok {
					outPath = p
					break
				}
				if p, _, ok := findFileInDirOrOneLevelSubdirs(filepath.Join(doneAbs, "keep"), name, ""); ok {
					outPath = p
					break
				}
				if p, _, ok := findFileInDirOrOneLevelSubdirs(recordAbs, name, ""); ok {
					outPath = p
					break
				}
			}
			if outPath == "" {
				writeErr(http.StatusNotFound, "datei nicht gefunden")
				return "", false
			}
			return filepath.Clean(strings.TrimSpace(outPath)), true
		}
		// Legacy: playback by job id (only works while the job lives in RAM)
		id := strings.TrimSpace(r.URL.Query().Get("id"))
		if id == "" {
			writeErr(http.StatusBadRequest, "id fehlt")
			return "", false
		}
		jobsMu.Lock()
		job, ok := jobs[id]
		jobsMu.Unlock()
		if !ok {
			writeErr(http.StatusNotFound, "job nicht gefunden")
			return "", false
		}
		outPath := filepath.Clean(strings.TrimSpace(job.Output))
		if outPath == "" {
			writeErr(http.StatusNotFound, "output fehlt")
			return "", false
		}
		if !filepath.IsAbs(outPath) {
			abs, err := resolvePathRelativeToApp(outPath)
			if err != nil {
				writeErr(http.StatusInternalServerError, "pfad auflösung fehlgeschlagen: "+err.Error())
				return "", false
			}
			outPath = abs
		}
		fi, err := os.Stat(outPath)
		if err != nil || fi.IsDir() || fi.Size() == 0 {
			writeErr(http.StatusNotFound, "datei nicht gefunden")
			return "", false
		}
		return outPath, true
	}
	outPath, ok := resolveOutPath()
	if !ok {
		return
	}
	// ---- TS -> MP4 (on-demand remux) ----
	if strings.ToLower(filepath.Ext(outPath)) == ".ts" {
		newOut, err := maybeRemuxTS(outPath)
		if err != nil {
			writeErr(http.StatusInternalServerError, "TS Remux fehlgeschlagen: "+err.Error())
			return
		}
		if strings.TrimSpace(newOut) == "" {
			writeErr(http.StatusInternalServerError, "TS kann im Browser nicht abgespielt werden; Remux hat keine MP4 erzeugt")
			return
		}
		outPath = filepath.Clean(strings.TrimSpace(newOut))
		fi, err := os.Stat(outPath)
		if err != nil || fi.IsDir() || fi.Size() == 0 || strings.ToLower(filepath.Ext(outPath)) != ".mp4" {
			writeErr(http.StatusInternalServerError, "Remux-Ergebnis ungültig")
			return
		}
	}
	// If the file is named ".mp4" but actually contains TS/HTML, don't serve
	// it as MP4: remux TS, refuse HTML.
	if strings.ToLower(filepath.Ext(outPath)) == ".mp4" {
		kind, _ := sniffVideoKind(outPath)
		switch kind {
		case "ts":
			newOut, err := maybeRemuxTS(outPath)
			if err != nil {
				writeErr(http.StatusInternalServerError, "Datei ist TS (nur .mp4 benannt); Remux fehlgeschlagen: "+err.Error())
				return
			}
			outPath = filepath.Clean(strings.TrimSpace(newOut))
		case "html":
			writeErr(http.StatusInternalServerError, "Server liefert HTML statt Video (Pfad/Lookup prüfen)")
			return
		}
	}
	// ensure meta.json exists (best effort) before serving
	ensureMetaJSONForPlayback(r.Context(), outPath)
	// always serve the original file (range-capable via serveVideoFile)
	w.Header().Set("Cache-Control", "no-store")
	serveVideoFile(w, r, outPath)
}
// recordStatus is the status endpoint (?id=…): it returns the in-memory job
// as JSON after refreshing its preview-sprite metadata from disk.
func recordStatus(w http.ResponseWriter, r *http.Request) {
	id := r.URL.Query().Get("id")
	if id == "" {
		http.Error(w, "id fehlt", http.StatusBadRequest)
		return
	}
	jobsMu.Lock()
	job, ok := jobs[id]
	jobsMu.Unlock()
	if !ok {
		http.Error(w, "job nicht gefunden", http.StatusNotFound)
		return
	}
	applyPreviewSpriteTruthToRecordJobMeta(job)
	// Consistency fix: declare the JSON content type like the sibling
	// handlers (recordList, recordStop) do, and acknowledge the encoder
	// result explicitly instead of silently ignoring it.
	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(job)
}
// recordStop is the POST stop endpoint (?id=…): it stops the referenced job
// and echoes it back as JSON.
func recordStop(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Nur POST", http.StatusMethodNotAllowed)
		return
	}
	jobID := r.URL.Query().Get("id")
	jobsMu.Lock()
	target, found := jobs[jobID]
	jobsMu.Unlock()
	if !found {
		http.Error(w, "job nicht gefunden", http.StatusNotFound)
		return
	}
	stopJobsInternal([]*RecordJob{target})
	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(target)
}
// buildDoneIndex scans doneAbs (plus one level of subdirectories and the
// done/keep tree) for finished .mp4/.ts files and returns both the flat item
// list and pre-sorted index slices for every (includeKeep, sortMode)
// combination, keyed "<0|1>|<sortMode>". Files below any .trash directory
// and empty files are skipped.
func buildDoneIndex(doneAbs string) ([]doneIndexItem, map[string][]int) {
	items := make([]doneIndexItem, 0, 2048)
	sortedIdx := make(map[string][]int)
	isTrashPath := func(full string) bool {
		p := strings.ToLower(filepath.ToSlash(strings.TrimSpace(full)))
		return strings.Contains(p, "/.trash/") || strings.HasSuffix(p, "/.trash")
	}
	// addFile turns one on-disk video file into a doneIndexItem.
	addFile := func(full string, fi os.FileInfo) {
		if fi == nil || fi.IsDir() || fi.Size() == 0 {
			return
		}
		if isTrashPath(full) {
			return
		}
		name := filepath.Base(full)
		ext := strings.ToLower(filepath.Ext(name))
		if ext != ".mp4" && ext != ".ts" {
			return
		}
		// keep?
		p := strings.ToLower(filepath.ToSlash(full))
		fromKeep := strings.Contains(p, "/keep/")
		// started/ended: default to mtime, prefer a timestamp encoded in the
		// filename when the pattern matches.
		t := fi.ModTime()
		start := t
		base := strings.TrimSuffix(name, filepath.Ext(name))
		stem := strings.TrimPrefix(base, "HOT ")
		if m := startedAtFromFilenameRe.FindStringSubmatch(stem); m != nil {
			// NOTE(review): group 1 of startedAtFromFilenameRe is unused here —
			// confirm the regex group layout matches month/day/year/h/m/s.
			mm, _ := strconv.Atoi(m[2])
			dd, _ := strconv.Atoi(m[3])
			yy, _ := strconv.Atoi(m[4])
			hh, _ := strconv.Atoi(m[5])
			mi, _ := strconv.Atoi(m[6])
			ss, _ := strconv.Atoi(m[7])
			start = time.Date(yy, time.Month(mm), dd, hh, mi, ss, 0, time.Local)
		}
		// modelKey (lowercased) via the existing filename/path heuristic
		mk := strings.ToLower(strings.TrimSpace(modelKeyFromFilenameOrPath(name, full, doneAbs)))
		if mk == "" {
			// fallback: parent dir name (skip "keep")
			parent := strings.ToLower(strings.TrimSpace(filepath.Base(filepath.Dir(full))))
			if parent != "" && parent != "keep" {
				mk = parent
			}
		}
		// fileSort (strip the hot prefix)
		fs := strings.ToLower(name)
		fs = strings.TrimPrefix(fs, "hot ")
		// duration + srcURL: meta.json first, then the duration cache only
		dur := 0.0
		srcURL := ""
		id := stripHotPrefix(strings.TrimSuffix(filepath.Base(full), filepath.Ext(full)))
		if strings.TrimSpace(id) != "" {
			if mp, err := generatedMetaFile(id); err == nil {
				if d, ok := readVideoMetaDuration(mp, fi); ok {
					dur = d
				}
				if u, ok := readVideoMetaSourceURL(mp, fi); ok {
					srcURL = u
				}
			}
		}
		if dur <= 0 {
			dur = durationSecondsCacheOnly(full, fi)
		}
		ended := t
		items = append(items, doneIndexItem{
			job: &RecordJob{
				ID:              base,
				Output:          full,
				SourceURL:       srcURL,
				Status:          JobFinished,
				StartedAt:       start,
				EndedAt:         &ended,
				DurationSeconds: dur,
				SizeBytes:       fi.Size(),
			},
			endedAt:  ended,
			fileSort: fs,
			fromKeep: fromKeep,
			modelKey: mk,
		})
	}
	// scanDir walks dir plus exactly one level of subdirectories.
	scanDir := func(dir string, skipKeep bool) {
		entries, err := os.ReadDir(dir)
		if err != nil {
			return
		}
		for _, e := range entries {
			if e.IsDir() {
				if strings.EqualFold(e.Name(), ".trash") {
					continue
				}
				if skipKeep && e.Name() == "keep" {
					continue
				}
				sub := filepath.Join(dir, e.Name())
				subs, err := os.ReadDir(sub)
				if err != nil {
					continue
				}
				for _, se := range subs {
					if se.IsDir() {
						continue
					}
					full := filepath.Join(sub, se.Name())
					fi, err := os.Stat(full)
					if err != nil {
						continue
					}
					addFile(full, fi)
				}
				continue
			}
			full := filepath.Join(dir, e.Name())
			fi, err := os.Stat(full)
			if err != nil {
				continue
			}
			addFile(full, fi)
		}
	}
	// done (without keep)
	scanDir(doneAbs, true)
	// keep (kept in the index so includeKeep queries stay fast)
	scanDir(filepath.Join(doneAbs, "keep"), false)
	// Pre-sort for the common case: includeKeep true/false × all sort modes
	// (only used when NO model filter is applied).
	mkSorted := func(includeKeep bool, sortMode string) []int {
		idx := make([]int, 0, len(items))
		for i := range items {
			if !includeKeep && items[i].fromKeep {
				continue
			}
			idx = append(idx, i)
		}
		durationForSort := func(it doneIndexItem) (float64, bool) {
			if it.job.DurationSeconds > 0 {
				return it.job.DurationSeconds, true
			}
			return 0, false
		}
		// Every mode falls back to completion time, then filename, so the
		// ordering is fully deterministic.
		sort.Slice(idx, func(a, b int) bool {
			A := items[idx[a]]
			B := items[idx[b]]
			ta, tb := A.endedAt, B.endedAt
			switch sortMode {
			case "completed_asc":
				if !ta.Equal(tb) {
					return ta.Before(tb)
				}
				return A.fileSort < B.fileSort
			case "completed_desc":
				if !ta.Equal(tb) {
					return ta.After(tb)
				}
				return A.fileSort < B.fileSort
			case "file_asc":
				if A.fileSort != B.fileSort {
					return A.fileSort < B.fileSort
				}
				if !ta.Equal(tb) {
					return ta.After(tb)
				}
				return A.fileSort < B.fileSort
			case "file_desc":
				if A.fileSort != B.fileSort {
					return A.fileSort > B.fileSort
				}
				if !ta.Equal(tb) {
					return ta.After(tb)
				}
				return A.fileSort < B.fileSort
			case "duration_asc":
				da, okA := durationForSort(A)
				db, okB := durationForSort(B)
				if okA != okB {
					// items with a known duration sort first
					return okA
				}
				if okA && okB && da != db {
					return da < db
				}
				if !ta.Equal(tb) {
					return ta.After(tb)
				}
				return A.fileSort < B.fileSort
			case "duration_desc":
				da, okA := durationForSort(A)
				db, okB := durationForSort(B)
				if okA != okB {
					return okA
				}
				if okA && okB && da != db {
					return da > db
				}
				if !ta.Equal(tb) {
					return ta.After(tb)
				}
				return A.fileSort < B.fileSort
			case "size_asc":
				if A.job.SizeBytes != B.job.SizeBytes {
					return A.job.SizeBytes < B.job.SizeBytes
				}
				if !ta.Equal(tb) {
					return ta.After(tb)
				}
				return A.fileSort < B.fileSort
			case "size_desc":
				if A.job.SizeBytes != B.job.SizeBytes {
					return A.job.SizeBytes > B.job.SizeBytes
				}
				if !ta.Equal(tb) {
					return ta.After(tb)
				}
				return A.fileSort < B.fileSort
			default:
				// unknown mode -> newest completed first
				if !ta.Equal(tb) {
					return ta.After(tb)
				}
				return A.fileSort < B.fileSort
			}
		})
		return idx
	}
	modes := []string{
		"completed_desc", "completed_asc",
		"file_asc", "file_desc",
		"duration_asc", "duration_desc",
		"size_asc", "size_desc",
	}
	for _, m := range modes {
		sortedIdx["0|"+m] = mkSorted(false, m)
		sortedIdx["1|"+m] = mkSorted(true, m)
	}
	return items, sortedIdx
}
// doneIndexItem is one entry of the done-directory index built by
// buildDoneIndex. (The "sync" import this once referred to is already
// present in the import block above.)
type doneIndexItem struct {
	job      *RecordJob // synthesized job view of a finished file on disk
	endedAt  time.Time  // completion time (file mtime)
	fileSort string     // lowercased filename without the "hot " prefix, used as sort key
	fromKeep bool       // true when the file lives under done/keep/
	modelKey string     // model name, lowercased
}
// doneIndexCache memoizes the expensive done-directory scan. It is rebuilt
// when doneSeq advances, when doneAbs changes, or after 30s of age (see
// recordDoneMeta). All fields are guarded by mu.
type doneIndexCache struct {
	mu        sync.Mutex
	builtAt   time.Time        // time of the last rebuild
	seq       uint64           // doneSeq value the cache was built at
	doneAbs   string           // root the cache was built for
	items     []doneIndexItem  // flat item list from buildDoneIndex
	sortedIdx map[string][]int // key: "<includeKeep 0/1>|<sortMode>"
}

// doneCache is the process-wide singleton instance of the index cache.
var doneCache doneIndexCache
// normalizeQueryModel reduces a model query parameter to a lowercase bare
// name. It tolerates full URLs ("https://host/model"), path fragments (last
// non-empty segment wins), "host:model" forms (text after the last colon),
// and a leading "@". An empty/blank input yields "".
func normalizeQueryModel(raw string) string {
	s := strings.TrimSpace(raw)
	if s == "" {
		return ""
	}
	for _, scheme := range []string{"http://", "https://"} {
		s = strings.TrimPrefix(s, scheme)
	}
	// last non-empty URL segment, in case ".../modelname" was passed
	if strings.Contains(s, "/") {
		segs := strings.Split(s, "/")
		for i := len(segs) - 1; i >= 0; i-- {
			if seg := strings.TrimSpace(segs[i]); seg != "" {
				s = seg
				break
			}
		}
	}
	// handle "host:model" style input
	if i := strings.LastIndex(s, ":"); i >= 0 {
		s = strings.TrimSpace(s[i+1:])
	}
	s = strings.TrimPrefix(s, "@")
	return strings.ToLower(strings.TrimSpace(s))
}
// recordDoneMeta is the GET done-meta endpoint with two modes:
//   - ?file=<basename>: per-file metadata (duration, dimensions, fps, source
//     URL, preview-sprite truth) for one file in done/ or done/keep/;
//   - otherwise: count mode, returning how many done items match the
//     optional includeKeep / model filters, served from doneCache.
func recordDoneMeta(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Nur GET erlaubt", http.StatusMethodNotAllowed)
		return
	}
	// File mode: /api/record/done/meta?file=XYZ.mp4
	if raw := strings.TrimSpace(r.URL.Query().Get("file")); raw != "" {
		file, err := url.QueryUnescape(raw)
		if err != nil {
			http.Error(w, "ungültiger file", http.StatusBadRequest)
			return
		}
		file = strings.TrimSpace(file)
		// only a bare basename is allowed (no traversal)
		if !isSafeBasename(file) {
			http.Error(w, "ungültiger file", http.StatusBadRequest)
			return
		}
		ext := strings.ToLower(filepath.Ext(file))
		if ext != ".mp4" && ext != ".ts" {
			http.Error(w, "nicht erlaubt", http.StatusForbidden)
			return
		}
		s := getSettings()
		doneAbs, err := resolvePathRelativeToApp(s.DoneDir)
		if err != nil {
			http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
			return
		}
		if strings.TrimSpace(doneAbs) == "" {
			http.Error(w, "doneDir ist leer", http.StatusBadRequest)
			return
		}
		// locate the file in done/ or keep/
		full, _, fi, err := resolveDoneFileByName(doneAbs, file)
		if err != nil || fi == nil || fi.IsDir() || fi.Size() == 0 {
			http.Error(w, "datei nicht gefunden", http.StatusNotFound)
			return
		}
		// optional TS -> MP4 remux (meta should describe the playable MP4)
		outPath := filepath.Clean(strings.TrimSpace(full))
		if strings.ToLower(filepath.Ext(outPath)) == ".ts" {
			if newOut, rerr := maybeRemuxTS(outPath); rerr == nil && strings.TrimSpace(newOut) != "" {
				outPath = filepath.Clean(strings.TrimSpace(newOut))
				if nfi, serr := os.Stat(outPath); serr == nil && nfi != nil {
					fi = nfi
				}
			}
		}
		// create meta.json best-effort before reading it
		ensureMetaJSONForPlayback(r.Context(), outPath)
		resp := doneMetaFileResp{File: filepath.Base(outPath)}
		// read meta (when present)
		id := stripHotPrefix(strings.TrimSuffix(filepath.Base(outPath), filepath.Ext(outPath)))
		// preview-sprite truth is always set (explicit true/false)
		applyPreviewSpriteTruthToDoneMetaResp(id, &resp)
		if strings.TrimSpace(id) != "" {
			if mp, merr := generatedMetaFile(id); merr == nil && strings.TrimSpace(mp) != "" {
				if mfi, serr := os.Stat(mp); serr == nil && mfi != nil && !mfi.IsDir() && mfi.Size() > 0 {
					resp.MetaExists = true
					if dur, w2, h2, fps2, ok := readVideoMeta(mp, fi); ok {
						resp.DurationSeconds = dur
						resp.Width = w2
						resp.Height = h2
						resp.FPS = fps2
					}
					if u, ok := readVideoMetaSourceURL(mp, fi); ok {
						resp.SourceURL = u
					}
				}
			}
		}
		// fallback: meta exists but duration is missing -> central cache/ffprobe
		if resp.DurationSeconds <= 0 {
			pctx, cancel := context.WithTimeout(r.Context(), 5*time.Second)
			defer cancel()
			if d, derr := durationSecondsCached(pctx, outPath); derr == nil && d > 0 {
				resp.DurationSeconds = d
			}
		}
		w.Header().Set("Content-Type", "application/json")
		w.Header().Set("Cache-Control", "no-store")
		_ = json.NewEncoder(w).Encode(resp)
		return
	}
	// ---------------------
	// Count mode (original behavior)
	// ---------------------
	// optional: includeKeep
	qKeep := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("includeKeep")))
	includeKeep := qKeep == "1" || qKeep == "true" || qKeep == "yes"
	// optional: model filter
	qModel := normalizeQueryModel(r.URL.Query().Get("model"))
	s := getSettings()
	doneAbs, err := resolvePathRelativeToApp(s.DoneDir)
	if err != nil {
		http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	curSeq := atomic.LoadUint64(&doneSeq)
	now := time.Now()
	// cache rebuild (same policy as recordDoneList; count comes from the index)
	doneCache.mu.Lock()
	needRebuild := doneCache.seq != curSeq ||
		doneCache.doneAbs != doneAbs ||
		now.Sub(doneCache.builtAt) > 30*time.Second
	if needRebuild {
		if _, err := os.Stat(doneAbs); err != nil && os.IsNotExist(err) {
			// done dir is gone: publish an empty but fully-keyed index
			doneCache.items = nil
			doneCache.sortedIdx = make(map[string][]int, 16)
			modes := []string{
				"completed_desc", "completed_asc",
				"file_asc", "file_desc",
				"duration_asc", "duration_desc",
				"size_asc", "size_desc",
			}
			for _, m := range modes {
				doneCache.sortedIdx["0|"+m] = []int{}
				doneCache.sortedIdx["1|"+m] = []int{}
			}
			doneCache.seq = curSeq
			doneCache.doneAbs = doneAbs
			doneCache.builtAt = now
		} else {
			items, sorted := buildDoneIndex(doneAbs)
			doneCache.items = items
			doneCache.sortedIdx = sorted
			doneCache.seq = curSeq
			doneCache.doneAbs = doneAbs
			doneCache.builtAt = now
		}
	}
	items := doneCache.items
	sortedAll := doneCache.sortedIdx
	doneCache.mu.Unlock()
	// determine the count
	count := 0
	if qModel == "" {
		// no model filter: any pre-sorted slice has the right length
		incKey := "0"
		if includeKeep {
			incKey = "1"
		}
		count = len(sortedAll[incKey+"|completed_desc"])
	} else {
		for _, it := range items {
			if !includeKeep && it.fromKeep {
				continue
			}
			if it.modelKey == qModel {
				count++
			}
		}
	}
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Cache-Control", "no-store")
	_ = json.NewEncoder(w).Encode(doneMetaResp{Count: count})
}
// recordDoneList serves the paginated, sortable list of finished recordings
// below the configured done directory (GET only).
//
// Query parameters:
//   - includeKeep=1|true|yes  also include files under done/keep/ (default: off)
//   - model=<key>             filter to a single model key; sorting/paging then
//     happen on that subset
//   - page / pageSize         1-based pagination; absent -> full list (legacy behavior)
//   - sort                    completed_|file_|duration_|size_ with _asc/_desc
//     (legacy model_asc/model_desc are mapped to file_asc/file_desc)
//   - all=1                   force the complete list (disables pagination)
//   - withCount=1             respond as {count, items} instead of doneListResponse
//
// Results come from a process-wide cache (doneCache) that is rebuilt when the
// done sequence number changes, the done dir changes, or a 30s TTL expires.
func recordDoneList(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Nur GET erlaubt", http.StatusMethodNotAllowed)
		return
	}
	// Optionally include /done/keep/ as well (default: false).
	qKeep := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("includeKeep")))
	includeKeep := qKeep == "1" || qKeep == "true" || qKeep == "yes"
	// Optional model filter (pagination is then applied "per model").
	qModel := normalizeQueryModel(r.URL.Query().Get("model"))
	// Optional pagination (1-based). If page/pageSize are missing -> full list, as before.
	page := 0
	pageSize := 0
	if v := strings.TrimSpace(r.URL.Query().Get("page")); v != "" {
		if n, err := strconv.Atoi(v); err == nil && n > 0 {
			page = n
		}
	}
	if v := strings.TrimSpace(r.URL.Query().Get("pageSize")); v != "" {
		if n, err := strconv.Atoi(v); err == nil && n > 0 {
			pageSize = n
		}
	}
	// Optional sort mode.
	// supported: completed_(asc|desc), model_(asc|desc), file_(asc|desc), duration_(asc|desc), size_(asc|desc)
	sortMode := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("sort")))
	if sortMode == "" {
		sortMode = "completed_desc"
	}
	// Backwards compat: map the legacy model_* sorts onto file_*.
	if sortMode == "model_asc" {
		sortMode = "file_asc"
	}
	if sortMode == "model_desc" {
		sortMode = "file_desc"
	}
	// all=1 -> always return the complete list (disable pagination).
	qAll := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("all")))
	fetchAll := qAll == "1" || qAll == "true" || qAll == "yes"
	if fetchAll {
		page = 0
		pageSize = 0
	}
	// Optional: also send the total count as a {count, items} payload.
	qWithCount := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("withCount")))
	withCount := qWithCount == "1" || qWithCount == "true" || qWithCount == "yes"
	// durationForSort reports a job's duration and whether it is known (>0).
	durationForSort := func(j *RecordJob) (sec float64, ok bool) {
		if j.DurationSeconds > 0 {
			return j.DurationSeconds, true
		}
		return 0, false
	}
	// compareIdx is the less-func used when a model filter forces an ad-hoc
	// sort of a subset. Ties always fall back to completion time, then to the
	// precomputed fileSort key so the order is deterministic.
	compareIdx := func(items []doneIndexItem, sortMode string, ia, ib int) bool {
		a := items[ia]
		b := items[ib]
		ta, tb := a.endedAt, b.endedAt
		switch sortMode {
		case "completed_asc":
			if !ta.Equal(tb) {
				return ta.Before(tb)
			}
			return a.fileSort < b.fileSort
		case "completed_desc":
			if !ta.Equal(tb) {
				return ta.After(tb)
			}
			return a.fileSort < b.fileSort
		case "file_asc":
			if a.fileSort != b.fileSort {
				return a.fileSort < b.fileSort
			}
			if !ta.Equal(tb) {
				return ta.After(tb)
			}
			// fileSort values are equal here; this yields false (stable tie).
			return a.fileSort < b.fileSort
		case "file_desc":
			if a.fileSort != b.fileSort {
				return a.fileSort > b.fileSort
			}
			if !ta.Equal(tb) {
				return ta.After(tb)
			}
			return a.fileSort < b.fileSort
		case "duration_asc":
			da, okA := durationForSort(a.job)
			db, okB := durationForSort(b.job)
			if okA != okB {
				return okA // entries with unknown duration sort to the back
			}
			if okA && okB && da != db {
				return da < db
			}
			if !ta.Equal(tb) {
				return ta.After(tb)
			}
			return a.fileSort < b.fileSort
		case "duration_desc":
			da, okA := durationForSort(a.job)
			db, okB := durationForSort(b.job)
			if okA != okB {
				return okA
			}
			if okA && okB && da != db {
				return da > db
			}
			if !ta.Equal(tb) {
				return ta.After(tb)
			}
			return a.fileSort < b.fileSort
		case "size_asc":
			if a.job.SizeBytes != b.job.SizeBytes {
				return a.job.SizeBytes < b.job.SizeBytes
			}
			if !ta.Equal(tb) {
				return ta.After(tb)
			}
			return a.fileSort < b.fileSort
		case "size_desc":
			if a.job.SizeBytes != b.job.SizeBytes {
				return a.job.SizeBytes > b.job.SizeBytes
			}
			if !ta.Equal(tb) {
				return ta.After(tb)
			}
			return a.fileSort < b.fileSort
		default:
			// Unknown sort mode behaves like completed_desc.
			if !ta.Equal(tb) {
				return ta.After(tb)
			}
			return a.fileSort < b.fileSort
		}
	}
	// --- resolve done path ---
	s := getSettings()
	doneAbs, err := resolvePathRelativeToApp(s.DoneDir)
	if err != nil {
		http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	// No DoneDir configured -> simply return an empty list.
	if strings.TrimSpace(doneAbs) == "" {
		w.Header().Set("Content-Type", "application/json")
		w.Header().Set("Cache-Control", "no-store")
		_ = json.NewEncoder(w).Encode(doneListResponse{
			Items:      []*RecordJob{},
			TotalCount: 0,
			Page:       page,
			PageSize:   pageSize,
		})
		return
	}
	// Rebuild the cache when doneSeq changed, the dir changed, or the TTL expired.
	curSeq := atomic.LoadUint64(&doneSeq)
	now := time.Now()
	doneCache.mu.Lock()
	needRebuild := doneCache.seq != curSeq ||
		doneCache.doneAbs != doneAbs ||
		now.Sub(doneCache.builtAt) > 30*time.Second
	if needRebuild {
		// If doneAbs does not exist: store empty data in the cache (with empty
		// index slices for every known sort mode, both includeKeep variants).
		if _, err := os.Stat(doneAbs); err != nil && os.IsNotExist(err) {
			doneCache.items = nil
			doneCache.sortedIdx = make(map[string][]int, 16)
			modes := []string{
				"completed_desc", "completed_asc",
				"file_asc", "file_desc",
				"duration_asc", "duration_desc",
				"size_asc", "size_desc",
			}
			for _, m := range modes {
				doneCache.sortedIdx["0|"+m] = []int{}
				doneCache.sortedIdx["1|"+m] = []int{}
			}
			doneCache.seq = curSeq
			doneCache.doneAbs = doneAbs
			doneCache.builtAt = now
		} else {
			items, sorted := buildDoneIndex(doneAbs)
			doneCache.items = items
			doneCache.sortedIdx = sorted
			doneCache.seq = curSeq
			doneCache.doneAbs = doneAbs
			doneCache.builtAt = now
		}
	}
	items := doneCache.items
	sortedAll := doneCache.sortedIdx
	doneCache.mu.Unlock()
	// --------- request-specific selection (model filter, includeKeep, sort, paging) ---------
	incKey := "0"
	if includeKeep {
		incKey = "1"
	}
	// idx holds indices into items.
	var idx []int
	if qModel == "" {
		idx = sortedAll[incKey+"|"+sortMode]
		if idx == nil {
			// fallback to the default sort index
			idx = sortedAll[incKey+"|completed_desc"]
			if idx == nil {
				idx = make([]int, 0)
			}
		}
	} else {
		// Model filter: take the subset first, then sort it.
		idx = make([]int, 0, 256)
		for i := range items {
			if !includeKeep && items[i].fromKeep {
				continue
			}
			if items[i].modelKey == qModel {
				idx = append(idx, i)
			}
		}
		sort.Slice(idx, func(a, b int) bool {
			return compareIdx(items, sortMode, idx[a], idx[b])
		})
	}
	totalCount := len(idx)
	// Apply pagination (on idx only).
	start := 0
	end := totalCount
	if pageSize > 0 && !fetchAll {
		if page <= 0 {
			page = 1
		}
		start = (page - 1) * pageSize
		if start < 0 {
			start = 0
		}
		if start >= totalCount {
			start = totalCount
		}
		end = start + pageSize
		if end > totalCount {
			end = totalCount
		}
	}
	// Build response jobs.
	out := make([]*RecordJob, 0, max(0, end-start))
	for _, ii := range idx[start:end] {
		base := items[ii].job
		if base == nil {
			continue
		}
		// Work on a copy (important: no races/mutations on the cached object).
		c := *base
		// Always refresh the size from disk.
		if fi, err := os.Stat(c.Output); err == nil && fi != nil && !fi.IsDir() && fi.Size() > 0 {
			c.SizeBytes = fi.Size()
		}
		// Read meta only if it exists (never generate here!).
		id := stripHotPrefix(strings.TrimSuffix(filepath.Base(c.Output), filepath.Ext(c.Output)))
		if id != "" {
			if mp, err := generatedMetaFile(id); err == nil {
				if fi, err := os.Stat(c.Output); err == nil && fi != nil && !fi.IsDir() {
					// NOTE(review): `w` below shadows the http.ResponseWriter
					// inside this scope — legal, but easy to trip over.
					if dur, w, h, fps, ok := readVideoMeta(mp, fi); ok {
						c.DurationSeconds = dur
						c.VideoWidth = w
						c.VideoHeight = h
						c.FPS = fps
					}
					if u, ok := readVideoMetaSourceURL(mp, fi); ok && strings.TrimSpace(c.SourceURL) == "" {
						c.SourceURL = u
					}
				}
			}
		}
		// Enforce the preview-sprite truth in the LIST payload (needed by cards/gallery).
		applyPreviewSpriteTruthToRecordJobMeta(&c)
		out = append(out, &c)
	}
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Cache-Control", "no-store")
	// If the frontend requested "withCount=1": respond as {count, items}.
	if withCount {
		_ = json.NewEncoder(w).Encode(map[string]any{
			"count": totalCount,
			"items": out,
		})
		return
	}
	// Standard response: always include totalCount as well.
	_ = json.NewEncoder(w).Encode(doneListResponse{
		Items:      out,
		TotalCount: totalCount,
		Page:       page,
		PageSize:   pageSize,
	})
}
// max returns the larger of the two ints.
// NOTE(review): Go 1.21+ ships a built-in max; this package-level helper
// shadows it and can be dropped once the module targets Go 1.21.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
// recordDeleteVideo soft-deletes a finished video into a single-slot trash
// (done/.trash) and returns an undo token. Only the most recent deletion is
// restorable: before a new delete, the trash is emptied and the previous
// victim's generated assets are removed for good.
//
// The file is addressed by basename via the "file" query parameter and must
// be a .mp4 or .ts located in done/, done/<subdir>, keep/ or keep/<subdir>.
// Order matters throughout: empty trash -> purge old generated assets ->
// move file into trash -> write last.json (the single-slot key).
func recordDeleteVideo(w http.ResponseWriter, r *http.Request) {
	// The frontend currently uses POST (see FinishedDownloads), so we allow POST + DELETE.
	if r.Method != http.MethodPost && r.Method != http.MethodDelete {
		http.Error(w, "Nur POST oder DELETE erlaubt", http.StatusMethodNotAllowed)
		return
	}
	raw := strings.TrimSpace(r.URL.Query().Get("file"))
	if raw == "" {
		http.Error(w, "file fehlt", http.StatusBadRequest)
		return
	}
	// decode safely
	file, err := url.QueryUnescape(raw)
	if err != nil {
		http.Error(w, "ungültiger file", http.StatusBadRequest)
		return
	}
	file = strings.TrimSpace(file)
	// Only a bare basename is allowed (no subfolders, no path traversal).
	if file == "" ||
		strings.Contains(file, "/") ||
		strings.Contains(file, "\\") ||
		filepath.Base(file) != file {
		http.Error(w, "ungültiger file", http.StatusBadRequest)
		return
	}
	ext := strings.ToLower(filepath.Ext(file))
	if ext != ".mp4" && ext != ".ts" {
		http.Error(w, "nicht erlaubt", http.StatusForbidden)
		return
	}
	s := getSettings()
	doneAbs, err := resolvePathRelativeToApp(s.DoneDir)
	if err != nil {
		http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	if strings.TrimSpace(doneAbs) == "" {
		http.Error(w, "doneDir ist leer", http.StatusBadRequest)
		return
	}
	// Resolve in done + done/<subdir> as well as keep + keep/<subdir>.
	target, from, fi, err := resolveDoneFileByName(doneAbs, file)
	if err != nil {
		http.Error(w, "datei nicht gefunden", http.StatusNotFound)
		return
	}
	if fi != nil && fi.IsDir() {
		http.Error(w, "ist ein verzeichnis", http.StatusBadRequest)
		return
	}
	// Single-slot trash: only the *most recently* deleted file is kept.
	trashDir := filepath.Join(doneAbs, ".trash")
	// If something already sits in the single-slot trash: remember its ID,
	// but delete its generated assets only AFTER .trash was actually emptied.
	prevBase := ""
	prevCanonical := ""
	if b, err := os.ReadFile(filepath.Join(trashDir, "last.json")); err == nil && len(b) > 0 {
		var prev struct {
			File string `json:"file"`
		}
		if err := json.Unmarshal(b, &prev); err == nil {
			prevFile := strings.TrimSpace(prev.File)
			if prevFile != "" {
				prevBase = strings.TrimSuffix(prevFile, filepath.Ext(prevFile))
				prevCanonical = stripHotPrefix(prevBase)
			}
		}
	}
	// Empty the trash completely => older undo tokens become invalid automatically.
	// Do NOT swallow errors: if .trash cannot be emptied, the new delete must not proceed.
	if err := os.RemoveAll(trashDir); err != nil {
		if runtime.GOOS == "windows" && isSharingViolation(err) {
			http.Error(w, "konnte .trash nicht leeren (Datei wird gerade verwendet). Bitte Player schließen und erneut versuchen.", http.StatusConflict)
			return
		}
		http.Error(w, "trash leeren fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	// Now the old trashed video is definitively gone -> remove generated/meta/<id>/.
	if prevCanonical != "" {
		removeGeneratedForID(prevCanonical)
		// Best-effort: in case assets were ever created under the HOT-prefixed id.
		if prevBase != "" && prevBase != prevCanonical {
			removeGeneratedForID(prevBase)
		}
	}
	if err := os.MkdirAll(trashDir, 0o755); err != nil {
		http.Error(w, "trash dir erstellen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	// Remember the original dir relative to doneAbs (incl. keep/<subdir> or <subdir>).
	origDir := filepath.Dir(target)
	relDir, err := filepath.Rel(doneAbs, origDir)
	if err != nil {
		http.Error(w, "rel dir berechnen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	relDir = filepath.ToSlash(relDir)
	if strings.TrimSpace(relDir) == "" {
		relDir = "."
	}
	// Create the undo token now so we can store it as the "single-slot key".
	tok, err := encodeUndoDeleteToken(undoDeleteToken{
		Trash:  "",     // NOTE(review): never set on the token itself; the trash name lives in last.json below
		RelDir: relDir, // computed above
		File:   file,
	})
	if err != nil {
		http.Error(w, "undo token encode fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	trashName := tok + "__" + file // unique + token visible in the file name
	trashName = strings.ReplaceAll(trashName, string(os.PathSeparator), "_")
	dst := filepath.Join(trashDir, trashName)
	// The token is not re-encoded with the trash name — the trash name is
	// stored separately in last.json instead.
	// Move with retry (robust against Windows file locks).
	if err := renameWithRetry(target, dst); err != nil {
		if runtime.GOOS == "windows" && isSharingViolation(err) {
			http.Error(w, "datei wird gerade verwendet (Player offen). Bitte kurz stoppen und erneut versuchen.", http.StatusConflict)
			return
		}
		http.Error(w, "trash move fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	// Write last.json: only this token is valid from now on.
	type trashMeta struct {
		Token     string `json:"token"`     // exactly the (encoded) query token
		TrashName string `json:"trashName"` // file name inside .trash
		RelDir    string `json:"relDir"`    // original folder relative to doneAbs
		File      string `json:"file"`      // original name (basename)
		DeletedAt int64  `json:"deletedAt"`
	}
	meta := trashMeta{
		Token:     tok,
		TrashName: trashName,
		RelDir:    relDir,
		File:      file,
		DeletedAt: time.Now().Unix(),
	}
	// Best-effort write; if it fails, restore will report "nothing to restore".
	b, _ := json.Marshal(meta)
	_ = os.WriteFile(filepath.Join(trashDir, "last.json"), b, 0o644)
	// Clean up caches/jobs (do NOT hard-delete assets => undo stays fast).
	purgeDurationCacheForPath(target)
	removeJobsByOutputBasename(file)
	notifyDoneChanged()
	notifyJobsChanged()
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Cache-Control", "no-store")
	_ = json.NewEncoder(w).Encode(map[string]any{
		"ok":        true,
		"file":      file,
		"from":      from, // "done" | "keep"
		"undoToken": tok,  // for undo
		"trashed":   true,
	})
}
// recordRestoreVideo undoes the most recent soft delete (POST only). The
// "token" query parameter must match the token stored in done/.trash/last.json;
// all restore data (trash name, original folder, original file name) comes
// from last.json, while the token is additionally decoded to validate its
// format and cross-checked against the stored metadata.
//
// After a successful move back, the file's mtime is bumped so it sorts to the
// top under completed_desc, and the trash is emptied so the token dies.
func recordRestoreVideo(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Nur POST erlaubt", http.StatusMethodNotAllowed)
		return
	}
	raw := strings.TrimSpace(r.URL.Query().Get("token"))
	if raw == "" {
		http.Error(w, "token fehlt", http.StatusBadRequest)
		return
	}
	// Resolve doneDir.
	s := getSettings()
	doneAbs, err := resolvePathRelativeToApp(s.DoneDir)
	if err != nil {
		http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	if strings.TrimSpace(doneAbs) == "" {
		http.Error(w, "doneDir ist leer", http.StatusBadRequest)
		return
	}
	// Single slot: read last.json and validate the token strictly.
	trashDir := filepath.Join(doneAbs, ".trash")
	metaPath := filepath.Join(trashDir, "last.json")
	b, err := os.ReadFile(metaPath)
	if err != nil {
		http.Error(w, "nichts zum Wiederherstellen", http.StatusNotFound)
		return
	}
	var meta struct {
		Token     string `json:"token"`
		TrashName string `json:"trashName"`
		RelDir    string `json:"relDir"`
		File      string `json:"file"`
		DeletedAt int64  `json:"deletedAt"`
	}
	if err := json.Unmarshal(b, &meta); err != nil {
		http.Error(w, "trash meta ungültig", http.StatusInternalServerError)
		return
	}
	if strings.TrimSpace(meta.Token) == "" || strings.TrimSpace(meta.TrashName) == "" || strings.TrimSpace(meta.File) == "" {
		http.Error(w, "trash meta unvollständig", http.StatusInternalServerError)
		return
	}
	// Only the most recent token is valid.
	if raw != meta.Token {
		http.Error(w, "token ungültig (nicht der letzte)", http.StatusNotFound)
		return
	}
	// Additionally decode the token (format/signature check; restore data
	// itself still comes from last.json).
	tok, err := decodeUndoDeleteToken(raw)
	if err != nil {
		http.Error(w, "token ungültig", http.StatusBadRequest)
		return
	}
	// Safety: only use path components from meta that are provably safe.
	if !isSafeBasename(meta.TrashName) || !isSafeBasename(meta.File) || !isSafeRelDir(meta.RelDir) {
		http.Error(w, "token inhalt ungültig", http.StatusBadRequest)
		return
	}
	// Extra consistency checks: token.File / token.RelDir must match meta.
	if tok.File != meta.File || tok.RelDir != meta.RelDir {
		http.Error(w, "token passt nicht zu letzter Löschung", http.StatusNotFound)
		return
	}
	ext := strings.ToLower(filepath.Ext(meta.File))
	if ext != ".mp4" && ext != ".ts" {
		http.Error(w, "nicht erlaubt", http.StatusForbidden)
		return
	}
	// Source: exactly the most recently deleted file.
	src := filepath.Join(trashDir, meta.TrashName)
	// Reconstruct the target folder (relative to doneAbs).
	rel := meta.RelDir
	if rel == "." {
		rel = ""
	}
	dstDir := filepath.Join(doneAbs, filepath.FromSlash(rel))
	dstDirClean := filepath.Clean(dstDir)
	doneClean := filepath.Clean(doneAbs)
	// Safety: dstDir must lie inside doneAbs (case-insensitive prefix check,
	// presumably for Windows paths).
	if !strings.HasPrefix(strings.ToLower(dstDirClean)+string(os.PathSeparator), strings.ToLower(doneClean)+string(os.PathSeparator)) &&
		!strings.EqualFold(dstDirClean, doneClean) {
		http.Error(w, "zielpfad ungültig", http.StatusBadRequest)
		return
	}
	if err := os.MkdirAll(dstDirClean, 0o755); err != nil {
		http.Error(w, "zielordner erstellen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	dst, err := uniqueDestPath(dstDirClean, meta.File)
	if err != nil {
		http.Error(w, "zielname nicht verfügbar: "+err.Error(), http.StatusConflict)
		return
	}
	if err := renameWithRetry(src, dst); err != nil {
		if runtime.GOOS == "windows" && isSharingViolation(err) {
			http.Error(w, "restore fehlgeschlagen (Datei wird gerade verwendet).", http.StatusConflict)
			return
		}
		http.Error(w, "restore fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	// A restored file should appear "on top" in the card stack (sort: completed_desc).
	// Set ModTime to "now" because buildDoneIndex() takes endedAt from fi.ModTime().
	now := time.Now()
	_ = os.Chtimes(dst, now, now) // best-effort
	// Empty the trash so the token is definitively dead afterwards.
	_ = os.RemoveAll(trashDir)
	_ = os.MkdirAll(trashDir, 0o755)
	purgeDurationCacheForPath(src) // in case src is still cached somewhere (optional)
	purgeDurationCacheForPath(dst) // optional
	notifyDoneChanged()
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Cache-Control", "no-store")
	_ = json.NewEncoder(w).Encode(map[string]any{
		"ok":           true,
		"file":         meta.File,
		"restoredFile": filepath.Base(dst), // may contain a __dup suffix
	})
}
// recordUnkeepVideo moves a kept video back from keep/ (root or keep/<subdir>)
// into the flat done/ root (POST only). The file is addressed by basename via
// the "file" query parameter and must be a .mp4 or .ts.
//
// Fix: on a rename failure the response now carries err.Error() instead of
// the file name, consistent with the other handlers (delete/restore/keep).
func recordUnkeepVideo(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Nur POST erlaubt", http.StatusMethodNotAllowed)
		return
	}
	raw := strings.TrimSpace(r.URL.Query().Get("file"))
	if raw == "" {
		http.Error(w, "file fehlt", http.StatusBadRequest)
		return
	}
	file, err := url.QueryUnescape(raw)
	if err != nil {
		http.Error(w, "ungültiger file", http.StatusBadRequest)
		return
	}
	file = strings.TrimSpace(file)
	if !isSafeBasename(file) {
		http.Error(w, "ungültiger file", http.StatusBadRequest)
		return
	}
	ext := strings.ToLower(filepath.Ext(file))
	if ext != ".mp4" && ext != ".ts" {
		http.Error(w, "nicht erlaubt", http.StatusForbidden)
		return
	}
	s := getSettings()
	doneAbs, err := resolvePathRelativeToApp(s.DoneDir)
	if err != nil {
		http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	if strings.TrimSpace(doneAbs) == "" {
		http.Error(w, "doneDir ist leer", http.StatusBadRequest)
		return
	}
	// Source must live under keep (root or keep/<subdir>).
	src, from, fi, err := resolveDoneFileByName(doneAbs, file)
	if err != nil {
		http.Error(w, "datei nicht gefunden", http.StatusNotFound)
		return
	}
	if from != "keep" {
		http.Error(w, "datei ist nicht in keep", http.StatusConflict)
		return
	}
	if fi != nil && fi.IsDir() {
		http.Error(w, "ist ein verzeichnis", http.StatusBadRequest)
		return
	}
	// Target: back into done/ (flat, without model subdirs).
	dstDir := doneAbs
	if err := os.MkdirAll(dstDir, 0o755); err != nil {
		http.Error(w, "done subdir erstellen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	dst, err := uniqueDestPath(dstDir, file)
	if err != nil {
		http.Error(w, "zielname nicht verfügbar: "+err.Error(), http.StatusConflict)
		return
	}
	if err := renameWithRetry(src, dst); err != nil {
		if runtime.GOOS == "windows" && isSharingViolation(err) {
			http.Error(w, "unkeep fehlgeschlagen (Datei wird gerade verwendet).", http.StatusConflict)
			return
		}
		// Report the actual error (previously only the file name was echoed).
		http.Error(w, "unkeep fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	notifyDoneChanged()
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Cache-Control", "no-store")
	_ = json.NewEncoder(w).Encode(map[string]any{
		"ok":      true,
		"oldFile": file,
		"newFile": filepath.Base(dst),
	})
}
func recordKeepVideo(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
http.Error(w, "Nur POST erlaubt", http.StatusMethodNotAllowed)
return
}
raw := strings.TrimSpace(r.URL.Query().Get("file"))
if raw == "" {
http.Error(w, "file fehlt", http.StatusBadRequest)
return
}
file, err := url.QueryUnescape(raw)
if err != nil {
http.Error(w, "ungültiger file", http.StatusBadRequest)
return
}
file = strings.TrimSpace(file)
// ✅ nur Basename erlauben
if file == "" ||
strings.Contains(file, "/") ||
strings.Contains(file, "\\") ||
filepath.Base(file) != file {
http.Error(w, "ungültiger file", http.StatusBadRequest)
return
}
ext := strings.ToLower(filepath.Ext(file))
if ext != ".mp4" && ext != ".ts" {
http.Error(w, "nicht erlaubt", http.StatusForbidden)
return
}
s := getSettings()
doneAbs, err := resolvePathRelativeToApp(s.DoneDir)
if err != nil {
http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
return
}
if strings.TrimSpace(doneAbs) == "" {
http.Error(w, "doneDir ist leer", http.StatusBadRequest)
return
}
keepRoot := filepath.Join(doneAbs, "keep")
if err := os.MkdirAll(keepRoot, 0o755); err != nil {
http.Error(w, "keep dir erstellen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
return
}
// ✅ 0) Wenn schon irgendwo in keep (root oder keep/<subdir>) existiert:
// - wenn im keep-root: jetzt nach keep/<model>/ nachziehen
if p, _, ok := findFileInDirOrOneLevelSubdirs(keepRoot, file, ""); ok {
// p liegt entweder in keepRoot oder keepRoot/<subdir>
if strings.EqualFold(filepath.Clean(filepath.Dir(p)), filepath.Clean(keepRoot)) {
// im Root => versuchen einzusortieren
modelKey := modelKeyFromFilenameOrPath(file, p /* srcPath */, keepRoot /* doneAbs dummy, wird nicht genutzt */)
modelKey = sanitizeModelKey(modelKey)
// Optionaler Fallback: wenn wir aus dem keep-root Pfad nix ziehen können, nur aus Filename:
if modelKey == "" {
stem := strings.TrimSuffix(file, filepath.Ext(file))
modelKey = sanitizeModelKey(modelNameFromFilename(stem))
}
if modelKey != "" {
dstDir := filepath.Join(keepRoot, modelKey)
if err := os.MkdirAll(dstDir, 0o755); err == nil {
dst, derr := uniqueDestPath(dstDir, file)
if derr == nil {
// best-effort move
_ = renameWithRetry(p, dst)
}
}
}
}
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Cache-Control", "no-store")
_ = json.NewEncoder(w).Encode(map[string]any{
"ok": true,
"file": file,
"alreadyKept": true,
})
return
}
// ✅ 1) Quelle in done (root oder done/<subdir>), aber NICHT aus keep
src, fi, ok := findFileInDirOrOneLevelSubdirs(doneAbs, file, "keep")
if !ok {
http.Error(w, "datei nicht gefunden", http.StatusNotFound)
return
}
if fi == nil || fi.IsDir() {
http.Error(w, "ist ein verzeichnis", http.StatusBadRequest)
return
}
// ✅ 2) Ziel: keep/<model>/file
modelKey := modelKeyFromFilenameOrPath(file, src, doneAbs)
dstDir := keepRoot
if modelKey != "" {
dstDir = filepath.Join(keepRoot, modelKey)
}
if err := os.MkdirAll(dstDir, 0o755); err != nil {
http.Error(w, "keep subdir erstellen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
return
}
dst, err := uniqueDestPath(dstDir, file)
if err != nil {
http.Error(w, "zielname nicht verfügbar: "+err.Error(), http.StatusConflict)
return
}
// rename mit retry (Windows file-lock)
if err := renameWithRetry(src, dst); err != nil {
if runtime.GOOS == "windows" && isSharingViolation(err) {
http.Error(w, "keep fehlgeschlagen (Datei wird gerade verwendet).", http.StatusConflict)
return
}
http.Error(w, "keep fehlgeschlagen: "+file, http.StatusInternalServerError)
return
}
notifyDoneChanged()
// ... dein bestehender Cleanup-Block (generated Assets löschen, legacy cleanup, removeJobsByOutputBasename) bleibt unverändert ...
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Cache-Control", "no-store")
_ = json.NewEncoder(w).Encode(map[string]any{
"ok": true,
"file": file,
"alreadyKept": false,
"newFile": filepath.Base(dst), // ✅ NEU
})
}
func recordToggleHot(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
http.Error(w, "Nur POST", http.StatusMethodNotAllowed)
return
}
raw := strings.TrimSpace(r.URL.Query().Get("file"))
if raw == "" {
http.Error(w, "file fehlt", http.StatusBadRequest)
return
}
file, err := url.QueryUnescape(raw)
if err != nil {
http.Error(w, "ungültiger file", http.StatusBadRequest)
return
}
file = strings.TrimSpace(file)
// ✅ nur Basename erlauben
if file == "" ||
strings.Contains(file, "/") ||
strings.Contains(file, "\\") ||
filepath.Base(file) != file {
http.Error(w, "ungültiger file", http.StatusBadRequest)
return
}
ext := strings.ToLower(filepath.Ext(file))
if ext != ".mp4" && ext != ".ts" {
http.Error(w, "nicht erlaubt", http.StatusForbidden)
return
}
s := getSettings()
doneAbs, err := resolvePathRelativeToApp(s.DoneDir)
if err != nil {
http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
return
}
if strings.TrimSpace(doneAbs) == "" {
http.Error(w, "doneDir ist leer", http.StatusBadRequest)
return
}
// ✅ Quelle kann in done/, done/<subdir>, keep/, keep/<subdir> liegen
src, from, fi, err := resolveDoneFileByName(doneAbs, file)
if err != nil {
http.Error(w, "datei nicht gefunden", http.StatusNotFound)
return
}
if fi != nil && fi.IsDir() {
http.Error(w, "ist ein verzeichnis", http.StatusBadRequest)
return
}
srcDir := filepath.Dir(src) // ✅ wichtig: toggeln im tatsächlichen Ordner
// toggle: HOT Prefix
newFile := file
if strings.HasPrefix(file, "HOT ") {
newFile = strings.TrimPrefix(file, "HOT ")
} else {
newFile = "HOT " + file
}
dst := filepath.Join(srcDir, newFile) // ✅ im selben Ordner toggeln (done oder keep)
if _, err := os.Stat(dst); err == nil {
http.Error(w, "ziel existiert bereits", http.StatusConflict)
return
} else if !os.IsNotExist(err) {
http.Error(w, "stat ziel fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
return
}
if err := renameWithRetry(src, dst); err != nil {
if runtime.GOOS == "windows" && isSharingViolation(err) {
http.Error(w, "rename fehlgeschlagen (Datei wird gerade abgespielt). Bitte erneut versuchen.", http.StatusConflict)
return
}
http.Error(w, "rename fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
return
}
// ✅ KEIN generated-rename!
// Assets bleiben canonical (ohne HOT)
canonicalID := stripHotPrefix(strings.TrimSuffix(file, filepath.Ext(file)))
renameJobsOutputBasename(file, newFile)
notifyDoneChanged()
notifyJobsChanged()
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Cache-Control", "no-store")
_ = json.NewEncoder(w).Encode(map[string]any{
"ok": true,
"oldFile": file,
"newFile": newFile,
"canonicalID": canonicalID,
"from": from, // "done" | "keep"
})
}