nsfwapp/backend/record_handlers.go
2026-02-09 12:29:19 +01:00

2047 lines
53 KiB
Go
Raw Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

// backend\record_handlers.go
package main
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
)
// RecordRequest is the JSON body accepted by startRecordingFromRequest to
// start a new recording job.
type RecordRequest struct {
	URL       string `json:"url"`                 // stream/page URL to record
	Cookie    string `json:"cookie,omitempty"`    // optional cookie value — presumably forwarded to the recorder; confirm in startRecordingInternal
	UserAgent string `json:"userAgent,omitempty"` // optional user agent override — same caveat
	Hidden    bool   `json:"hidden,omitempty"`    // hidden jobs are excluded from recordList output
}
// doneListResponse is the standard JSON envelope returned by recordDoneList.
type doneListResponse struct {
	Items      []*RecordJob `json:"items"`
	TotalCount int          `json:"totalCount"`         // number of matches before pagination
	Page       int          `json:"page,omitempty"`     // 1-based; 0 when the full list was returned
	PageSize   int          `json:"pageSize,omitempty"` // 0 when the full list was returned
}
// doneMetaResp carries only a count.
// NOTE(review): not referenced in this chunk — presumably serialized by a
// count-only endpoint elsewhere in the file; confirm before removing.
type doneMetaResp struct {
	Count int `json:"count"`
}
// durationReq is a request body listing file names whose durations are wanted.
// NOTE(review): not referenced in this chunk — handler presumably lives later
// in the file.
type durationReq struct {
	Files []string `json:"files"`
}
// durationItem is one per-file result for a durationReq: either a duration
// or an error message, keyed by the requested file name.
type durationItem struct {
	File            string  `json:"file"`
	DurationSeconds float64 `json:"durationSeconds,omitempty"`
	Error           string  `json:"error,omitempty"`
}
// undoDeleteToken identifies a soft-deleted file so the deletion can be
// undone: where the file now sits in .trash and where it originally lived.
type undoDeleteToken struct {
	Trash  string `json:"trash"`  // basename inside the .trash directory
	RelDir string `json:"relDir"` // directory relative to doneAbs, e.g. ".", "keep/model", "model"
	File   string `json:"file"`   // original basename, e.g. "HOT xyz.mp4"
}

// encodeUndoDeleteToken serializes the token as unpadded URL-safe base64
// over its JSON representation, suitable for use in query strings.
func encodeUndoDeleteToken(t undoDeleteToken) (string, error) {
	payload, err := json.Marshal(t)
	if err != nil {
		return "", err
	}
	return base64.RawURLEncoding.EncodeToString(payload), nil
}

// decodeUndoDeleteToken is the inverse of encodeUndoDeleteToken. On any
// decode error the zero token is returned alongside the error.
func decodeUndoDeleteToken(raw string) (undoDeleteToken, error) {
	var tok undoDeleteToken
	payload, err := base64.RawURLEncoding.DecodeString(raw)
	if err != nil {
		return tok, err
	}
	err = json.Unmarshal(payload, &tok)
	return tok, err
}
// isSafeRelDir reports whether rel is a safe *relative* directory path that
// may be joined under a trusted base dir (e.g. ".", "model", "keep/model").
// It rejects empty input, absolute paths, Windows drive-letter paths and any
// path that cleans to something escaping the base ("..", "../x").
func isSafeRelDir(rel string) bool {
	rel = strings.TrimSpace(rel)
	if rel == "" {
		return false
	}
	// Reject OS-absolute and Windows volume paths early. The leading-"/"
	// check below misses these on Windows: ToSlash("C:\\evil") yields
	// "C:/evil", which has no leading slash. On POSIX VolumeName is always
	// "" and IsAbs only matches a leading "/", so behavior is unchanged.
	if filepath.IsAbs(rel) || filepath.VolumeName(rel) != "" {
		return false
	}
	// normalize to slash for validation
	rel = filepath.ToSlash(rel)
	if strings.HasPrefix(rel, "/") {
		return false
	}
	clean := path.Clean(rel) // path.Clean => forward slashes
	if clean == "." {
		return true
	}
	// any traversal out of the base dir is forbidden
	if strings.HasPrefix(clean, "../") || clean == ".." {
		return false
	}
	// On POSIX a backslash survives ToSlash unchanged; treat it as unsafe.
	if strings.Contains(clean, `\`) {
		return false
	}
	return true
}
// isSafeBasename reports whether name is a plain file basename: non-empty,
// no path separators, and not a directory reference.
// Fix: the original accepted "." and ".." (filepath.Base("..") == ".."),
// and joining ".." under a trusted dir escapes it — both are now rejected.
func isSafeBasename(name string) bool {
	name = strings.TrimSpace(name)
	if name == "" || name == "." || name == ".." {
		return false
	}
	if strings.Contains(name, "/") || strings.Contains(name, "\\") {
		return false
	}
	return filepath.Base(name) == name
}
// recordList handles GET and returns every visible job as JSON, newest first
// by StartedAt. Hidden (and nil) jobs are internal probes and are omitted so
// the UI never sees them.
func recordList(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Nur GET erlaubt", http.StatusMethodNotAllowed)
		return
	}

	jobsMu.Lock()
	visible := make([]*RecordJob, 0, len(jobs))
	for _, job := range jobs {
		if job != nil && !job.Hidden {
			visible = append(visible, job)
		}
	}
	jobsMu.Unlock()

	// newest first
	sort.Slice(visible, func(a, b int) bool {
		return visible[a].StartedAt.After(visible[b].StartedAt)
	})

	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Cache-Control", "no-store")
	_ = json.NewEncoder(w).Encode(visible)
}
// writeSSE writes data as one SSE message: each line of the payload is
// prefixed with "data: " and the message is terminated by a blank line,
// matching the EventSource framing rules.
//
// Fix: the SSE spec treats CR, LF and CRLF all as line terminators. The
// original only normalized CRLF, so a lone "\r" inside the payload would
// corrupt the framing on spec-compliant clients. All variants are now
// normalized to "\n" before prefixing.
func writeSSE(w http.ResponseWriter, data []byte) {
	s := strings.ReplaceAll(string(data), "\r\n", "\n")
	s = strings.ReplaceAll(s, "\r", "\n")
	for _, line := range strings.Split(s, "\n") {
		fmt.Fprintf(w, "data: %s\n", line)
	}
	// blank line ends the event
	fmt.Fprint(w, "\n")
}
// handleDoneStream is the SSE endpoint for "done list changed" notifications.
// It subscribes the client to doneHub, immediately emits one synthetic
// "doneChanged" event (so the client knows the stream is live), then forwards
// every hub broadcast as a named event until the client disconnects.
func handleDoneStream(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/event-stream")
	w.Header().Set("Cache-Control", "no-cache")
	w.Header().Set("Connection", "keep-alive")
	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "streaming unsupported", http.StatusInternalServerError)
		return
	}
	// Buffered so a momentarily slow client does not block the hub.
	ch := make(chan []byte, 16)
	doneHub.add(ch)
	defer doneHub.remove(ch)
	// Initial ping/hello so the client sees life on the stream right away.
	fmt.Fprintf(w, "event: doneChanged\ndata: {\"type\":\"doneChanged\",\"seq\":%d,\"ts\":%d}\n\n",
		atomic.LoadUint64(&doneSeq), time.Now().UnixMilli())
	flusher.Flush()
	ctx := r.Context()
	for {
		select {
		case <-ctx.Done():
			// client went away
			return
		case b := <-ch:
			// Named event, so the client can addEventListener("doneChanged", ...).
			fmt.Fprintf(w, "event: doneChanged\ndata: %s\n\n", b)
			flusher.Flush()
		}
	}
}
// handleRecordVideo is a thin routing alias for recordVideo.
func handleRecordVideo(w http.ResponseWriter, r *http.Request) {
	recordVideo(w, r)
}
// startRecordingFromRequest handles POST: it decodes a RecordRequest from the
// body, starts the recording via startRecordingInternal, and echoes the
// created job as JSON. Decode and start failures both yield 400.
func startRecordingFromRequest(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Nur POST erlaubt", http.StatusMethodNotAllowed)
		return
	}

	var payload RecordRequest
	dec := json.NewDecoder(r.Body)
	if err := dec.Decode(&payload); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	job, startErr := startRecordingInternal(payload)
	if startErr != nil {
		http.Error(w, startErr.Error(), http.StatusBadRequest)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(job)
}
// recordVideo streams a recorded video to the client.
//
// Two lookup modes:
//  1. ?file=<basename> — resolved against doneDir (plus one subdirectory
//     level, skipping "keep"), then doneDir/keep (plus one level), then
//     recordDir. Survives server restarts.
//  2. ?id=<jobID> — legacy lookup via the in-memory jobs map; only works
//     while the job still exists in RAM.
//
// Optional query params: quality (transcode profile; "auto"/empty = original)
// and stream=1 (pipe a live transcode instead of serving a finished file).
// .ts files — and .mp4-named files that sniff as TS — are remuxed to MP4 on
// demand before serving. CORS headers are reflected for any Origin and
// OPTIONS preflights return 204.
func recordVideo(w http.ResponseWriter, r *http.Request) {
	origin := r.Header.Get("Origin")
	if origin != "" {
		// Allow the dev origin (reflected back; "*" would also work).
		// NOTE(review): reflecting arbitrary origins is permissive — confirm
		// this endpoint never serves credentialed responses.
		w.Header().Set("Access-Control-Allow-Origin", origin)
		w.Header().Set("Vary", "Origin")
		w.Header().Set("Access-Control-Allow-Methods", "GET,HEAD,OPTIONS")
		w.Header().Set("Access-Control-Allow-Headers", "Range")
		w.Header().Set("Access-Control-Expose-Headers", "Content-Length, Content-Range, Accept-Ranges")
	}
	if r.Method == http.MethodOptions {
		w.WriteHeader(http.StatusNoContent)
		return
	}
	// Read quality once (shared by both branches) and normalize.
	q := strings.TrimSpace(r.URL.Query().Get("quality"))
	if strings.EqualFold(q, "auto") {
		q = ""
	}
	if q != "" {
		// Validate early (clean 400 instead of a later 500).
		if _, ok := profileFromQuality(q); !ok {
			http.Error(w, "ungültige quality", http.StatusBadRequest)
			return
		}
	}
	fmt.Println("[recordVideo] quality="+q, "file="+r.URL.Query().Get("file"), "id="+r.URL.Query().Get("id"))
	// Playback by filename (covers doneDir / recordDir).
	if raw := strings.TrimSpace(r.URL.Query().Get("file")); raw != "" {
		// Decode explicitly (defensive).
		file, err := url.QueryUnescape(raw)
		if err != nil {
			http.Error(w, "ungültiger file", http.StatusBadRequest)
			return
		}
		file = strings.TrimSpace(file)
		// No path components, no backslashes, no traversal.
		if file == "" ||
			strings.Contains(file, "/") ||
			strings.Contains(file, "\\") ||
			filepath.Base(file) != file {
			http.Error(w, "ungültiger file", http.StatusBadRequest)
			return
		}
		ext := strings.ToLower(filepath.Ext(file))
		if ext != ".mp4" && ext != ".ts" {
			http.Error(w, "nicht erlaubt", http.StatusForbidden)
			return
		}
		s := getSettings()
		recordAbs, err := resolvePathRelativeToApp(s.RecordDir)
		if err != nil {
			http.Error(w, "recordDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
			return
		}
		doneAbs, err := resolvePathRelativeToApp(s.DoneDir)
		if err != nil {
			http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
			return
		}
		// Candidates: done first (incl. one subdir level, excluding "keep"),
		// then keep (incl. one subdir level), then recordDir.
		names := []string{file}
		// If the UI still knows ".ts" but the file already exists as ".mp4":
		if ext == ".ts" {
			mp4File := strings.TrimSuffix(file, ext) + ".mp4"
			names = append(names, mp4File)
		}
		var outPath string
		for _, name := range names {
			// done root + done/<subdir>/ (skip "keep")
			if p, _, ok := findFileInDirOrOneLevelSubdirs(doneAbs, name, "keep"); ok {
				outPath = p
				break
			}
			// keep root + keep/<subdir>/
			if p, _, ok := findFileInDirOrOneLevelSubdirs(filepath.Join(doneAbs, "keep"), name, ""); ok {
				outPath = p
				break
			}
			// record root (+ optional one subdir level)
			if p, _, ok := findFileInDirOrOneLevelSubdirs(recordAbs, name, ""); ok {
				outPath = p
				break
			}
		}
		if outPath == "" {
			http.Error(w, "datei nicht gefunden", http.StatusNotFound)
			return
		}
		outPath = filepath.Clean(strings.TrimSpace(outPath))
		// 1) TS -> MP4 (on-demand remux)
		if strings.ToLower(filepath.Ext(outPath)) == ".ts" {
			newOut, err := maybeRemuxTS(outPath)
			if err != nil {
				http.Error(w, "TS kann im Browser nicht abgespielt werden; Remux fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
				return
			}
			if strings.TrimSpace(newOut) == "" {
				http.Error(w, "TS kann im Browser nicht abgespielt werden; Remux hat keine MP4 erzeugt", http.StatusInternalServerError)
				return
			}
			outPath = filepath.Clean(strings.TrimSpace(newOut))
			// Verify a real, non-empty MP4 actually exists.
			fi, err := os.Stat(outPath)
			if err != nil || fi.IsDir() || fi.Size() == 0 || strings.ToLower(filepath.Ext(outPath)) != ".mp4" {
				http.Error(w, "Remux-Ergebnis ungültig", http.StatusInternalServerError)
				return
			}
		}
		// If the file is named ".mp4" but is actually TS/HTML content,
		// do not serve it as MP4.
		if strings.ToLower(filepath.Ext(outPath)) == ".mp4" {
			kind, _ := sniffVideoKind(outPath)
			switch kind {
			case "ts":
				newOut, err := maybeRemuxTS(outPath)
				if err != nil {
					http.Error(w, "Datei ist TS (nur .mp4 benannt); Remux fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
					return
				}
				outPath = filepath.Clean(strings.TrimSpace(newOut))
			case "html":
				http.Error(w, "Server liefert HTML statt Video (Pfad/Lookup prüfen)", http.StatusInternalServerError)
				return
			}
		}
		// 2) MP4 -> quality transcode (on-demand)
		w.Header().Set("Cache-Control", "no-store")
		stream := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("stream")))
		wantStream := stream == "1" || stream == "true" || stream == "yes"
		if q != "" && wantStream {
			prof, _ := profileFromQuality(q)
			// Streaming transcode: playback starts before the transcode finishes.
			if err := serveTranscodedStream(r.Context(), w, outPath, prof); err != nil {
				// NOTE(review): headers/body may already be partially written
				// here, so this http.Error is best-effort.
				http.Error(w, "transcode stream failed: "+err.Error(), http.StatusInternalServerError)
				return
			}
			return
		}
		if q != "" {
			var terr error
			outPath, terr = maybeTranscodeForRequest(r.Context(), outPath, q)
			if terr != nil {
				http.Error(w, "transcode failed: "+terr.Error(), http.StatusInternalServerError)
				return
			}
		}
		serveVideoFile(w, r, outPath)
		return
	}
	// Legacy: playback by job ID (works only while the job is still in RAM).
	id := strings.TrimSpace(r.URL.Query().Get("id"))
	if id == "" {
		http.Error(w, "id fehlt", http.StatusBadRequest)
		return
	}
	jobsMu.Lock()
	job, ok := jobs[id]
	jobsMu.Unlock()
	if !ok {
		http.Error(w, "job nicht gefunden", http.StatusNotFound)
		return
	}
	outPath := filepath.Clean(strings.TrimSpace(job.Output))
	if outPath == "" {
		http.Error(w, "output fehlt", http.StatusNotFound)
		return
	}
	if !filepath.IsAbs(outPath) {
		abs, err := resolvePathRelativeToApp(outPath)
		if err != nil {
			http.Error(w, "pfad auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
			return
		}
		outPath = abs
	}
	fi, err := os.Stat(outPath)
	if err != nil || fi.IsDir() || fi.Size() == 0 {
		http.Error(w, "datei nicht gefunden", http.StatusNotFound)
		return
	}
	// 1) TS -> MP4 (on-demand remux)
	if strings.ToLower(filepath.Ext(outPath)) == ".ts" {
		newOut, err := maybeRemuxTS(outPath)
		if err != nil {
			http.Error(w, "TS Remux fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
			return
		}
		if strings.TrimSpace(newOut) == "" {
			http.Error(w, "TS kann im Browser nicht abgespielt werden; Remux hat keine MP4 erzeugt", http.StatusInternalServerError)
			return
		}
		outPath = filepath.Clean(strings.TrimSpace(newOut))
		// re-stat the remux result (shadows the outer fi/err on purpose)
		fi, err := os.Stat(outPath)
		if err != nil || fi.IsDir() || fi.Size() == 0 || strings.ToLower(filepath.Ext(outPath)) != ".mp4" {
			http.Error(w, "Remux-Ergebnis ungültig", http.StatusInternalServerError)
			return
		}
	}
	// 2) MP4 -> quality transcode (on-demand)
	w.Header().Set("Cache-Control", "no-store")
	if q != "" {
		var terr error
		outPath, terr = maybeTranscodeForRequest(r.Context(), outPath, q)
		if terr != nil {
			http.Error(w, "transcode failed: "+terr.Error(), http.StatusInternalServerError)
			return
		}
	}
	serveVideoFile(w, r, outPath)
}
// flushWriter adapts an http.ResponseWriter into an io.Writer that flushes
// after every write, so streamed bytes reach the client immediately.
// A nil Flusher is tolerated (writes then go through unflushed).
type flushWriter struct {
	w http.ResponseWriter
	f http.Flusher
}

// Write forwards p to the underlying ResponseWriter, then flushes if a
// Flusher is available. It returns the underlying writer's result untouched.
func (fw flushWriter) Write(p []byte) (int, error) {
	written, werr := fw.w.Write(p)
	if flusher := fw.f; flusher != nil {
		flusher.Flush()
	}
	return written, werr
}
// serveTranscodedStream pipes an on-the-fly ffmpeg transcode of inPath into
// the response, flushing each chunk so playback can start before the
// transcode completes. Range requests are disabled because the output length
// is unknown up front.
//
// Returns ctx.Err() when the client disconnected (CommandContext kills
// ffmpeg in that case); other errors indicate a copy or ffmpeg failure, with
// ffmpeg's stderr included in the message.
func serveTranscodedStream(ctx context.Context, w http.ResponseWriter, inPath string, prof TranscodeProfile) error {
	if err := ensureFFmpegAvailable(); err != nil {
		return err
	}
	// Headers must be set before the first write.
	w.Header().Set("Content-Type", "video/mp4")
	w.Header().Set("Cache-Control", "no-store")
	// Range rarely makes sense with pipe streaming:
	w.Header().Set("Accept-Ranges", "none")
	args := buildFFmpegStreamArgs(inPath, prof)
	cmd := exec.CommandContext(ctx, "ffmpeg", args...)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}
	var stderr bytes.Buffer
	cmd.Stderr = &stderr
	if err := cmd.Start(); err != nil {
		return err
	}
	defer func() { _ = stdout.Close() }()
	// flushWriter pushes every chunk to the client immediately.
	flusher, _ := w.(http.Flusher)
	fw := flushWriter{w: w, f: flusher}
	buf := make([]byte, 64*1024)
	_, copyErr := io.CopyBuffer(fw, stdout, buf)
	waitErr := cmd.Wait()
	// A client abort usually cancels ctx -> don't treat it as a real failure.
	if ctx.Err() != nil {
		return ctx.Err()
	}
	if copyErr != nil {
		return fmt.Errorf("stream copy failed: %w", copyErr)
	}
	if waitErr != nil {
		return fmt.Errorf("ffmpeg failed: %w (stderr=%s)", waitErr, strings.TrimSpace(stderr.String()))
	}
	return nil
}
// recordStatus handles ?id=<jobID> and returns the in-memory job as JSON.
// 400 when id is missing, 404 when the job is unknown.
func recordStatus(w http.ResponseWriter, r *http.Request) {
	id := r.URL.Query().Get("id")
	if id == "" {
		http.Error(w, "id fehlt", http.StatusBadRequest)
		return
	}
	jobsMu.Lock()
	job, ok := jobs[id]
	jobsMu.Unlock()
	if !ok {
		http.Error(w, "job nicht gefunden", http.StatusNotFound)
		return
	}
	// Consistency fix: the sibling handlers (recordList, recordStop) declare
	// the JSON content type and discard the encode error explicitly — this
	// handler previously did neither.
	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(job)
}
// recordStop handles POST ?id=<jobID>: it stops the referenced job via
// stopJobsInternal and echoes the job as JSON. 404 when the job is unknown.
func recordStop(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Nur POST", http.StatusMethodNotAllowed)
		return
	}
	id := r.URL.Query().Get("id")

	jobsMu.Lock()
	job, found := jobs[id]
	jobsMu.Unlock()
	if !found {
		http.Error(w, "job nicht gefunden", http.StatusNotFound)
		return
	}

	stopJobsInternal([]*RecordJob{job})

	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(job)
}
// buildDoneIndex scans doneAbs (plus exactly one subdirectory level) and
// builds a flat index of finished recordings together with pre-sorted index
// slices for the common list queries.
//
// Returns:
//   - []doneIndexItem — one entry per playable file (.mp4/.ts, non-empty,
//     never inside a ".trash" directory), covering both done/ and done/keep/
//   - map[string][]int — pre-sorted item indices keyed by
//     "<includeKeep 0|1>|<sortMode>"
//
// NOTE(review): this top-level function is duplicated almost verbatim as a
// closure inside recordDoneList (which is the one the cache rebuild actually
// calls); consider consolidating to one implementation.
func buildDoneIndex(doneAbs string) ([]doneIndexItem, map[string][]int) {
	items := make([]doneIndexItem, 0, 2048)
	sortedIdx := make(map[string][]int)
	// isTrashPath reports whether full lies inside a ".trash" directory.
	isTrashPath := func(full string) bool {
		p := strings.ToLower(filepath.ToSlash(strings.TrimSpace(full)))
		return strings.Contains(p, "/.trash/") || strings.HasSuffix(p, "/.trash")
	}
	// addFile appends one candidate file to items, filtering directories,
	// empty files, non-video extensions and trash entries.
	addFile := func(full string, fi os.FileInfo) {
		if fi == nil || fi.IsDir() || fi.Size() == 0 {
			return
		}
		if isTrashPath(full) {
			return
		}
		name := filepath.Base(full)
		ext := strings.ToLower(filepath.Ext(name))
		if ext != ".mp4" && ext != ".ts" {
			return
		}
		// keep?
		p := strings.ToLower(filepath.ToSlash(full))
		fromKeep := strings.Contains(p, "/keep/")
		// started/ended: prefer the timestamp encoded in the filename,
		// fall back to the file's ModTime.
		t := fi.ModTime()
		start := t
		base := strings.TrimSuffix(name, filepath.Ext(name))
		stem := strings.TrimPrefix(base, "HOT ")
		if m := startedAtFromFilenameRe.FindStringSubmatch(stem); m != nil {
			mm, _ := strconv.Atoi(m[2])
			dd, _ := strconv.Atoi(m[3])
			yy, _ := strconv.Atoi(m[4])
			hh, _ := strconv.Atoi(m[5])
			mi, _ := strconv.Atoi(m[6])
			ss, _ := strconv.Atoi(m[7])
			start = time.Date(yy, time.Month(mm), dd, hh, mi, ss, 0, time.Local)
		}
		// modelKey (lowercase) via the existing helper.
		mk := strings.ToLower(strings.TrimSpace(modelKeyFromFilenameOrPath(name, full, doneAbs)))
		if mk == "" {
			// fallback: parent dir (skip keep)
			parent := strings.ToLower(strings.TrimSpace(filepath.Base(filepath.Dir(full))))
			if parent != "" && parent != "keep" {
				mk = parent
			}
		}
		// fileSort (drop the hot prefix so sorting ignores it)
		fs := strings.ToLower(name)
		fs = strings.TrimPrefix(fs, "hot ")
		// duration + srcURL (meta.json first, then RAM cache only — no ffprobe)
		dur := 0.0
		srcURL := ""
		id := stripHotPrefix(strings.TrimSuffix(filepath.Base(full), filepath.Ext(full)))
		if strings.TrimSpace(id) != "" {
			if mp, err := generatedMetaFile(id); err == nil {
				if d, ok := readVideoMetaDuration(mp, fi); ok {
					dur = d
				}
				if u, ok := readVideoMetaSourceURL(mp, fi); ok {
					srcURL = u
				}
			}
		}
		if dur <= 0 {
			dur = durationSecondsCacheOnly(full, fi)
		}
		ended := t
		items = append(items, doneIndexItem{
			job: &RecordJob{
				ID:              base,
				Output:          full,
				SourceURL:       srcURL,
				Status:          JobFinished,
				StartedAt:       start,
				EndedAt:         &ended,
				DurationSeconds: dur,
				SizeBytes:       fi.Size(),
			},
			endedAt:  ended,
			fileSort: fs,
			fromKeep: fromKeep,
			modelKey: mk,
		})
	}
	// scanDir walks dir plus exactly one subdirectory level (no recursion).
	scanDir := func(dir string, skipKeep bool) {
		entries, err := os.ReadDir(dir)
		if err != nil {
			return
		}
		for _, e := range entries {
			if e.IsDir() {
				if strings.EqualFold(e.Name(), ".trash") {
					continue
				}
				if skipKeep && e.Name() == "keep" {
					continue
				}
				sub := filepath.Join(dir, e.Name())
				subs, err := os.ReadDir(sub)
				if err != nil {
					continue
				}
				for _, se := range subs {
					if se.IsDir() {
						continue
					}
					full := filepath.Join(sub, se.Name())
					fi, err := os.Stat(full)
					if err != nil {
						continue
					}
					addFile(full, fi)
				}
				continue
			}
			full := filepath.Join(dir, e.Name())
			fi, err := os.Stat(full)
			if err != nil {
				continue
			}
			addFile(full, fi)
		}
	}
	// done (without keep)
	scanDir(doneAbs, true)
	// keep (also indexed so includeKeep queries stay fast)
	scanDir(filepath.Join(doneAbs, "keep"), false)
	// Pre-sort the frequent case: includeKeep true/false per sort mode
	// (only used when NO model filter is applied).
	mkSorted := func(includeKeep bool, sortMode string) []int {
		idx := make([]int, 0, len(items))
		for i := range items {
			if !includeKeep && items[i].fromKeep {
				continue
			}
			idx = append(idx, i)
		}
		// durationForSort returns (duration, known?) — unknown durations
		// sort behind known ones.
		durationForSort := func(it doneIndexItem) (float64, bool) {
			if it.job.DurationSeconds > 0 {
				return it.job.DurationSeconds, true
			}
			return 0, false
		}
		// Every mode falls back to endedAt (newest first), then fileSort,
		// to keep the ordering deterministic.
		sort.Slice(idx, func(a, b int) bool {
			A := items[idx[a]]
			B := items[idx[b]]
			ta, tb := A.endedAt, B.endedAt
			switch sortMode {
			case "completed_asc":
				if !ta.Equal(tb) {
					return ta.Before(tb)
				}
				return A.fileSort < B.fileSort
			case "completed_desc":
				if !ta.Equal(tb) {
					return ta.After(tb)
				}
				return A.fileSort < B.fileSort
			case "file_asc":
				if A.fileSort != B.fileSort {
					return A.fileSort < B.fileSort
				}
				if !ta.Equal(tb) {
					return ta.After(tb)
				}
				return A.fileSort < B.fileSort
			case "file_desc":
				if A.fileSort != B.fileSort {
					return A.fileSort > B.fileSort
				}
				if !ta.Equal(tb) {
					return ta.After(tb)
				}
				return A.fileSort < B.fileSort
			case "duration_asc":
				da, okA := durationForSort(A)
				db, okB := durationForSort(B)
				if okA != okB {
					return okA
				}
				if okA && okB && da != db {
					return da < db
				}
				if !ta.Equal(tb) {
					return ta.After(tb)
				}
				return A.fileSort < B.fileSort
			case "duration_desc":
				da, okA := durationForSort(A)
				db, okB := durationForSort(B)
				if okA != okB {
					return okA
				}
				if okA && okB && da != db {
					return da > db
				}
				if !ta.Equal(tb) {
					return ta.After(tb)
				}
				return A.fileSort < B.fileSort
			case "size_asc":
				if A.job.SizeBytes != B.job.SizeBytes {
					return A.job.SizeBytes < B.job.SizeBytes
				}
				if !ta.Equal(tb) {
					return ta.After(tb)
				}
				return A.fileSort < B.fileSort
			case "size_desc":
				if A.job.SizeBytes != B.job.SizeBytes {
					return A.job.SizeBytes > B.job.SizeBytes
				}
				if !ta.Equal(tb) {
					return ta.After(tb)
				}
				return A.fileSort < B.fileSort
			default:
				if !ta.Equal(tb) {
					return ta.After(tb)
				}
				return A.fileSort < B.fileSort
			}
		})
		return idx
	}
	modes := []string{
		"completed_desc", "completed_asc",
		"file_asc", "file_desc",
		"duration_asc", "duration_desc",
		"size_asc", "size_desc",
	}
	for _, m := range modes {
		sortedIdx["0|"+m] = mkSorted(false, m)
		sortedIdx["1|"+m] = mkSorted(true, m)
	}
	return items, sortedIdx
}
// (Original note: add "sync" to the import block if missing — it is already
// imported above.)
// doneIndexItem is one entry of the cached done-file index used by
// recordDoneList for filtering, sorting and pagination.
type doneIndexItem struct {
	job      *RecordJob
	endedAt  time.Time // file ModTime; serves as the "completed" timestamp for sorting
	fileSort string    // lowercase filename without the "hot " prefix, for file_* sorts
	fromKeep bool      // true when the file lives under done/keep/
	modelKey string // lower
}
// doneIndexCache caches the scanned done-file index between requests. It is
// rebuilt when doneSeq changes, when the resolved done dir changes, or after
// a 30-second TTL (see the rebuild logic in recordDoneList).
type doneIndexCache struct {
	mu      sync.Mutex
	builtAt time.Time // time of the last rebuild (TTL anchor)
	seq     uint64    // doneSeq value the cache was built at
	doneAbs string    // absolute done dir the cache was built for
	items   []doneIndexItem
	sortedIdx map[string][]int // key: "<includeKeep 0/1>|<sortMode>"
}

// doneCache is the process-wide singleton; guarded by its own mutex.
var doneCache doneIndexCache
// recordDoneList handles GET and lists finished recordings from the done dir
// as synthetic RecordJob entries.
//
// Query parameters:
//   - includeKeep=1  — also include files under done/keep/
//   - model=<name>   — filter by model key (URL/host:model forms are normalized)
//   - page/pageSize  — 1-based pagination; omitted = full list
//   - all=1          — force the full list (disables pagination)
//   - sort=<mode>    — completed_/file_/duration_/size_ (asc|desc);
//     legacy model_* maps to file_*
//   - withCount=1    — respond as {count, items} instead of doneListResponse
//
// The scan result is cached in doneCache and rebuilt when doneSeq changes,
// the done dir changes, or after a 30s TTL. ".trash" content is never listed.
func recordDoneList(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Nur GET erlaubt", http.StatusMethodNotAllowed)
		return
	}
	// Optionally include /done/keep/ as well (default: false).
	qKeep := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("includeKeep")))
	includeKeep := qKeep == "1" || qKeep == "true" || qKeep == "yes"
	// Optional model filter (pagination is then meaningful "per model").
	// normalizeQueryModel reduces URLs ("https://host/model"), "host:model"
	// and "@model" forms down to a lowercase model name.
	normalizeQueryModel := func(raw string) string {
		s := strings.TrimSpace(raw)
		if s == "" {
			return ""
		}
		s = strings.TrimPrefix(s, "http://")
		s = strings.TrimPrefix(s, "https://")
		// last URL segment, in case ".../modelname" was passed
		if strings.Contains(s, "/") {
			parts := strings.Split(s, "/")
			for i := len(parts) - 1; i >= 0; i-- {
				p := strings.TrimSpace(parts[i])
				if p != "" {
					s = p
					break
				}
			}
		}
		// in case "host:model" was passed
		if strings.Contains(s, ":") {
			parts := strings.Split(s, ":")
			s = strings.TrimSpace(parts[len(parts)-1])
		}
		s = strings.TrimPrefix(s, "@")
		return strings.ToLower(strings.TrimSpace(s))
	}
	qModel := normalizeQueryModel(r.URL.Query().Get("model"))
	// Optional pagination (1-based). Without page/pageSize: full list as before.
	page := 0
	pageSize := 0
	if v := strings.TrimSpace(r.URL.Query().Get("page")); v != "" {
		if n, err := strconv.Atoi(v); err == nil && n > 0 {
			page = n
		}
	}
	if v := strings.TrimSpace(r.URL.Query().Get("pageSize")); v != "" {
		if n, err := strconv.Atoi(v); err == nil && n > 0 {
			pageSize = n
		}
	}
	// Optional sort.
	// supported: completed_(asc|desc), model_(asc|desc), file_(asc|desc), duration_(asc|desc), size_(asc|desc)
	sortMode := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("sort")))
	if sortMode == "" {
		sortMode = "completed_desc"
	}
	// Backwards compat: map the old model_* sorts onto file_*.
	if sortMode == "model_asc" {
		sortMode = "file_asc"
	}
	if sortMode == "model_desc" {
		sortMode = "file_desc"
	}
	// all=1 -> always return the complete list (disable pagination).
	qAll := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("all")))
	fetchAll := qAll == "1" || qAll == "true" || qAll == "yes"
	if fetchAll {
		page = 0
		pageSize = 0
	}
	// Optionally include the total count in the response shape.
	qWithCount := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("withCount")))
	withCount := qWithCount == "1" || qWithCount == "true" || qWithCount == "yes"
	// Never count/list .trash content as a "done item".
	isTrashOutput := func(p string) bool {
		pp := strings.ToLower(filepath.ToSlash(strings.TrimSpace(p)))
		return strings.Contains(pp, "/.trash/") || strings.HasSuffix(pp, "/.trash")
	}
	isTrashPath := func(full string) bool {
		p := strings.ReplaceAll(full, "\\", "/")
		return strings.Contains(p, "/.trash/") || strings.HasSuffix(p, "/.trash")
	}
	// --- helpers (derive the model key from filename/dir) ---
	modelFromStem := func(stem string) string {
		// stem: lowercase, without extension, without the HOT prefix
		if stem == "" {
			return ""
		}
		if m := startedAtFromFilenameRe.FindStringSubmatch(stem); m != nil {
			return strings.ToLower(strings.TrimSpace(m[1]))
		}
		// fallback: everything before the last "_" (or the whole stem)
		if i := strings.LastIndex(stem, "_"); i > 0 {
			return strings.ToLower(strings.TrimSpace(stem[:i]))
		}
		return strings.ToLower(strings.TrimSpace(stem))
	}
	modelFromFullPath := func(full string) string {
		name := strings.ToLower(filepath.Base(full))
		stem := strings.TrimSuffix(name, filepath.Ext(name))
		stem = strings.TrimPrefix(stem, "hot ")
		mk := modelFromStem(stem)
		// fallback: if the filename yields nothing, use the folder (/done/<model>/file)
		if mk == "" {
			parent := strings.ToLower(filepath.Base(filepath.Dir(full)))
			parent = strings.TrimSpace(parent)
			if parent != "" && parent != "keep" {
				mk = parent
			}
		}
		return mk
	}
	// helpers (sort)
	fileForSortName := func(filename string) string {
		f := strings.ToLower(filename)
		f = strings.TrimPrefix(f, "hot ")
		return f
	}
	// durationForSort returns (duration, known?) — unknown sorts last.
	durationForSort := func(j *RecordJob) (sec float64, ok bool) {
		if j.DurationSeconds > 0 {
			return j.DurationSeconds, true
		}
		return 0, false
	}
	// compareIdx is the shared less-func over item indices; every mode ties
	// back to endedAt (newest first) and then fileSort for determinism.
	compareIdx := func(items []doneIndexItem, sortMode string, ia, ib int) bool {
		a := items[ia]
		b := items[ib]
		ta, tb := a.endedAt, b.endedAt
		switch sortMode {
		case "completed_asc":
			if !ta.Equal(tb) {
				return ta.Before(tb)
			}
			return a.fileSort < b.fileSort
		case "completed_desc":
			if !ta.Equal(tb) {
				return ta.After(tb)
			}
			return a.fileSort < b.fileSort
		case "file_asc":
			if a.fileSort != b.fileSort {
				return a.fileSort < b.fileSort
			}
			if !ta.Equal(tb) {
				return ta.After(tb)
			}
			return a.fileSort < b.fileSort
		case "file_desc":
			if a.fileSort != b.fileSort {
				return a.fileSort > b.fileSort
			}
			if !ta.Equal(tb) {
				return ta.After(tb)
			}
			return a.fileSort < b.fileSort
		case "duration_asc":
			da, okA := durationForSort(a.job)
			db, okB := durationForSort(b.job)
			if okA != okB {
				return okA // unknown durations go last
			}
			if okA && okB && da != db {
				return da < db
			}
			if !ta.Equal(tb) {
				return ta.After(tb)
			}
			return a.fileSort < b.fileSort
		case "duration_desc":
			da, okA := durationForSort(a.job)
			db, okB := durationForSort(b.job)
			if okA != okB {
				return okA
			}
			if okA && okB && da != db {
				return da > db
			}
			if !ta.Equal(tb) {
				return ta.After(tb)
			}
			return a.fileSort < b.fileSort
		case "size_asc":
			if a.job.SizeBytes != b.job.SizeBytes {
				return a.job.SizeBytes < b.job.SizeBytes
			}
			if !ta.Equal(tb) {
				return ta.After(tb)
			}
			return a.fileSort < b.fileSort
		case "size_desc":
			if a.job.SizeBytes != b.job.SizeBytes {
				return a.job.SizeBytes > b.job.SizeBytes
			}
			if !ta.Equal(tb) {
				return ta.After(tb)
			}
			return a.fileSort < b.fileSort
		default:
			if !ta.Equal(tb) {
				return ta.After(tb)
			}
			return a.fileSort < b.fileSort
		}
	}
	// --- resolve done path ---
	s := getSettings()
	doneAbs, err := resolvePathRelativeToApp(s.DoneDir)
	if err != nil {
		http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	// No DoneDir configured -> simply return an empty list.
	if strings.TrimSpace(doneAbs) == "" {
		w.Header().Set("Content-Type", "application/json")
		w.Header().Set("Cache-Control", "no-store")
		_ = json.NewEncoder(w).Encode(doneListResponse{
			Items:      []*RecordJob{},
			TotalCount: 0,
			Page:       page,
			PageSize:   pageSize,
		})
		return
	}
	// --------- cache rebuild (only on doneSeq change or TTL) ---------
	// NOTE(review): this closure shadows — and nearly duplicates — the
	// top-level buildDoneIndex function; consider consolidating.
	buildDoneIndex := func(doneAbs string) ([]doneIndexItem, map[string][]int) {
		items := make([]doneIndexItem, 0, 2048)
		// addFile appends one candidate file (same filtering as above, but
		// fromKeep is passed in by the scanner instead of derived).
		addFile := func(full string, fi os.FileInfo, fromKeep bool) {
			if fi == nil || fi.IsDir() || fi.Size() == 0 {
				return
			}
			// never count or return .trash content
			if isTrashPath(full) || isTrashOutput(full) {
				return
			}
			name := filepath.Base(full)
			ext := strings.ToLower(filepath.Ext(name))
			if ext != ".mp4" && ext != ".ts" {
				return
			}
			base := strings.TrimSuffix(name, filepath.Ext(name))
			t := fi.ModTime()
			// StartedAt from the filename (fallback: ModTime)
			start := t
			stem := base
			if strings.HasPrefix(stem, "HOT ") {
				stem = strings.TrimPrefix(stem, "HOT ")
			}
			if m := startedAtFromFilenameRe.FindStringSubmatch(stem); m != nil {
				mm, _ := strconv.Atoi(m[2])
				dd, _ := strconv.Atoi(m[3])
				yy, _ := strconv.Atoi(m[4])
				hh, _ := strconv.Atoi(m[5])
				mi, _ := strconv.Atoi(m[6])
				ss, _ := strconv.Atoi(m[7])
				start = time.Date(yy, time.Month(mm), dd, hh, mi, ss, 0, time.Local)
			}
			dur := 0.0
			srcURL := ""
			// 1) read meta.json from generated/<id>/meta.json (fast)
			id := stripHotPrefix(strings.TrimSuffix(filepath.Base(full), filepath.Ext(full)))
			if strings.TrimSpace(id) != "" {
				if mp, err := generatedMetaFile(id); err == nil {
					if d, ok := readVideoMetaDuration(mp, fi); ok {
						dur = d
					}
					if u, ok := readVideoMetaSourceURL(mp, fi); ok {
						srcURL = u
					}
				}
			}
			// 2) fallback: RAM cache only (still fast, no ffprobe)
			if dur <= 0 {
				dur = durationSecondsCacheOnly(full, fi)
			}
			ended := t
			mk := modelFromFullPath(full)
			fs := fileForSortName(name)
			items = append(items, doneIndexItem{
				job: &RecordJob{
					ID:              base,
					Output:          full,
					SourceURL:       srcURL,
					Status:          JobFinished,
					StartedAt:       start,
					EndedAt:         &ended,
					DurationSeconds: dur,
					SizeBytes:       fi.Size(),
				},
				endedAt:  ended,
				fileSort: fs,
				fromKeep: fromKeep,
				modelKey: mk,
			})
		}
		// scan one level: root plus root/<sub>/*
		scanRoot := func(root string, fromKeep bool, skipKeepDir bool) {
			entries, err := os.ReadDir(root)
			if err != nil {
				return
			}
			for _, e := range entries {
				if e.IsDir() {
					// never scan .trash folders
					if strings.EqualFold(e.Name(), ".trash") {
						continue
					}
					// don't scan keep twice (when root == doneAbs)
					if skipKeepDir && e.Name() == "keep" {
						continue
					}
					sub := filepath.Join(root, e.Name())
					subEntries, err := os.ReadDir(sub)
					if err != nil {
						continue
					}
					for _, se := range subEntries {
						if se.IsDir() {
							continue
						}
						full := filepath.Join(sub, se.Name())
						fi, err := se.Info()
						if err != nil {
							// fallback
							fi2, err2 := os.Stat(full)
							if err2 != nil {
								continue
							}
							fi = fi2
						}
						addFile(full, fi, fromKeep)
					}
					continue
				}
				full := filepath.Join(root, e.Name())
				fi, err := e.Info()
				if err != nil {
					fi2, err2 := os.Stat(full)
					if err2 != nil {
						continue
					}
					fi = fi2
				}
				addFile(full, fi, fromKeep)
			}
		}
		// doneAbs without keep
		scanRoot(doneAbs, false, true)
		// keep (if it exists)
		scanRoot(filepath.Join(doneAbs, "keep"), true, false)
		// pre-sorted indices: includeKeep 0/1 per sort mode
		sorted := make(map[string][]int)
		buildSorted := func(inc bool, mode string) []int {
			idx := make([]int, 0, len(items))
			for i := range items {
				if !inc && items[i].fromKeep {
					continue
				}
				idx = append(idx, i)
			}
			sort.Slice(idx, func(a, b int) bool {
				return compareIdx(items, mode, idx[a], idx[b])
			})
			return idx
		}
		modes := []string{
			"completed_desc", "completed_asc",
			"file_asc", "file_desc",
			"duration_asc", "duration_desc",
			"size_asc", "size_desc",
		}
		for _, m := range modes {
			sorted["0|"+m] = buildSorted(false, m)
			sorted["1|"+m] = buildSorted(true, m)
		}
		return items, sorted
	}
	// rebuild when doneSeq changed or the TTL expired
	curSeq := atomic.LoadUint64(&doneSeq)
	now := time.Now()
	doneCache.mu.Lock()
	needRebuild := doneCache.seq != curSeq ||
		doneCache.doneAbs != doneAbs ||
		now.Sub(doneCache.builtAt) > 30*time.Second
	if needRebuild {
		// If doneAbs does not exist: store empty data in the cache.
		if _, err := os.Stat(doneAbs); err != nil && os.IsNotExist(err) {
			doneCache.items = nil
			doneCache.sortedIdx = map[string][]int{
				"0|completed_desc": {},
				"1|completed_desc": {},
			}
			doneCache.seq = curSeq
			doneCache.doneAbs = doneAbs
			doneCache.builtAt = now
		} else {
			items, sorted := buildDoneIndex(doneAbs)
			doneCache.items = items
			doneCache.sortedIdx = sorted
			doneCache.seq = curSeq
			doneCache.doneAbs = doneAbs
			doneCache.builtAt = now
		}
	}
	items := doneCache.items
	sortedAll := doneCache.sortedIdx
	doneCache.mu.Unlock()
	// --------- request-specific selection (model filter, includeKeep, sort, paging) ---------
	incKey := "0"
	if includeKeep {
		incKey = "1"
	}
	// idx holds indices into items
	var idx []int
	if qModel == "" {
		idx = sortedAll[incKey+"|"+sortMode]
		if idx == nil {
			// fallback
			idx = sortedAll[incKey+"|completed_desc"]
			if idx == nil {
				idx = make([]int, 0)
			}
		}
	} else {
		// model filter: select the subset, then sort it
		idx = make([]int, 0, 256)
		for i := range items {
			if !includeKeep && items[i].fromKeep {
				continue
			}
			if items[i].modelKey == qModel {
				idx = append(idx, i)
			}
		}
		sort.Slice(idx, func(a, b int) bool {
			return compareIdx(items, sortMode, idx[a], idx[b])
		})
	}
	totalCount := len(idx)
	// apply pagination (on idx only)
	start := 0
	end := totalCount
	if pageSize > 0 && !fetchAll {
		if page <= 0 {
			page = 1
		}
		start = (page - 1) * pageSize
		if start < 0 {
			start = 0
		}
		if start >= totalCount {
			start = totalCount
		}
		end = start + pageSize
		if end > totalCount {
			end = totalCount
		}
	}
	// build the response jobs
	out := make([]*RecordJob, 0, max(0, end-start))
	for _, i := range idx[start:end] {
		out = append(out, items[i].job)
	}
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Cache-Control", "no-store")
	// With "withCount=1" the frontend expects {count, items}.
	if withCount {
		_ = json.NewEncoder(w).Encode(map[string]any{
			"count": totalCount,
			"items": out,
		})
		return
	}
	// Standard response: always include totalCount as well.
	_ = json.NewEncoder(w).Encode(doneListResponse{
		Items:      out,
		TotalCount: totalCount,
		Page:       page,
		PageSize:   pageSize,
	})
}
// max returns the larger of a and b.
// (Kept as a local helper; shadows the Go 1.21 builtin of the same name.)
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
// recordDeleteVideo moves a finished video (found in done/, done/<subdir>,
// keep/ or keep/<subdir>) into a single-slot trash at doneDir/.trash so the
// most recent deletion can be undone via recordRestoreVideo. Trashing a new
// file first empties .trash, which permanently discards the previous occupant
// and its generated assets — only ever ONE undo is possible.
//
// Query: file=<basename> (URL-encoded, .mp4/.ts only, no path components).
// Responds with JSON {ok, file, from, undoToken, trashed}.
func recordDeleteVideo(w http.ResponseWriter, r *http.Request) {
	// The frontend currently sends POST (see FinishedDownloads), so both POST and DELETE are accepted.
	if r.Method != http.MethodPost && r.Method != http.MethodDelete {
		http.Error(w, "Nur POST oder DELETE erlaubt", http.StatusMethodNotAllowed)
		return
	}
	raw := strings.TrimSpace(r.URL.Query().Get("file"))
	if raw == "" {
		http.Error(w, "file fehlt", http.StatusBadRequest)
		return
	}
	// Decode defensively; reject malformed percent-encoding.
	file, err := url.QueryUnescape(raw)
	if err != nil {
		http.Error(w, "ungültiger file", http.StatusBadRequest)
		return
	}
	file = strings.TrimSpace(file)
	// Allow only a bare basename (no subdirectories, no path traversal).
	// NOTE(review): this duplicates what recordUnkeepVideo does via isSafeBasename — confirm the two stay equivalent.
	if file == "" ||
		strings.Contains(file, "/") ||
		strings.Contains(file, "\\") ||
		filepath.Base(file) != file {
		http.Error(w, "ungültiger file", http.StatusBadRequest)
		return
	}
	ext := strings.ToLower(filepath.Ext(file))
	if ext != ".mp4" && ext != ".ts" {
		http.Error(w, "nicht erlaubt", http.StatusForbidden)
		return
	}
	s := getSettings()
	doneAbs, err := resolvePathRelativeToApp(s.DoneDir)
	if err != nil {
		http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	if strings.TrimSpace(doneAbs) == "" {
		http.Error(w, "doneDir ist leer", http.StatusBadRequest)
		return
	}
	// Searches done + done/<subdir> as well as keep + keep/<subdir>.
	target, from, fi, err := resolveDoneFileByName(doneAbs, file)
	if err != nil {
		http.Error(w, "datei nicht gefunden", http.StatusNotFound)
		return
	}
	if fi != nil && fi.IsDir() {
		http.Error(w, "ist ein verzeichnis", http.StatusBadRequest)
		return
	}
	// Single-slot trash: only the *most recently* deleted file is undoable.
	trashDir := filepath.Join(doneAbs, ".trash")
	// If the single-slot trash already holds a file: remember its ID,
	// but remove its generated assets only AFTER .trash was actually emptied successfully.
	prevBase := ""
	prevCanonical := ""
	if b, err := os.ReadFile(filepath.Join(trashDir, "last.json")); err == nil && len(b) > 0 {
		var prev struct {
			File string `json:"file"`
		}
		if err := json.Unmarshal(b, &prev); err == nil {
			prevFile := strings.TrimSpace(prev.File)
			if prevFile != "" {
				prevBase = strings.TrimSuffix(prevFile, filepath.Ext(prevFile))
				prevCanonical = stripHotPrefix(prevBase)
			}
		}
	}
	// Empty the trash completely => older undo tokens become invalid automatically.
	// ⚠️ Do NOT swallow errors here: if .trash cannot be emptied, the new delete must not proceed.
	if err := os.RemoveAll(trashDir); err != nil {
		if runtime.GOOS == "windows" && isSharingViolation(err) {
			http.Error(w, "konnte .trash nicht leeren (Datei wird gerade verwendet). Bitte Player schließen und erneut versuchen.", http.StatusConflict)
			return
		}
		http.Error(w, "trash leeren fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	// The previously trashed video is now definitively gone → drop generated/meta/<id>/.
	if prevCanonical != "" {
		removeGeneratedForID(prevCanonical)
		// Best effort: in case assets were ever created under the HOT-prefixed ID.
		if prevBase != "" && prevBase != prevCanonical {
			removeGeneratedForID(prevBase)
		}
	}
	if err := os.MkdirAll(trashDir, 0o755); err != nil {
		http.Error(w, "trash dir erstellen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	// Remember the original dir relative to doneAbs (incl. keep/<subdir> or <subdir>).
	origDir := filepath.Dir(target)
	relDir, err := filepath.Rel(doneAbs, origDir)
	if err != nil {
		http.Error(w, "rel dir berechnen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	relDir = filepath.ToSlash(relDir)
	if strings.TrimSpace(relDir) == "" {
		relDir = "."
	}
	// Create the undo token up front so it can be stored as the "single-slot key".
	tok, err := encodeUndoDeleteToken(undoDeleteToken{
		Trash: "", // set right below (trashName)
		RelDir: relDir, // computed above
		File: file,
	})
	if err != nil {
		http.Error(w, "undo token encode fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	trashName := tok + "__" + file // unique + token visible in the filename
	trashName = strings.ReplaceAll(trashName, string(os.PathSeparator), "_")
	dst := filepath.Join(trashDir, trashName)
	// The token must also know the trash filename
	// (we do not re-encode the token — the trash name is stored separately in last.json).
	// Move with retry (robust against Windows file locks).
	if err := renameWithRetry(target, dst); err != nil {
		if runtime.GOOS == "windows" && isSharingViolation(err) {
			http.Error(w, "datei wird gerade verwendet (Player offen). Bitte kurz stoppen und erneut versuchen.", http.StatusConflict)
			return
		}
		http.Error(w, "trash move fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	// Write last.json: only this token is valid from now on.
	type trashMeta struct {
		Token string `json:"token"` // exactly the query token (encoded)
		TrashName string `json:"trashName"` // filename inside .trash
		RelDir string `json:"relDir"` // original folder relative to doneAbs
		File string `json:"file"` // original name (basename)
		DeletedAt int64 `json:"deletedAt"`
	}
	meta := trashMeta{
		Token: tok,
		TrashName: trashName,
		RelDir: relDir,
		File: file,
		DeletedAt: time.Now().Unix(),
	}
	b, _ := json.Marshal(meta)
	_ = os.WriteFile(filepath.Join(trashDir, "last.json"), b, 0o644)
	// Clean caches/jobs (assets are NOT hard-deleted => undo stays "fast").
	purgeDurationCacheForPath(target)
	removeJobsByOutputBasename(file)
	notifyDoneChanged()
	notifyJobsChanged()
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Cache-Control", "no-store")
	_ = json.NewEncoder(w).Encode(map[string]any{
		"ok": true,
		"file": file,
		"from": from, // "done" | "keep"
		"undoToken": tok, // for undo
		"trashed": true,
	})
}
// recordRestoreVideo restores the most recently trashed video (single-slot
// undo, counterpart of recordDeleteVideo). POST only; the "token" query
// parameter must equal — byte for byte — the token stored in .trash/last.json
// by the last delete. The file is moved back into its original folder
// relative to doneDir (a __dup suffix may be appended on name collision);
// afterwards the trash is emptied so the token cannot be replayed.
func recordRestoreVideo(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Nur POST erlaubt", http.StatusMethodNotAllowed)
		return
	}
	raw := strings.TrimSpace(r.URL.Query().Get("token"))
	if raw == "" {
		http.Error(w, "token fehlt", http.StatusBadRequest)
		return
	}
	// Resolve doneDir.
	s := getSettings()
	doneAbs, err := resolvePathRelativeToApp(s.DoneDir)
	if err != nil {
		http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	if strings.TrimSpace(doneAbs) == "" {
		http.Error(w, "doneDir ist leer", http.StatusBadRequest)
		return
	}
	// Single slot: read last.json and validate the token strictly against it.
	trashDir := filepath.Join(doneAbs, ".trash")
	metaPath := filepath.Join(trashDir, "last.json")
	b, err := os.ReadFile(metaPath)
	if err != nil {
		http.Error(w, "nichts zum Wiederherstellen", http.StatusNotFound)
		return
	}
	var meta struct {
		Token string `json:"token"`
		TrashName string `json:"trashName"`
		RelDir string `json:"relDir"`
		File string `json:"file"`
		DeletedAt int64 `json:"deletedAt"`
	}
	if err := json.Unmarshal(b, &meta); err != nil {
		http.Error(w, "trash meta ungültig", http.StatusInternalServerError)
		return
	}
	if strings.TrimSpace(meta.Token) == "" || strings.TrimSpace(meta.TrashName) == "" || strings.TrimSpace(meta.File) == "" {
		http.Error(w, "trash meta unvollständig", http.StatusInternalServerError)
		return
	}
	// Only the most recent token is valid.
	if raw != meta.Token {
		http.Error(w, "token ungültig (nicht der letzte)", http.StatusNotFound)
		return
	}
	// Additionally decode the token (format check; the restore data itself comes from last.json).
	tok, err := decodeUndoDeleteToken(raw)
	if err != nil {
		http.Error(w, "token ungültig", http.StatusBadRequest)
		return
	}
	// Safety: only use path components from meta that pass the basename/reldir checks.
	if !isSafeBasename(meta.TrashName) || !isSafeBasename(meta.File) || !isSafeRelDir(meta.RelDir) {
		http.Error(w, "token inhalt ungültig", http.StatusBadRequest)
		return
	}
	// Extra consistency checks: token.File / token.RelDir must match meta (optional but sensible).
	if tok.File != meta.File || tok.RelDir != meta.RelDir {
		http.Error(w, "token passt nicht zu letzter Löschung", http.StatusNotFound)
		return
	}
	ext := strings.ToLower(filepath.Ext(meta.File))
	if ext != ".mp4" && ext != ".ts" {
		http.Error(w, "nicht erlaubt", http.StatusForbidden)
		return
	}
	// Source: exactly the most recently deleted file.
	src := filepath.Join(trashDir, meta.TrashName)
	// Reconstruct the destination folder (relative to doneAbs).
	rel := meta.RelDir
	if rel == "." {
		rel = ""
	}
	dstDir := filepath.Join(doneAbs, filepath.FromSlash(rel))
	dstDirClean := filepath.Clean(dstDir)
	doneClean := filepath.Clean(doneAbs)
	// Safety: dstDir must lie inside doneAbs (case-insensitive prefix compare,
	// presumably to match Windows path semantics — TODO confirm on case-sensitive filesystems).
	if !strings.HasPrefix(strings.ToLower(dstDirClean)+string(os.PathSeparator), strings.ToLower(doneClean)+string(os.PathSeparator)) &&
		!strings.EqualFold(dstDirClean, doneClean) {
		http.Error(w, "zielpfad ungültig", http.StatusBadRequest)
		return
	}
	if err := os.MkdirAll(dstDirClean, 0o755); err != nil {
		http.Error(w, "zielordner erstellen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	dst, err := uniqueDestPath(dstDirClean, meta.File)
	if err != nil {
		http.Error(w, "zielname nicht verfügbar: "+err.Error(), http.StatusConflict)
		return
	}
	if err := renameWithRetry(src, dst); err != nil {
		if runtime.GOOS == "windows" && isSharingViolation(err) {
			http.Error(w, "restore fehlgeschlagen (Datei wird gerade verwendet).", http.StatusConflict)
			return
		}
		http.Error(w, "restore fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	// Empty the trash so the token is definitely dead afterwards (best effort).
	_ = os.RemoveAll(trashDir)
	_ = os.MkdirAll(trashDir, 0o755)
	notifyDoneChanged()
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Cache-Control", "no-store")
	_ = json.NewEncoder(w).Encode(map[string]any{
		"ok": true,
		"file": meta.File,
		"restoredFile": filepath.Base(dst), // may contain a __dup suffix
	})
}
// recordUnkeepVideo moves a previously kept video out of keep/ (root or
// keep/<subdir>) back into the flat done/ directory. POST only; the target
// is passed URL-encoded via the "file" query parameter and must be a plain
// .mp4/.ts basename. Responds with JSON {ok, oldFile, newFile}.
func recordUnkeepVideo(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Nur POST erlaubt", http.StatusMethodNotAllowed)
		return
	}
	q := strings.TrimSpace(r.URL.Query().Get("file"))
	if q == "" {
		http.Error(w, "file fehlt", http.StatusBadRequest)
		return
	}
	name, decErr := url.QueryUnescape(q)
	if decErr != nil {
		http.Error(w, "ungültiger file", http.StatusBadRequest)
		return
	}
	name = strings.TrimSpace(name)
	if !isSafeBasename(name) {
		http.Error(w, "ungültiger file", http.StatusBadRequest)
		return
	}
	// Only the two known video container extensions may be moved.
	switch strings.ToLower(filepath.Ext(name)) {
	case ".mp4", ".ts":
	default:
		http.Error(w, "nicht erlaubt", http.StatusForbidden)
		return
	}
	cfg := getSettings()
	doneAbs, resErr := resolvePathRelativeToApp(cfg.DoneDir)
	if resErr != nil {
		http.Error(w, "doneDir auflösung fehlgeschlagen: "+resErr.Error(), http.StatusInternalServerError)
		return
	}
	if strings.TrimSpace(doneAbs) == "" {
		http.Error(w, "doneDir ist leer", http.StatusBadRequest)
		return
	}
	// The source must currently live in keep (root or keep/<subdir>).
	srcPath, origin, info, lookErr := resolveDoneFileByName(doneAbs, name)
	if lookErr != nil {
		http.Error(w, "datei nicht gefunden", http.StatusNotFound)
		return
	}
	if origin != "keep" {
		http.Error(w, "datei ist nicht in keep", http.StatusConflict)
		return
	}
	if info != nil && info.IsDir() {
		http.Error(w, "ist ein verzeichnis", http.StatusBadRequest)
		return
	}
	// Destination: back into the flat done/ root (no model subdirectories).
	if mkErr := os.MkdirAll(doneAbs, 0o755); mkErr != nil {
		http.Error(w, "done subdir erstellen fehlgeschlagen: "+mkErr.Error(), http.StatusInternalServerError)
		return
	}
	dstPath, dstErr := uniqueDestPath(doneAbs, name)
	if dstErr != nil {
		http.Error(w, "zielname nicht verfügbar: "+dstErr.Error(), http.StatusConflict)
		return
	}
	if mvErr := renameWithRetry(srcPath, dstPath); mvErr != nil {
		if runtime.GOOS == "windows" && isSharingViolation(mvErr) {
			http.Error(w, "unkeep fehlgeschlagen (Datei wird gerade verwendet).", http.StatusConflict)
			return
		}
		http.Error(w, "unkeep fehlgeschlagen: "+mvErr.Error(), http.StatusInternalServerError)
		return
	}
	notifyDoneChanged()
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Cache-Control", "no-store")
	_ = json.NewEncoder(w).Encode(map[string]any{
		"ok":      true,
		"oldFile": name,
		"newFile": filepath.Base(dstPath),
	})
}
// recordKeepVideo moves a finished video from done/ (root or done/<subdir>)
// into keep/<model>/ (falling back to keep/ root when no model key can be
// derived). If the file already sits somewhere under keep/, it is reported
// as alreadyKept; a file lying in the keep root is opportunistically sorted
// into keep/<model>/ on that occasion (best effort). POST only; "file" is a
// URL-encoded plain .mp4/.ts basename.
func recordKeepVideo(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Nur POST erlaubt", http.StatusMethodNotAllowed)
		return
	}
	raw := strings.TrimSpace(r.URL.Query().Get("file"))
	if raw == "" {
		http.Error(w, "file fehlt", http.StatusBadRequest)
		return
	}
	file, err := url.QueryUnescape(raw)
	if err != nil {
		http.Error(w, "ungültiger file", http.StatusBadRequest)
		return
	}
	file = strings.TrimSpace(file)
	// Allow only a bare basename (no separators, no traversal).
	// NOTE(review): duplicates what recordUnkeepVideo does via isSafeBasename — confirm equivalence.
	if file == "" ||
		strings.Contains(file, "/") ||
		strings.Contains(file, "\\") ||
		filepath.Base(file) != file {
		http.Error(w, "ungültiger file", http.StatusBadRequest)
		return
	}
	ext := strings.ToLower(filepath.Ext(file))
	if ext != ".mp4" && ext != ".ts" {
		http.Error(w, "nicht erlaubt", http.StatusForbidden)
		return
	}
	s := getSettings()
	doneAbs, err := resolvePathRelativeToApp(s.DoneDir)
	if err != nil {
		http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	if strings.TrimSpace(doneAbs) == "" {
		http.Error(w, "doneDir ist leer", http.StatusBadRequest)
		return
	}
	keepRoot := filepath.Join(doneAbs, "keep")
	if err := os.MkdirAll(keepRoot, 0o755); err != nil {
		http.Error(w, "keep dir erstellen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	// 0) If the file already exists somewhere in keep (root or keep/<subdir>):
	//    - when it sits in the keep root: sort it into keep/<model>/ now (best effort)
	if p, _, ok := findFileInDirOrOneLevelSubdirs(keepRoot, file, ""); ok {
		// p lies either in keepRoot or in keepRoot/<subdir>
		if strings.EqualFold(filepath.Clean(filepath.Dir(p)), filepath.Clean(keepRoot)) {
			// In the root => try to sort it into a model folder.
			modelKey := modelKeyFromFilenameOrPath(file, p /* srcPath */, keepRoot /* doneAbs placeholder, not used here */)
			modelKey = sanitizeModelKey(modelKey)
			// Optional fallback: if the keep-root path yields nothing, derive from the filename only.
			if modelKey == "" {
				stem := strings.TrimSuffix(file, filepath.Ext(file))
				modelKey = sanitizeModelKey(modelNameFromFilename(stem))
			}
			if modelKey != "" {
				dstDir := filepath.Join(keepRoot, modelKey)
				if err := os.MkdirAll(dstDir, 0o755); err == nil {
					dst, derr := uniqueDestPath(dstDir, file)
					if derr == nil {
						// Best-effort move; failure is deliberately ignored (file stays in keep root).
						_ = renameWithRetry(p, dst)
					}
				}
			}
		}
		w.Header().Set("Content-Type", "application/json")
		w.Header().Set("Cache-Control", "no-store")
		_ = json.NewEncoder(w).Encode(map[string]any{
			"ok": true,
			"file": file,
			"alreadyKept": true,
		})
		return
	}
	// 1) Source must be in done (root or done/<subdir>), but NOT under keep.
	src, fi, ok := findFileInDirOrOneLevelSubdirs(doneAbs, file, "keep")
	if !ok {
		http.Error(w, "datei nicht gefunden", http.StatusNotFound)
		return
	}
	if fi == nil || fi.IsDir() {
		http.Error(w, "ist ein verzeichnis", http.StatusBadRequest)
		return
	}
	// 2) Destination: keep/<model>/file (or keep/ root when no model key is found).
	modelKey := modelKeyFromFilenameOrPath(file, src, doneAbs)
	dstDir := keepRoot
	if modelKey != "" {
		dstDir = filepath.Join(keepRoot, modelKey)
	}
	if err := os.MkdirAll(dstDir, 0o755); err != nil {
		http.Error(w, "keep subdir erstellen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	dst, err := uniqueDestPath(dstDir, file)
	if err != nil {
		http.Error(w, "zielname nicht verfügbar: "+err.Error(), http.StatusConflict)
		return
	}
	// Rename with retry (robust against Windows file locks).
	if err := renameWithRetry(src, dst); err != nil {
		if runtime.GOOS == "windows" && isSharingViolation(err) {
			http.Error(w, "keep fehlgeschlagen (Datei wird gerade verwendet).", http.StatusConflict)
			return
		}
		http.Error(w, "keep fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	notifyDoneChanged()
	// NOTE(review): original comment claims a cleanup block belongs here (delete generated
	// assets, legacy cleanup, removeJobsByOutputBasename) and "stays unchanged", but no such
	// code is present in this function — confirm whether it was lost or lives elsewhere.
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Cache-Control", "no-store")
	_ = json.NewEncoder(w).Encode(map[string]any{
		"ok": true,
		"file": file,
		"alreadyKept": false,
		"newFile": filepath.Base(dst), // new: destination name may carry a dedup suffix
	})
}
// recordToggleHot toggles the "HOT " filename prefix on a finished video.
// The file may live in done/, done/<subdir>, keep/ or keep/<subdir>; the
// rename happens in place inside whatever directory the file was found in.
// Generated assets are NOT renamed — they stay keyed by the canonical ID
// (the stem without the HOT prefix). POST only; "file" is a URL-encoded
// plain .mp4/.ts basename.
func recordToggleHot(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Nur POST", http.StatusMethodNotAllowed)
		return
	}
	q := strings.TrimSpace(r.URL.Query().Get("file"))
	if q == "" {
		http.Error(w, "file fehlt", http.StatusBadRequest)
		return
	}
	name, decErr := url.QueryUnescape(q)
	if decErr != nil {
		http.Error(w, "ungültiger file", http.StatusBadRequest)
		return
	}
	name = strings.TrimSpace(name)
	// Only plain basenames are accepted (no separators, no traversal).
	if name == "" ||
		strings.ContainsAny(name, `/\`) ||
		filepath.Base(name) != name {
		http.Error(w, "ungültiger file", http.StatusBadRequest)
		return
	}
	switch strings.ToLower(filepath.Ext(name)) {
	case ".mp4", ".ts":
	default:
		http.Error(w, "nicht erlaubt", http.StatusForbidden)
		return
	}
	cfg := getSettings()
	doneAbs, resErr := resolvePathRelativeToApp(cfg.DoneDir)
	if resErr != nil {
		http.Error(w, "doneDir auflösung fehlgeschlagen: "+resErr.Error(), http.StatusInternalServerError)
		return
	}
	if strings.TrimSpace(doneAbs) == "" {
		http.Error(w, "doneDir ist leer", http.StatusBadRequest)
		return
	}
	// The source may sit in done/, done/<subdir>, keep/ or keep/<subdir>.
	src, from, info, lookErr := resolveDoneFileByName(doneAbs, name)
	if lookErr != nil {
		http.Error(w, "datei nicht gefunden", http.StatusNotFound)
		return
	}
	if info != nil && info.IsDir() {
		http.Error(w, "ist ein verzeichnis", http.StatusBadRequest)
		return
	}
	// Toggle the prefix; the rename stays within the file's actual directory.
	const hot = "HOT "
	var renamed string
	if strings.HasPrefix(name, hot) {
		renamed = name[len(hot):]
	} else {
		renamed = hot + name
	}
	dst := filepath.Join(filepath.Dir(src), renamed)
	switch _, statErr := os.Stat(dst); {
	case statErr == nil:
		http.Error(w, "ziel existiert bereits", http.StatusConflict)
		return
	case !os.IsNotExist(statErr):
		http.Error(w, "stat ziel fehlgeschlagen: "+statErr.Error(), http.StatusInternalServerError)
		return
	}
	if mvErr := renameWithRetry(src, dst); mvErr != nil {
		if runtime.GOOS == "windows" && isSharingViolation(mvErr) {
			http.Error(w, "rename fehlgeschlagen (Datei wird gerade abgespielt). Bitte erneut versuchen.", http.StatusConflict)
			return
		}
		http.Error(w, "rename fehlgeschlagen: "+mvErr.Error(), http.StatusInternalServerError)
		return
	}
	// No generated-asset rename on purpose: assets remain canonical (without HOT).
	canonicalID := stripHotPrefix(strings.TrimSuffix(name, filepath.Ext(name)))
	renameJobsOutputBasename(name, renamed)
	notifyDoneChanged()
	notifyJobsChanged()
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Cache-Control", "no-store")
	_ = json.NewEncoder(w).Encode(map[string]any{
		"ok":          true,
		"oldFile":     name,
		"newFile":     renamed,
		"canonicalID": canonicalID,
		"from":        from, // "done" | "keep"
	})
}