// backend\main.go
package main

import (
	"bufio"
	"bytes"
	"context"
	"encoding/binary"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"math"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"regexp"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/grafov/m3u8"
	gocpu "github.com/shirou/gopsutil/v3/cpu"
	godisk "github.com/shirou/gopsutil/v3/disk"
)

// roomDossierRegexp extracts the JSON-escaped room dossier embedded in a room's HTML page.
var roomDossierRegexp = regexp.MustCompile(`window\.initialRoomDossier = "(.*?)"`)

// JobStatus is the lifecycle state of a RecordJob.
type JobStatus string

const (
	JobRunning  JobStatus = "running"
	JobPostwork JobStatus = "postwork" // ✅ NEW: recording is over, post-work still running
	JobFinished JobStatus = "finished"
	JobStopped  JobStatus = "stopped"
	JobFailed   JobStatus = "failed"
)

// RecordJob is the central bookkeeping record for one recording.
// Exported fields are serialized to the frontend; unexported fields and
// fields tagged `json:"-"` are runtime-only state guarded by jobsMu (or the
// per-job mutexes below).
type RecordJob struct {
	ID              string     `json:"id"`
	SourceURL       string     `json:"sourceUrl"`
	Output          string     `json:"output"`
	Status          JobStatus  `json:"status"`
	StartedAt       time.Time  `json:"startedAt"`
	EndedAt         *time.Time `json:"endedAt,omitempty"`
	StartedAtMs     int64      `json:"startedAtMs,omitempty"`
	EndedAtMs       int64      `json:"endedAtMs,omitempty"`
	DurationSeconds float64    `json:"durationSeconds,omitempty"`
	SizeBytes       int64      `json:"sizeBytes,omitempty"`
	VideoWidth      int        `json:"videoWidth,omitempty"`
	VideoHeight     int        `json:"videoHeight,omitempty"`
	FPS             float64    `json:"fps,omitempty"`
	Meta            *videoMeta `json:"meta,omitempty"`
	Hidden          bool       `json:"-"`
	Error           string     `json:"error,omitempty"`

	PreviewDir       string    `json:"-"`
	previewCmd       *exec.Cmd `json:"-"`
	LiveThumbStarted bool      `json:"-"`

	// ✅ Preview status (e.g. private/offline derived from ffmpeg HTTP errors)
	PreviewState    string `json:"previewState,omitempty"`    // "", "private", "offline", "error"
	PreviewStateAt  string `json:"previewStateAt,omitempty"`  // RFC3339Nano
	PreviewStateMsg string `json:"previewStateMsg,omitempty"` // short info

	// Thumbnail cache (prevents one ffmpeg run per HTTP request)
	previewMu     sync.Mutex `json:"-"`
	previewWebp   []byte     `json:"-"`
	previewWebpAt time.Time  `json:"-"`
	previewGen    bool       `json:"-"`

	PreviewM3U8    string             `json:"-"` // HLS url that ffmpeg reads as input
	PreviewCookie  string             `json:"-"` // Cookie header (if needed)
	PreviewUA      string             `json:"-"` // user-agent
	previewCancel  context.CancelFunc `json:"-"`
	previewLastHit time.Time          `json:"-"`
	previewStartMu sync.Mutex         `json:"-"`

	// ✅ Frontend progress while stopping/finalizing
	Phase    string `json:"phase,omitempty"`    // stopping | remuxing | moving
	Progress int    `json:"progress,omitempty"` // 0..100

	PostWorkKey string             `json:"postWorkKey,omitempty"`
	PostWork    *PostWorkKeyStatus `json:"postWork,omitempty"`

	cancel context.CancelFunc `json:"-"`
}

// dummyResponseWriter is a no-op http.ResponseWriter used to invoke handlers internally.
type dummyResponseWriter struct {
	h http.Header
}

// ffprobeStreamInfo mirrors the subset of ffprobe's per-stream JSON output we need.
type ffprobeStreamInfo struct {
	Width        int    `json:"width"`
	Height       int    `json:"height"`
	AvgFrameRate string `json:"avg_frame_rate"`
	RFrameRate   string `json:"r_frame_rate"`
}

// ffprobeInfo is the top-level ffprobe JSON envelope.
type ffprobeInfo struct {
	Streams []ffprobeStreamInfo `json:"streams"`
}

// parseFFRate parses an ffprobe frame-rate string, either rational
// ("30000/1001") or plain ("25"). Returns 0 for empty, "0/0",
// zero-denominator or unparsable input.
func parseFFRate(s string) float64 {
	s = strings.TrimSpace(s)
	if s == "" || s == "0/0" {
		return 0
	}
	// "30000/1001"
	if a, b, ok := strings.Cut(s, "/"); ok {
		num, err1 := strconv.ParseFloat(strings.TrimSpace(a), 64)
		den, err2 := strconv.ParseFloat(strings.TrimSpace(b), 64)
		if err1 == nil && err2 == nil && den != 0 {
			return num / den
		}
		return 0
	}
	// "25"
	f, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return 0
	}
	return f
}

// probeVideoProps asks ffprobe for width/height/fps of the first video stream.
func probeVideoProps(ctx context.Context, filePath string) (w int, h int, fps float64, err error) {
	filePath = strings.TrimSpace(filePath)
	if filePath == "" {
		return 0, 0, 0, fmt.Errorf("empty path")
	}
	cmd :=
exec.CommandContext(ctx, ffprobePath,
		"-v", "error",
		"-select_streams", "v:0",
		"-show_entries", "stream=width,height,avg_frame_rate,r_frame_rate",
		"-of", "json",
		filePath,
	)
	out, err := cmd.Output()
	if err != nil {
		return 0, 0, 0, err
	}
	var info ffprobeInfo
	if err := json.Unmarshal(out, &info); err != nil {
		return 0, 0, 0, err
	}
	if len(info.Streams) == 0 {
		return 0, 0, 0, fmt.Errorf("no video stream")
	}
	s := info.Streams[0]
	w, h = s.Width, s.Height
	// prefer avg_frame_rate, fall back to r_frame_rate
	fps = parseFFRate(s.AvgFrameRate)
	if fps <= 0 {
		fps = parseFFRate(s.RFrameRate)
	}
	return w, h, fps, nil
}

// metaJSONPathForAssetID returns <generated-meta-root>/<assetID>/meta.json.
func metaJSONPathForAssetID(assetID string) (string, error) {
	root, err := generatedMetaRoot()
	if err != nil {
		return "", err
	}
	if strings.TrimSpace(root) == "" {
		return "", fmt.Errorf("generated/meta root leer")
	}
	return filepath.Join(root, assetID, "meta.json"), nil
}

// readVideoMetaIfValid loads meta.json and accepts it only if it still matches
// the file (size + mtime) and carries a plausible duration.
func readVideoMetaIfValid(metaPath string, fi os.FileInfo) (*videoMeta, bool) {
	b, err := os.ReadFile(metaPath)
	if err != nil || len(b) == 0 {
		return nil, false
	}
	var m videoMeta
	if err := json.Unmarshal(b, &m); err != nil {
		return nil, false
	}
	// only accept if the file is identical (so we never show stale values)
	if m.FileSize != fi.Size() || m.FileModUnix != fi.ModTime().Unix() {
		return nil, false
	}
	// minimal validation
	if m.DurationSeconds <= 0 {
		return nil, false
	}
	return &m, true
}

// ensureVideoMetaForFile returns the cached meta.json for the file or
// generates it (duration via ffprobe/ffmpeg, then width/height/fps) and
// persists it best effort.
func ensureVideoMetaForFile(ctx context.Context, fullPath string, fi os.FileInfo, sourceURL string) (*videoMeta, bool) {
	// assetID from the file name
	stem := strings.TrimSuffix(filepath.Base(fullPath), filepath.Ext(fullPath))
	assetID := stripHotPrefix(strings.TrimSpace(stem))
	if assetID == "" {
		return nil, false
	}
	// sanitize exactly like the generated folders
	var err error
	assetID, err = sanitizeID(assetID)
	if err != nil || assetID == "" {
		return nil, false
	}
	metaPath, err := metaJSONPathForAssetID(assetID)
	if err != nil {
		return nil, false
	}
	// 1) valid meta already present?
	if m, ok := readVideoMetaIfValid(metaPath, fi); ok {
		return m, true
	}
	// 2) otherwise regenerate (with concurrency limit)
	if ctx == nil {
		ctx = context.Background()
	}
	cctx, cancel := context.WithTimeout(ctx, 8*time.Second)
	defer cancel()
	if durSem != nil {
		if err := durSem.Acquire(cctx); err != nil {
			return nil, false
		}
		defer durSem.Release()
	}
	// duration
	dur, derr := durationSecondsCached(cctx, fullPath)
	if derr != nil || dur <= 0 {
		return nil, false
	}
	// video props
	w, h, fps, perr := probeVideoProps(cctx, fullPath)
	if perr != nil {
		// width/height/fps may stay 0; the duration is still useful
		w, h, fps = 0, 0, 0
	}
	// create the meta dir
	_ = os.MkdirAll(filepath.Dir(metaPath), 0o755)
	m := &videoMeta{
		Version:         2,
		DurationSeconds: dur,
		FileSize:        fi.Size(),
		FileModUnix:     fi.ModTime().Unix(),
		VideoWidth:      w,
		VideoHeight:     h,
		FPS:             fps,
		Resolution:      formatResolution(w, h),
		SourceURL:       strings.TrimSpace(sourceURL),
		UpdatedAtUnix:   time.Now().Unix(),
	}
	b, _ := json.MarshalIndent(m, "", " ")
	b = append(b, '\n')
	_ = atomicWriteFile(metaPath, b) // best effort
	return m, true
}

// attachMetaToJobBestEffort fills job size/duration/resolution from meta.json
// (generating it if needed); everything is best effort, failures are silent.
func attachMetaToJobBestEffort(ctx context.Context, job *RecordJob, fullPath string) {
	if job == nil {
		return
	}
	fullPath = strings.TrimSpace(fullPath)
	if fullPath == "" {
		return
	}
	// stat
	fi, err := os.Stat(fullPath)
	if err != nil || fi == nil || fi.IsDir() {
		return
	}
	// always carry the size (makes sorting/display easier)
	if job.SizeBytes <= 0 {
		job.SizeBytes = fi.Size()
	}
	// read/create meta.json (best effort)
	m, ok := ensureVideoMetaForFileBestEffort(ctx, fullPath, job.SourceURL)
	if !ok || m == nil {
		return
	}
	// optionally send the complete meta along
	job.Meta = m
	// and also fill the "top level" fields (convenient for the frontend)
	if job.DurationSeconds <= 0 && m.DurationSeconds > 0 {
		job.DurationSeconds = m.DurationSeconds
	}
	if job.VideoWidth <= 0 && m.VideoWidth > 0 {
		job.VideoWidth = m.VideoWidth
	}
	if job.VideoHeight <= 0 && m.VideoHeight > 0 {
		job.VideoHeight = m.VideoHeight
	}
	if job.FPS <= 0 && m.FPS > 0 {
		job.FPS = m.FPS
	}
}

// ensureVideoMetaForFileBestEffort:
//   - first tries real generation (ffprobe/ffmpeg) via ensureVideoMetaForFile
//   - if that fails but durationSecondsCacheOnly already knows something:
//     writes a duration-only meta.json so future reads come "from meta.json".
func ensureVideoMetaForFileBestEffort(ctx context.Context, fullPath string, sourceURL string) (*videoMeta, bool) {
	fullPath = strings.TrimSpace(fullPath)
	if fullPath == "" {
		return nil, false
	}
	fi, err := os.Stat(fullPath)
	if err != nil || fi == nil || fi.IsDir() || fi.Size() <= 0 {
		return nil, false
	}
	// 1) normal path: create/read meta (ffprobe/ffmpeg)
	if m, ok := ensureVideoMetaForFile(ctx, fullPath, fi, sourceURL); ok && m != nil {
		return m, true
	}
	// 2) fallback: duration already in the RAM cache -> persist a duration-only meta.json
	dur := durationSecondsCacheOnly(fullPath, fi)
	if dur <= 0 {
		return nil, false
	}
	stem := strings.TrimSuffix(filepath.Base(fullPath), filepath.Ext(fullPath))
	assetID := stripHotPrefix(strings.TrimSpace(stem))
	if assetID == "" {
		return nil, false
	}
	// NOTE(review): unlike ensureVideoMetaForFile, assetID is NOT run through
	// sanitizeID here — verify the folder naming stays consistent.
	metaPath, err := metaJSONPathForAssetID(assetID)
	if err != nil || strings.TrimSpace(metaPath) == "" {
		return nil, false
	}
	_ = os.MkdirAll(filepath.Dir(metaPath), 0o755)
	_ = writeVideoMetaDuration(metaPath, fi, dur, sourceURL)
	// re-read/validate
	if m, ok := readVideoMetaIfValid(metaPath, fi); ok && m != nil {
		return m, true
	}
	return nil, false
}

// Header lazily allocates the header map.
func (d *dummyResponseWriter) Header() http.Header {
	if d.h == nil {
		d.h = make(http.Header)
	}
	return d.h
}

// Write discards the payload but reports it as written.
func (d *dummyResponseWriter) Write(b []byte) (int, error) { return len(b), nil }

// WriteHeader is a no-op.
func (d *dummyResponseWriter) WriteHeader(statusCode int) {}

var (
	jobs   = map[string]*RecordJob{}
	jobsMu = sync.Mutex{}
)

var serverStartedAt = time.Now()

// lastCPUUsageBits stores a float64 (math.Float64bits) updated atomically.
var lastCPUUsageBits uint64 // atomic float64 bits

func setLastCPUUsage(v float64) {
	atomic.StoreUint64(&lastCPUUsageBits, math.Float64bits(v))
}

func getLastCPUUsage() float64 {
	return math.Float64frombits(atomic.LoadUint64(&lastCPUUsageBits))
}

// startPreviewIdleKiller periodically kills preview ffmpeg processes whose
// job is no longer running or that have not been requested for >10 minutes.
func startPreviewIdleKiller() {
	t := time.NewTicker(5 * time.Second)
	go func() {
		defer t.Stop()
		for range t.C {
			jobsMu.Lock()
			list := make([]*RecordJob, 0, len(jobs))
			for _, j := range jobs {
				if j != nil {
					list = append(list, j)
				}
			}
			jobsMu.Unlock()
			for _, j := range list {
				jobsMu.Lock()
				cmdRunning := j.previewCmd != nil
				last := j.previewLastHit
				st := j.Status
				jobsMu.Unlock()
				if !cmdRunning {
					continue
				}
				// job no longer running, or hover gone
				if st != JobRunning || (!last.IsZero() && time.Since(last) > 10*time.Minute) {
					stopPreview(j)
				}
			}
		}
	}()
}

func init() {
	initFFmpegSemaphores()
	startAdaptiveSemController(context.Background())
	startPreviewIdleKiller()
	initSSE()
}

// publishJob un-hides a job and notifies listeners; returns false if the job
// does not exist or was already visible.
func publishJob(jobID string) bool {
	jobsMu.Lock()
	j := jobs[jobID]
	if j == nil || !j.Hidden {
		jobsMu.Unlock()
		return false
	}
	j.Hidden = false
	jobsMu.Unlock()
	notifyJobsChanged()
	return true
}

// locate the ffmpeg binary (env, next to the EXE, or PATH)
var ffmpegPath = detectFFmpegPath()
var ffprobePath = detectFFprobePath()

// detectFFprobePath resolves ffprobe: FFPROBE_PATH env, next to ffmpeg,
// next to the executable, PATH, then plain "ffprobe".
func detectFFprobePath() string {
	// 1) env override
	if p := strings.TrimSpace(os.Getenv("FFPROBE_PATH")); p != "" {
		if abs, err := filepath.Abs(p); err == nil {
			return abs
		}
		return p
	}
	// 2) next to ffmpeg.exe (same folder)
	fp := strings.TrimSpace(ffmpegPath)
	if fp != "" && fp != "ffmpeg" {
		dir := filepath.Dir(fp)
		ext := ""
		if strings.HasSuffix(strings.ToLower(fp), ".exe") {
			ext = ".exe"
		}
		c := filepath.Join(dir, "ffprobe"+ext)
		if fi, err := os.Stat(c); err == nil && !fi.IsDir() {
			return c
		}
	}
	// 3) in the EXE folder
	if exe, err := os.Executable(); err == nil {
		exeDir := filepath.Dir(exe)
		candidates := []string{
			filepath.Join(exeDir, "ffprobe"),
			filepath.Join(exeDir, "ffprobe.exe"),
		}
		for _, c := range candidates {
			if fi, err := os.Stat(c); err == nil && !fi.IsDir() {
				return c
			}
		}
	}
	// 4) PATH
	if lp, err := exec.LookPath("ffprobe"); err == nil {
		if abs, err2 := filepath.Abs(lp); err2 == nil {
			return abs
		}
		return lp
	}
	return "ffprobe"
}

// ---------- Dynamic Semaphore (resizeable by load controller) ----------

// DynSem is a counting semaphore whose current limit (max) can be resized at
// runtime, bounded by a hard cap. Acquire polls; there is no wait queue.
type DynSem struct {
	mu  sync.Mutex
	in  int // currently held slots
	max int // current limit (adjustable via SetMax)
	cap int // hard upper bound for max
}

// NewDynSem builds a semaphore with the given start limit and hard cap
// (both clamped to >= 1, initial clamped to cap).
func NewDynSem(initial, cap int) *DynSem {
	if cap < 1 {
		cap = 1
	}
	if initial < 1 {
		initial = 1
	}
	if initial > cap {
		initial = cap
	}
	return &DynSem{max: initial, cap: cap}
}

// Acquire blocks (polling every 25ms) until a slot is free or ctx is done.
func (s *DynSem) Acquire(ctx context.Context) error {
	for {
		if ctx != nil && ctx.Err() != nil {
			return ctx.Err()
		}
		s.mu.Lock()
		if s.in < s.max {
			s.in++
			s.mu.Unlock()
			return nil
		}
		s.mu.Unlock()
		time.Sleep(25 * time.Millisecond)
	}
}

// Release returns a slot; extra releases are ignored.
func (s *DynSem) Release() {
	s.mu.Lock()
	if s.in > 0 {
		s.in--
	}
	s.mu.Unlock()
}

// SetMax adjusts the limit, clamped to [1, cap].
func (s *DynSem) SetMax(n int) {
	if n < 1 {
		n = 1
	}
	if n > s.cap {
		n = s.cap
	}
	s.mu.Lock()
	s.max = n
	s.mu.Unlock()
}

func (s *DynSem) Max() int { s.mu.Lock(); defer s.mu.Unlock(); return s.max }

func (s *DynSem) Cap() int { s.mu.Lock(); defer s.mu.Unlock(); return s.cap }

func (s *DynSem) InUse() int { s.mu.Lock(); defer s.mu.Unlock(); return s.in }

// Worker pools (initialized by initFFmpegSemaphores, resized by the adaptive controller).
var (
	genSem     *DynSem
	previewSem *DynSem
	thumbSem   *DynSem
	durSem     *DynSem
)

func clamp(n, lo, hi int) int {
	if n < lo {
		return lo
	}
	if n > hi {
		return hi
	}
	return n
}

// envInt reads an integer env var; ok=false when unset or unparsable.
func envInt(name string) (int, bool) {
	v := strings.TrimSpace(os.Getenv(name))
	if v == "" {
		return 0, false
	}
	n, err := strconv.Atoi(v)
	if err != nil {
		return 0, false
	}
	return n, true
}

// initFFmpegSemaphores sizes the worker semaphores from the CPU count,
// with ENV overrides for both start values and hard caps.
func initFFmpegSemaphores() {
	cpu := runtime.NumCPU()
	if cpu <= 0 {
		cpu = 2
	}
	// defaults (heuristic)
	previewN := clamp((cpu+1)/2, 1, 6) // x264 live -> conservative
	thumbN := clamp(cpu, 2, 12)        // frames -> may go higher
	genN := clamp((cpu+3)/4, 1, 4)     // preview.mp4 clips -> rather small
	durN := clamp(cpu, 2, 16)          // ffprobe: may go higher, but not unbounded
	// ENV overrides (optional)
	if n, ok := envInt("PREVIEW_WORKERS"); ok {
		previewN = clamp(n, 1, 32)
	}
	if n, ok := envInt("THUMB_WORKERS"); ok {
		thumbN = clamp(n, 1, 64)
	}
	if n, ok := envInt("GEN_WORKERS"); ok {
		genN = clamp(n, 1, 16)
	}
	if n, ok := envInt("DUR_WORKERS"); ok {
		durN = clamp(n, 1, 64)
	}
	// caps (upper bounds) – can be overridden via ENV
	previewCap := clamp(cpu, 2, 12)
	thumbCap := clamp(cpu*2, 4, 32)
	genCap := clamp((cpu+1)/2, 2, 12)
	durCap := clamp(cpu*2, 4, 32)
	if n, ok := envInt("PREVIEW_CAP"); ok {
		previewCap = clamp(n, 1, 64)
	}
	if n, ok := envInt("THUMB_CAP"); ok {
		thumbCap = clamp(n, 1, 128)
	}
	if n, ok := envInt("GEN_CAP"); ok {
		genCap = clamp(n, 1, 64)
	}
	if n, ok := envInt("DUR_CAP"); ok {
		durCap = clamp(n, 1, 128)
	}
	// initial max (start values)
	previewSem = NewDynSem(previewN, previewCap)
	thumbSem = NewDynSem(thumbN, thumbCap)
	genSem = NewDynSem(genN, genCap)
	durSem = NewDynSem(durN, durCap)
	fmt.Printf(
		"🔧 semaphores(init): preview=%d/%d thumb=%d/%d gen=%d/%d dur=%d/%d (cpu=%d)\n",
		previewSem.Max(), previewSem.Cap(),
		thumbSem.Max(), thumbSem.Cap(),
		genSem.Max(), genSem.Cap(),
		durSem.Max(), durSem.Cap(),
		cpu,
	)
	fmt.Printf(
		"🔧 semaphores: preview=%d thumb=%d gen=%d dur=%d (cpu=%d)\n",
		previewN, thumbN, genN, durN, cpu,
	)
}

// startAdaptiveSemController nudges the preview/gen/thumb limits down or up
// every 2s depending on total CPU usage vs CPU_TARGET_HI/CPU_TARGET_LO.
// Note: durSem is intentionally not adjusted here.
func startAdaptiveSemController(ctx context.Context) {
	targetHi := 85.0
	targetLo := 65.0
	if v := strings.TrimSpace(os.Getenv("CPU_TARGET_HI")); v != "" {
		if f, err := strconv.ParseFloat(v, 64); err == nil {
			targetHi = f
		}
	}
	if v := strings.TrimSpace(os.Getenv("CPU_TARGET_LO")); v != "" {
		if f, err := strconv.ParseFloat(v, 64); err == nil {
			targetLo = f
		}
	}
	// warmup (the first measurement can be 0)
	_, _ = gocpu.Percent(200*time.Millisecond, false)
	t := time.NewTicker(2 * time.Second)
	go func() {
		defer t.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-t.C:
				p, err := gocpu.Percent(0, false)
				if err != nil || len(p) == 0 {
					continue
				}
				usage := p[0]
				setLastCPUUsage(usage)
				// preview is the most expensive → conservative
				if usage > targetHi {
					previewSem.SetMax(previewSem.Max() - 1)
					genSem.SetMax(genSem.Max() - 1)
					thumbSem.SetMax(thumbSem.Max() - 1)
				} else if usage < targetLo {
					previewSem.SetMax(previewSem.Max() + 1)
					genSem.SetMax(genSem.Max() + 1)
					thumbSem.SetMax(thumbSem.Max() + 1)
				}
				// optional debug:
				// fmt.Printf("CPU %.1f%% -> preview=%d thumb=%d gen=%d\n", usage, previewSem.Max(), thumbSem.Max(), genSem.Max())
			}
		}
	}()
}

// durEntry caches a probed media duration keyed by file size+mtime.
type durEntry struct {
	size int64
	mod  time.Time
	sec  float64
}

var durCache = struct {
	mu sync.Mutex
	m  map[string]durEntry
}{m: map[string]durEntry{}}

// startedAtFromFilenameRe matches "<name>_D_M_YYYY__H-MM-SS" style file stems.
var startedAtFromFilenameRe = regexp.MustCompile(
	`^(.+)_([0-9]{1,2})_([0-9]{1,2})_([0-9]{4})__([0-9]{1,2})-([0-9]{2})-([0-9]{2})$`,
)

// buildPerfSnapshot assembles the /api/perf payload: CPU, disk usage and
// dynamic thresholds, goroutines, memory and semaphore stats.
func buildPerfSnapshot() map[string]any {
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	s := getSettings()
	recordDir, _ := resolvePathRelativeToApp(s.RecordDir)
	var diskFreeBytes uint64
	var diskTotalBytes uint64
	var diskUsedPercent float64
	diskPath := recordDir
	if recordDir != "" {
		if u, err := godisk.Usage(recordDir); err == nil && u != nil {
			diskFreeBytes = u.Free
			diskTotalBytes = u.Total
			diskUsedPercent = u.UsedPercent
		}
	}
	// ✅ dynamic disk thresholds (2× inFlight, resume = +3GB)
	pauseGB, resumeGB, inFlight, pauseNeed, resumeNeed := computeDiskThresholds()
	resp := map[string]any{
		"ts":        time.Now().UTC().Format(time.RFC3339Nano),
		"serverMs":  time.Now().UTC().UnixMilli(), // ✅ for "ping" in the frontend (approx)
		"uptimeSec": time.Since(serverStartedAt).Seconds(),
		"cpuPercent": func() float64 {
			v := getLastCPUUsage()
			if math.IsNaN(v) || math.IsInf(v, 0) || v < 0 {
				return 0
			}
			return v
		}(),
		"diskPath":        diskPath,
		"diskFreeBytes":   diskFreeBytes,
		"diskTotalBytes":  diskTotalBytes,
		"diskUsedPercent": diskUsedPercent,
		"diskEmergency":   atomic.LoadInt32(&diskEmergency) == 1,
		// ✅ instead of LowDiskPauseBelowGB from settings
		"diskPauseBelowGB":  pauseGB,
		"diskResumeAboveGB": resumeGB,
		// ✅ optional, but very helpful (debug/UI)
		"diskInFlightBytes":   inFlight,
		"diskInFlightHuman":   formatBytesSI(u64ToI64(inFlight)),
		"diskPauseNeedBytes":  pauseNeed,
		"diskPauseNeedHuman":  formatBytesSI(u64ToI64(pauseNeed)),
		"diskResumeNeedBytes": resumeNeed,
		"diskResumeNeedHuman": formatBytesSI(u64ToI64(resumeNeed)),
		"goroutines":          runtime.NumGoroutine(),
		"mem": map[string]any{
			"alloc": ms.Alloc,
			"heapAlloc":
ms.HeapAlloc, "heapInuse": ms.HeapInuse, "sys": ms.Sys, "numGC": ms.NumGC, }, } sem := map[string]any{} if genSem != nil { sem["gen"] = map[string]any{"inUse": genSem.InUse(), "cap": genSem.Cap(), "max": genSem.Max()} } if previewSem != nil { sem["preview"] = map[string]any{"inUse": previewSem.InUse(), "cap": previewSem.Cap(), "max": previewSem.Max()} } if thumbSem != nil { sem["thumb"] = map[string]any{"inUse": thumbSem.InUse(), "cap": thumbSem.Cap(), "max": thumbSem.Max()} } if durSem != nil { sem["dur"] = map[string]any{"inUse": durSem.InUse(), "cap": durSem.Cap(), "max": durSem.Max()} } if len(sem) > 0 { resp["sem"] = sem } return resp } func pingHandler(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet && r.Method != http.MethodHead { http.Error(w, "Nur GET/HEAD erlaubt", http.StatusMethodNotAllowed) return } w.Header().Set("Cache-Control", "no-store") w.WriteHeader(http.StatusNoContent) } func perfHandler(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet { http.Error(w, "Nur GET erlaubt", http.StatusMethodNotAllowed) return } resp := buildPerfSnapshot() w.Header().Set("Content-Type", "application/json") w.Header().Set("Cache-Control", "no-store") _ = json.NewEncoder(w).Encode(resp) } func perfStreamHandler(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet { http.Error(w, "Nur GET erlaubt", http.StatusMethodNotAllowed) return } fl, ok := w.(http.Flusher) if !ok { http.Error(w, "Streaming nicht unterstützt", http.StatusInternalServerError) return } // Optional: client kann Intervall mitgeben: /api/perf/stream?ms=5000 ms := 5000 if q := r.URL.Query().Get("ms"); q != "" { if v, err := strconv.Atoi(q); err == nil { // clamp: 1000..30000 if v < 1000 { v = 1000 } if v > 30000 { v = 30000 } ms = v } } w.Header().Set("Content-Type", "text/event-stream; charset=utf-8") w.Header().Set("Cache-Control", "no-store") w.Header().Set("Connection", "keep-alive") // hilfreich hinter nginx/proxies: 
w.Header().Set("X-Accel-Buffering", "no") ctx := r.Context() // sofort erstes Event schicken send := func() error { payload := buildPerfSnapshot() var buf bytes.Buffer if err := json.NewEncoder(&buf).Encode(payload); err != nil { return err } // event: perf _, _ = io.WriteString(w, "event: perf\n") _, _ = io.WriteString(w, "data: ") _, _ = w.Write(buf.Bytes()) _, _ = io.WriteString(w, "\n") fl.Flush() return nil } // initial _ = send() t := time.NewTicker(time.Duration(ms) * time.Millisecond) hb := time.NewTicker(15 * time.Second) // heartbeat gegen Proxy timeouts defer t.Stop() defer hb.Stop() for { select { case <-ctx.Done(): return case <-t.C: _ = send() case <-hb.C: // SSE Kommentar als Heartbeat _, _ = io.WriteString(w, ": keep-alive\n\n") fl.Flush() } } } // ------------------------- // Low disk space guard // - pausiert Autostart // - stoppt laufende Downloads // ------------------------- const ( diskGuardInterval = 5 * time.Second ) var diskEmergency int32 // 0=false, 1=true type diskStatusResp struct { Emergency bool `json:"emergency"` PauseGB int `json:"pauseGB"` ResumeGB int `json:"resumeGB"` FreeBytes uint64 `json:"freeBytes"` FreeBytesHuman string `json:"freeBytesHuman"` RecordPath string `json:"recordPath"` } func diskStatusHandler(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet && r.Method != http.MethodHead { http.Error(w, "Nur GET/HEAD erlaubt", http.StatusMethodNotAllowed) return } s := getSettings() pauseGB, resumeGB, _, _, _ := computeDiskThresholds() recordDirAbs, _ := resolvePathRelativeToApp(s.RecordDir) dir := strings.TrimSpace(recordDirAbs) if dir == "" { dir = strings.TrimSpace(s.RecordDir) } free := uint64(0) if dir != "" { if u, err := godisk.Usage(dir); err == nil && u != nil { free = u.Free } } resp := diskStatusResp{ Emergency: atomic.LoadInt32(&diskEmergency) == 1, PauseGB: pauseGB, ResumeGB: resumeGB, FreeBytes: free, FreeBytesHuman: formatBytesSI(int64(free)), RecordPath: dir, } 
	w.Header().Set("Cache-Control", "no-store")
	writeJSON(w, http.StatusOK, resp)
}

// stopJobsInternal marks jobs as "stopping" and cancels them (incl. killing
// the preview ffmpeg). Uses 2 notify pushes so the UI sees phase/progress
// immediately.
func stopJobsInternal(list []*RecordJob) {
	if len(list) == 0 {
		return
	}
	type payload struct {
		cmd    *exec.Cmd
		cancel context.CancelFunc
	}
	pl := make([]payload, 0, len(list))
	jobsMu.Lock()
	for _, job := range list {
		if job == nil {
			continue
		}
		job.Phase = "stopping"
		job.Progress = 10
		pl = append(pl, payload{cmd: job.previewCmd, cancel: job.cancel})
		job.previewCmd = nil
	}
	jobsMu.Unlock()
	notifyJobsChanged() // 1) update the UI immediately (phase/progress)
	for _, p := range pl {
		if p.cmd != nil && p.cmd.Process != nil {
			_ = p.cmd.Process.Kill()
		}
		if p.cancel != nil {
			p.cancel()
		}
	}
	notifyJobsChanged() // 2) optional: push again after cancel/kill
}

// stopAllStoppableJobs stops every running job (used by the disk guard);
// returns the number of stop requests issued.
func stopAllStoppableJobs() int {
	stoppable := make([]*RecordJob, 0, 16)
	jobsMu.Lock()
	for _, j := range jobs {
		if j == nil {
			continue
		}
		if j.Status != JobRunning {
			continue
		}
		phase := strings.ToLower(strings.TrimSpace(j.Phase))
		// ✅ In a disk emergency stop EVERYTHING that is still writing.
		// We only skip jobs that are already in "stopping" anyway.
		if phase == "stopping" {
			continue
		}
		stoppable = append(stoppable, j)
	}
	jobsMu.Unlock()
	stopJobsInternal(stoppable)
	return len(stoppable)
}

// shouldAutoDeleteSmallDownload reports whether filePath falls below the
// configured auto-delete threshold.
// Returns: (shouldDelete, sizeBytes, thresholdBytes).
func shouldAutoDeleteSmallDownload(filePath string) (bool, int64, int64) {
	s := getSettings()
	if !s.AutoDeleteSmallDownloads {
		return false, 0, 0
	}
	mb := s.AutoDeleteSmallDownloadsBelowMB
	if mb <= 0 {
		return false, 0, 0
	}
	p := strings.TrimSpace(filePath)
	if p == "" {
		return false, 0, 0
	}
	// relative -> try absolute (best effort)
	if !filepath.IsAbs(p) {
		if abs, err := resolvePathRelativeToApp(p); err == nil && strings.TrimSpace(abs) != "" {
			p = abs
		}
	}
	fi, err := os.Stat(p)
	if err != nil || fi.IsDir() {
		return false, 0, int64(mb) * 1024 * 1024
	}
	size := fi.Size()
	thr := int64(mb) * 1024 * 1024
	if size > 0 && size < thr {
		return true, size, thr
	}
	return false, size, thr
}

// sizeOfPathBestEffort returns the file size, or 0 for dirs/missing/empty paths.
func sizeOfPathBestEffort(p string) uint64 {
	p = strings.TrimSpace(p)
	if p == "" {
		return 0
	}
	// relative -> try absolute
	if !filepath.IsAbs(p) {
		if abs, err := resolvePathRelativeToApp(p); err == nil && strings.TrimSpace(abs) != "" {
			p = abs
		}
	}
	fi, err := os.Stat(p)
	if err != nil || fi.IsDir() || fi.Size() <= 0 {
		return 0
	}
	return uint64(fi.Size())
}

// inFlightBytesForJob estimates the bytes a job is currently writing.
func inFlightBytesForJob(j *RecordJob) uint64 {
	if j == nil {
		return 0
	}
	// Prefer live-tracked bytes if available (accurate & cheap).
	if j.SizeBytes > 0 {
		return uint64(j.SizeBytes)
	}
	return sizeOfPathBestEffort(j.Output)
}

const giB = uint64(1024 * 1024 * 1024)

// computeDiskThresholds:
// Pause  = ceil( (2 * inFlightBytes) / GiB )
// Resume = Pause + 3 GB (hysteresis)
// If inFlight==0 => Pause/Resume = 0
func computeDiskThresholds() (pauseGB int, resumeGB int, inFlight uint64, pauseNeed uint64, resumeNeed uint64) {
	inFlight = sumInFlightBytes()
	if inFlight == 0 {
		return 0, 0, 0, 0, 0
	}
	need := inFlight * 2
	pauseGB = int((need + giB - 1) / giB) // ceil
	// safety cap (just in case something escalates)
	if pauseGB > 10_000 {
		pauseGB = 10_000
	}
	resumeGB = pauseGB + 3
	if resumeGB > 10_000 {
		resumeGB = 10_000
	}
	pauseNeed = uint64(pauseGB) * giB
	resumeNeed = uint64(resumeGB) * giB
	return
}

// ✅ Sum of "growing" data (running + remuxing etc.)
// Idea: for the TS->MP4 peak you roughly need the size of the current file
// again as reserve.
func sumInFlightBytes() uint64 {
	var sum uint64
	jobsMu.Lock()
	defer jobsMu.Unlock()
	for _, j := range jobs {
		if j == nil {
			continue
		}
		if j.Status != JobRunning {
			continue
		}
		// Take the file that is currently growing.
		// In this system that is typically j.Output (TS or temporary target).
		// If a separate "TempTS" field or similar exists: add it here.
		sum += inFlightBytesForJob(j)
	}
	return sum
}

// startDiskSpaceGuard runs in the backend and reacts even without an open browser.
// Bei wenig freiem Platz: // - Autostart pausieren // - laufende Jobs stoppen (nur Status=running und Phase leer) func startDiskSpaceGuard() { t := time.NewTicker(diskGuardInterval) defer t.Stop() for range t.C { s := getSettings() // Pfad bestimmen, auf dem wir freien Speicher prüfen recordDirAbs, _ := resolvePathRelativeToApp(s.RecordDir) dir := strings.TrimSpace(recordDirAbs) if dir == "" { dir = strings.TrimSpace(s.RecordDir) } if dir == "" { continue } u, err := godisk.Usage(dir) if err != nil || u == nil { continue } free := u.Free // ✅ Dynamische Schwellen: // Pause = ceil((2 * inFlight) / GiB) // Resume = Pause + 3 GB // pauseNeed/resumeNeed sind die benötigten freien Bytes pauseGB, resumeGB, inFlight, pauseNeed, resumeNeed := computeDiskThresholds() // Wenn nichts läuft, gibt es nichts zu reservieren. // (Optional: Emergency zurücksetzen, damit Autostart wieder frei wird.) if inFlight == 0 { if atomic.LoadInt32(&diskEmergency) == 1 { atomic.StoreInt32(&diskEmergency, 0) fmt.Printf( "✅ [disk] No active jobs: emergency cleared (free=%s, path=%s)\n", formatBytesSI(u64ToI64(free)), dir, ) } continue } // ✅ Hysterese: erst ab resumeNeed wieder "bereit" if atomic.LoadInt32(&diskEmergency) == 1 { if free >= resumeNeed { atomic.StoreInt32(&diskEmergency, 0) fmt.Printf( "✅ [disk] Recovered: free=%s (%dB) (>= %s, %dB) emergency cleared (pause=%dGB resume=%dGB inFlight=%s, %dB)\n", formatBytesSI(u64ToI64(free)), free, formatBytesSI(u64ToI64(resumeNeed)), resumeNeed, pauseGB, resumeGB, formatBytesSI(u64ToI64(inFlight)), inFlight, ) } continue } // ✅ Normalzustand: solange free >= pauseNeed, nichts tun if free >= pauseNeed { continue } // ✅ Trigger: Notbremse aktivieren, Jobs stoppen atomic.StoreInt32(&diskEmergency, 1) fmt.Printf( "🛑 [disk] Low space: free=%s (%dB) (< %s, %dB, pause=%dGB resume=%dGB, inFlight=%s, %dB) -> stop jobs + block autostart via diskEmergency (path=%s)\n", formatBytesSI(u64ToI64(free)), free, formatBytesSI(u64ToI64(pauseNeed)), pauseNeed, 
pauseGB, resumeGB, formatBytesSI(u64ToI64(inFlight)), inFlight, dir, ) stopped := stopAllStoppableJobs() if stopped > 0 { fmt.Printf("🛑 [disk] Stop requested for %d job(s)\n", stopped) } } } func setJobPhase(job *RecordJob, phase string, progress int) { if progress < 0 { progress = 0 } if progress > 100 { progress = 100 } jobsMu.Lock() job.Phase = phase job.Progress = progress jobsMu.Unlock() notifyJobsChanged() } func durationSecondsCached(ctx context.Context, path string) (float64, error) { fi, err := os.Stat(path) if err != nil { return 0, err } durCache.mu.Lock() if e, ok := durCache.m[path]; ok && e.size == fi.Size() && e.mod.Equal(fi.ModTime()) && e.sec > 0 { durCache.mu.Unlock() return e.sec, nil } durCache.mu.Unlock() // ✅ Concurrency Limit für ffprobe/ffmpeg if durSem != nil { // kurzer Acquire: wenn Server busy ist, lieber später erneut probieren cctx := ctx if cctx == nil { cctx = context.Background() } acqCtx, cancel := context.WithTimeout(cctx, 2*time.Second) defer cancel() if err := durSem.Acquire(acqCtx); err != nil { return 0, err } defer durSem.Release() } // 1) ffprobe (bevorzugt) cmd := exec.CommandContext(ctx, ffprobePath, "-v", "error", "-show_entries", "format=duration", "-of", "default=noprint_wrappers=1:nokey=1", path, ) out, err := cmd.Output() if err == nil { s := strings.TrimSpace(string(out)) sec, err2 := strconv.ParseFloat(s, 64) if err2 == nil && sec > 0 { durCache.mu.Lock() durCache.m[path] = durEntry{size: fi.Size(), mod: fi.ModTime(), sec: sec} durCache.mu.Unlock() return sec, nil } } // 2) Fallback: ffmpeg -i "Duration: HH:MM:SS.xx" parsen cmd2 := exec.CommandContext(ctx, ffmpegPath, "-i", path) b, _ := cmd2.CombinedOutput() // ffmpeg liefert hier oft ExitCode!=0, Output ist trotzdem da text := string(b) re := regexp.MustCompile(`Duration:\s*(\d+):(\d+):(\d+(?:\.\d+)?)`) m := re.FindStringSubmatch(text) if len(m) != 4 { return 0, fmt.Errorf("duration not found") } hh, _ := strconv.ParseFloat(m[1], 64) mm, _ := 
strconv.ParseFloat(m[2], 64) ss, _ := strconv.ParseFloat(m[3], 64) sec := hh*3600 + mm*60 + ss if sec <= 0 { return 0, fmt.Errorf("invalid duration") } durCache.mu.Lock() durCache.m[path] = durEntry{size: fi.Size(), mod: fi.ModTime(), sec: sec} durCache.mu.Unlock() return sec, nil } func detectFFmpegPath() string { // 0. Settings-Override (ffmpegPath in recorder_settings.json / UI) s := getSettings() if p := strings.TrimSpace(s.FFmpegPath); p != "" { // Relativ zur EXE auflösen, falls nötig if !filepath.IsAbs(p) { if abs, err := resolvePathRelativeToApp(p); err == nil { p = abs } } return p } // 1. Umgebungsvariable FFMPEG_PATH erlaubt Override if p := strings.TrimSpace(os.Getenv("FFMPEG_PATH")); p != "" { if abs, err := filepath.Abs(p); err == nil { return abs } return p } // 2. ffmpeg / ffmpeg.exe im selben Ordner wie dein Go-Programm if exe, err := os.Executable(); err == nil { exeDir := filepath.Dir(exe) candidates := []string{ filepath.Join(exeDir, "ffmpeg"), filepath.Join(exeDir, "ffmpeg.exe"), } for _, c := range candidates { if fi, err := os.Stat(c); err == nil && !fi.IsDir() { return c } } } // 3. ffmpeg über PATH suchen und absolut machen if lp, err := exec.LookPath("ffmpeg"); err == nil { if abs, err2 := filepath.Abs(lp); err2 == nil { return abs } return lp } // 4. Fallback: plain "ffmpeg" – kann dann immer noch fehlschlagen return "ffmpeg" } func removeGeneratedForID(id string) { // ✅ canonical id: wie beim Erzeugen der generated Ordner id = strings.TrimSpace(id) if id == "" { return } // falls jemand "file.mp4" übergibt id = strings.TrimSuffix(id, filepath.Ext(id)) // HOT Prefix weg id = stripHotPrefix(id) // wichtig: exakt gleiche Normalisierung wie überall sonst (Ordnernamen!) var err error id, err = sanitizeID(id) if err != nil || id == "" { return } // 1) NEU: generated/meta// ... 
	if root, _ := generatedMetaRoot(); strings.TrimSpace(root) != "" {
		_ = os.RemoveAll(filepath.Join(root, id))
	}
	// (optional but sensible) 1b) legacy: generated/<id>/ (in case old assets still exist)
	if root, _ := generatedRoot(); strings.TrimSpace(root) != "" {
		_ = os.RemoveAll(filepath.Join(root, id))
	}
	// 2) clean up temp preview segments (HLS)
	// (%TEMP%/rec_preview/<id>)
	_ = os.RemoveAll(filepath.Join(os.TempDir(), "rec_preview", id))
}

// purgeDurationCacheForPath drops the cached duration for a path.
func purgeDurationCacheForPath(p string) {
	p = strings.TrimSpace(p)
	if p == "" {
		return
	}
	durCache.mu.Lock()
	delete(durCache.m, p)
	durCache.mu.Unlock()
}

// renameGenerated moves generated thumbs/teaser from oldID to newID,
// discarding the old artifacts if the target already exists.
func renameGenerated(oldID, newID string) {
	thumbsRoot, _ := generatedThumbsRoot()
	teaserRoot, _ := generatedTeaserRoot()
	oldThumb := filepath.Join(thumbsRoot, oldID)
	newThumb := filepath.Join(thumbsRoot, newID)
	if _, err := os.Stat(oldThumb); err == nil {
		if _, err2 := os.Stat(newThumb); os.IsNotExist(err2) {
			_ = os.Rename(oldThumb, newThumb)
		} else {
			_ = os.RemoveAll(oldThumb)
		}
	}
	oldTeaser := filepath.Join(teaserRoot, oldID+".mp4")
	newTeaser := filepath.Join(teaserRoot, newID+".mp4")
	if _, err := os.Stat(oldTeaser); err == nil {
		if _, err2 := os.Stat(newTeaser); os.IsNotExist(err2) {
			_ = os.Rename(oldTeaser, newTeaser)
		} else {
			_ = os.Remove(oldTeaser)
		}
	}
}

// --- Shared status values for MFC ---
type Status int

const (
	StatusUnknown Status = iota
	StatusPublic
	StatusPrivate
	StatusOffline
	StatusNotExist
)

// String returns the upper-case wire name of the status.
func (s Status) String() string {
	switch s {
	case StatusPublic:
		return "PUBLIC"
	case StatusPrivate:
		return "PRIVATE"
	case StatusOffline:
		return "OFFLINE"
	case StatusNotExist:
		return "NOTEXIST"
	default:
		return "UNKNOWN"
	}
}

// HTTPClient wraps http.Client + headers/cookies (like internal.Req in the DVR)
type HTTPClient struct {
	client    *http.Client
	userAgent string
}

// NewHTTPClient creates the shared HTTP client (10s timeout; default UA when empty).
func NewHTTPClient(userAgent string) *HTTPClient {
	if userAgent == "" {
		// default, in case no UA is passed
		userAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"
	}
	return &HTTPClient{
		client: &http.Client{
			Timeout: 10 * time.Second,
		},
		userAgent: userAgent,
	}
}

// NewRequest builds a request with User-Agent + cookies.
func (h *HTTPClient) NewRequest(ctx context.Context, method, url, cookieStr string) (*http.Request, error) {
	req, err := http.NewRequestWithContext(ctx, method, url, nil)
	if err != nil {
		return nil, err
	}
	// base headers that are always set
	if h.userAgent != "" {
		req.Header.Set("User-Agent", h.userAgent)
	} else {
		req.Header.Set("User-Agent", "Mozilla/5.0")
	}
	req.Header.Set("Accept", "*/*")
	// cookie string like "name=value; foo=bar"
	addCookiesFromString(req, cookieStr)
	return req, nil
}

// FetchPage loads a page and detects simple protection pages (Cloudflare / age gate).
func (h *HTTPClient) FetchPage(ctx context.Context, url, cookieStr string) (string, error) {
	req, err := h.NewRequest(ctx, http.MethodGet, url, cookieStr)
	if err != nil {
		return "", err
	}
	resp, err := h.client.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	body := string(data)
	// somewhat more telling errors than just "room dossier not found"
	if strings.Contains(body, "Just a moment...") {
		return "", errors.New("Schutzseite von Cloudflare erhalten (\"Just a moment...\") – kein Room-HTML")
	}
	if strings.Contains(body, "Verify your age") {
		return "", errors.New("Altersverifikationsseite erhalten – kein Room-HTML")
	}
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("HTTP %d beim Laden von %s", resp.StatusCode, url)
	}
	return body, nil
}

// remuxTSToMP4 copies all streams into an MP4 container:
// ffmpeg -y -i in.ts -c copy -movflags +faststart out.mp4
func remuxTSToMP4(tsPath, mp4Path string) error {
	cmd := exec.Command(ffmpegPath,
		"-y",
		"-i", tsPath,
		"-c", "copy",
		"-movflags", "+faststart",
		mp4Path,
	)
	var stderr bytes.Buffer
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("ffmpeg remux failed: %v (%s)", err, stderr.String())
	}
	return nil
}

// parseFFmpegOutTime parses "HH:MM:SS.ff" into seconds (0 on malformed input).
func parseFFmpegOutTime(v string) float64 {
	v =
strings.TrimSpace(v)
	if v == "" {
		return 0
	}
	parts := strings.Split(v, ":")
	if len(parts) != 3 {
		return 0
	}
	h, err1 := strconv.Atoi(parts[0])
	m, err2 := strconv.Atoi(parts[1])
	s, err3 := strconv.ParseFloat(parts[2], 64) // seconds may carry decimals
	if err1 != nil || err2 != nil || err3 != nil {
		return 0
	}
	return float64(h*3600+m*60) + s
}

// remuxTSToMP4WithProgress remuxes tsPath into mp4Path (stream copy +
// faststart) and reports progress in [0..1] via onRatio. Progress is
// derived from ffmpeg's -progress key=value output: preferably processed
// time vs. durationSec, falling back to written bytes vs. inSize.
func remuxTSToMP4WithProgress(
	ctx context.Context,
	tsPath, mp4Path string,
	durationSec float64,
	inSize int64,
	onRatio func(r float64),
) error {
	// ffmpeg progress arrives on stdout as key=value lines
	cmd := exec.CommandContext(ctx, ffmpegPath,
		"-y",
		"-nostats",
		"-progress", "pipe:1",
		"-i", tsPath,
		"-c", "copy",
		"-movflags", "+faststart",
		mp4Path,
	)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}
	var stderr bytes.Buffer
	cmd.Stderr = &stderr
	if err := cmd.Start(); err != nil {
		return err
	}
	sc := bufio.NewScanner(stdout)
	sc.Buffer(make([]byte, 0, 64*1024), 1024*1024)
	var (
		lastOutSec  float64
		lastTotalSz int64
	)
	send := func(outSec float64, totalSize int64, force bool) {
		// preferred: time/duration
		if durationSec > 0 && outSec > 0 {
			r := outSec / durationSec
			if r < 0 {
				r = 0
			}
			if r > 1 {
				r = 1
			}
			if onRatio != nil {
				onRatio(r)
			}
			return
		}
		// fallback: bytes (usually okay-ish for a plain remux)
		if inSize > 0 && totalSize > 0 {
			r := float64(totalSize) / float64(inSize)
			if r < 0 {
				r = 0
			}
			if r > 1 {
				r = 1
			}
			if onRatio != nil {
				onRatio(r)
			}
			return
		}
		// force (e.g. on "progress=end")
		if force && onRatio != nil {
			onRatio(1)
		}
	}
	for sc.Scan() {
		line := strings.TrimSpace(sc.Text())
		if line == "" {
			continue
		}
		k, v, ok := strings.Cut(line, "=")
		if !ok {
			continue
		}
		switch k {
		case "out_time_us":
			if n, err := strconv.ParseInt(strings.TrimSpace(v), 10, 64); err == nil && n > 0 {
				lastOutSec = float64(n) / 1_000_000.0
				send(lastOutSec, lastTotalSz, false)
			}
		case "out_time_ms":
			if n, err := strconv.ParseInt(strings.TrimSpace(v), 10, 64); err == nil && n > 0 {
				// BUGFIX: despite its name, ffmpeg's out_time_ms carries
				// MICROseconds (it is the same value as out_time_us).
				// Dividing by 1_000 made lastOutSec 1000x too large, so the
				// time-based ratio clamped to 1 and progress jumped to 100%
				// right away.
				lastOutSec = float64(n) / 1_000_000.0
				send(lastOutSec, lastTotalSz, false)
			}
		case "out_time":
			if s := parseFFmpegOutTime(v); s > 0 {
				lastOutSec = s
				send(lastOutSec, lastTotalSz, false)
			}
		case "total_size":
			if n, err := strconv.ParseInt(strings.TrimSpace(v), 10, 64); err == nil && n > 0 {
				lastTotalSz = n
				send(lastOutSec, lastTotalSz, false)
			}
		case "progress":
			if strings.TrimSpace(v) == "end" {
				send(lastOutSec, lastTotalSz, true)
			}
		}
	}
	// NOTE(review): sc.Err() is intentionally not checked — a truncated
	// progress stream only degrades progress reporting; cmd.Wait decides
	// success/failure.
	if err := cmd.Wait(); err != nil {
		return fmt.Errorf("ffmpeg remux failed: %v (%s)", err, strings.TrimSpace(stderr.String()))
	}
	return nil
}

// --- MP4 streaming optimization (fast start) ---
// "Fast start" means: moov before mdat (the browser can read metadata immediately)

// isFastStartMP4 walks the top-level MP4 boxes and reports whether moov
// precedes mdat. On anything unclear/short it returns true so the caller
// leaves the file untouched.
func isFastStartMP4(path string) (bool, error) {
	f, err := os.Open(path)
	if err != nil {
		return false, err
	}
	defer f.Close()
	for i := 0; i < 256; i++ {
		var hdr [8]byte
		if _, err := io.ReadFull(f, hdr[:]); err != nil {
			// unclear/short -> don't touch the file
			return true, nil
		}
		sz32 := binary.BigEndian.Uint32(hdr[0:4])
		typ := string(hdr[4:8])
		var boxSize int64
		headerSize := int64(8)
		if sz32 == 0 {
			// size 0 = box extends to EOF; cannot walk further
			return true, nil
		}
		if sz32 == 1 {
			// 64-bit extended size follows the header
			var ext [8]byte
			if _, err := io.ReadFull(f, ext[:]); err != nil {
				return true, nil
			}
			boxSize = int64(binary.BigEndian.Uint64(ext[:]))
			headerSize = 16
		} else {
			boxSize = int64(sz32)
		}
		if boxSize < headerSize {
			return true, nil
		}
		switch typ {
		case "moov":
			return true, nil
		case "mdat":
			return false, nil
		}
		if _, err := f.Seek(boxSize-headerSize, io.SeekCurrent); err != nil {
			return true, nil
		}
	}
	return true, nil
}

// ensureFastStartMP4 rewrites path with faststart if the moov atom is not
// already at the front. The swap happens via tmp/bak renames so the original
// is restored on failure.
func ensureFastStartMP4(path string) error {
	path = strings.TrimSpace(path)
	if path == "" || !strings.EqualFold(filepath.Ext(path), ".mp4") {
		return nil
	}
	if strings.TrimSpace(ffmpegPath) == "" {
		return nil
	}
	ok, err := isFastStartMP4(path)
	if err == nil && ok {
		return nil
	}
	dir := filepath.Dir(path)
	base := filepath.Base(path)
	tmp := filepath.Join(dir, ".__faststart__"+base+".tmp")
	bak := filepath.Join(dir, ".__faststart__"+base+".bak")
	_ = os.Remove(tmp)
	_ = os.Remove(bak)
	cmd := exec.Command(ffmpegPath,
		"-y",
		"-i", path,
		"-c", "copy",
		"-movflags", "+faststart",
		tmp,
	)
	var stderr bytes.Buffer
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		_ = os.Remove(tmp)
		return fmt.Errorf("ffmpeg faststart failed: %v (%s)", err, strings.TrimSpace(stderr.String()))
	}
	// swap atomically (original -> bak, tmp -> original, drop bak)
	if err := os.Rename(path, bak); err != nil {
		_ = os.Remove(tmp)
		return fmt.Errorf("rename original to bak failed: %w", err)
	}
	if err := os.Rename(tmp, path); err != nil {
		_ = os.Rename(bak, path)
		_ = os.Remove(tmp)
		return fmt.Errorf("rename tmp to original failed: %w", err)
	}
	_ = os.Remove(bak)
	return nil
}

// stripHotPrefix removes a leading "HOT " marker (case-insensitive).
func stripHotPrefix(s string) string {
	s = strings.TrimSpace(s)
	// accept "HOT " case-insensitively
	if len(s) >= 4 && strings.EqualFold(s[:4], "HOT ") {
		return strings.TrimSpace(s[4:])
	}
	return s
}

func generatedRoot() (string, error) {
	return resolvePathRelativeToApp("generated")
}

func generatedMetaRoot() (string, error) {
	return resolvePathRelativeToApp(filepath.Join("generated", "meta"))
}

// generatedThumbWebPFile returns the path to generated/meta/<id>/thumbs.webp.
// assetID may contain "HOT "; it is stripped and sanitized like everywhere else.
func generatedThumbWebPFile(assetID string) (string, error) { assetID = stripHotPrefix(strings.TrimSpace(assetID)) if assetID == "" { return "", fmt.Errorf("empty assetID") } // gleiche Normalisierung wie bei Ordnernamen var err error assetID, err = sanitizeID(assetID) if err != nil || assetID == "" { return "", fmt.Errorf("invalid assetID: %w", err) } dir, err := ensureGeneratedDir(assetID) if err != nil || strings.TrimSpace(dir) == "" { return "", fmt.Errorf("ensureGeneratedDir: %w", err) } return filepath.Join(dir, "thumbs.webp"), nil } // Legacy (falls noch alte Assets liegen): func generatedThumbsRoot() (string, error) { return resolvePathRelativeToApp(filepath.Join("generated", "thumbs")) } func generatedTeaserRoot() (string, error) { return resolvePathRelativeToApp(filepath.Join("generated", "teaser")) } func sanitizeModelKey(k string) string { k = stripHotPrefix(strings.TrimSpace(k)) if k == "" || k == "—" || strings.ContainsAny(k, `/\`) { return "" } return k } func modelKeyFromFilenameOrPath(file string, srcPath string, doneAbs string) string { // 1) bevorzugt aus Dateiname (OHNE Extension!) stem := strings.TrimSuffix(filepath.Base(strings.TrimSpace(file)), filepath.Ext(file)) k := sanitizeModelKey(strings.TrimSpace(modelNameFromFilename(stem))) if k != "" { return k } // 2) Fallback: aus Quellpfad ableiten, wenn Datei in done//... lag if strings.TrimSpace(srcPath) != "" && strings.TrimSpace(doneAbs) != "" { srcDir := filepath.Clean(filepath.Dir(srcPath)) doneAbs = filepath.Clean(doneAbs) // srcDir != doneAbs => wir sind in einem Unterordner, dessen Name i.d.R. 
das Model ist if !strings.EqualFold(srcDir, doneAbs) { k2 := sanitizeModelKey(filepath.Base(srcDir)) if k2 != "" && !strings.EqualFold(k2, "keep") { return k2 } } } return "" } func uniqueDestPath(dstDir, file string) (string, error) { dst := filepath.Join(dstDir, file) if _, err := os.Stat(dst); err == nil { ext := filepath.Ext(file) base := strings.TrimSuffix(file, ext) for i := 2; i <= 200; i++ { alt := fmt.Sprintf("%s__dup%d%s", base, i, ext) cand := filepath.Join(dstDir, alt) if _, err := os.Stat(cand); os.IsNotExist(err) { return cand, nil } } return "", fmt.Errorf("too many duplicates for %s", file) } else if err != nil && !os.IsNotExist(err) { return "", err } return dst, nil } func idFromVideoPath(videoPath string) string { name := filepath.Base(strings.TrimSpace(videoPath)) return strings.TrimSuffix(name, filepath.Ext(name)) } func assetIDForJob(job *RecordJob) string { if job == nil { return "" } // Prefer: Dateiname ohne Endung (und ohne HOT Prefix) out := strings.TrimSpace(job.Output) if out != "" { id := stripHotPrefix(idFromVideoPath(out)) if strings.TrimSpace(id) != "" { return id } } // Fallback: JobID (sollte praktisch nie nötig sein) return strings.TrimSpace(job.ID) } func atomicWriteFile(dst string, data []byte) error { dir := filepath.Dir(dst) if err := os.MkdirAll(dir, 0o755); err != nil { return err } tmp, err := os.CreateTemp(dir, ".tmp-*") if err != nil { return err } tmpName := tmp.Name() _ = tmp.Chmod(0o644) _, werr := tmp.Write(data) cerr := tmp.Close() if werr != nil { _ = os.Remove(tmpName) return werr } if cerr != nil { _ = os.Remove(tmpName) return cerr } // ✅ Wichtig: Windows/SMB kann Rename nicht überschreiben _ = os.Remove(dst) // ✅ Wenn du renameWithRetry hast: nutzen (robuster bei Shares) if err := renameWithRetry(tmpName, dst); err != nil { _ = os.Remove(tmpName) return err } return nil } // Beim Start: loose Dateien in /done/keep (Root) in /done/keep// einsortieren. 
// Best-effort: if the model cannot be derived or the target collides, the
// file is skipped or renamed.
func fixKeepRootFilesIntoModelSubdirs() {
	s := getSettings()
	doneAbs, err := resolvePathRelativeToApp(s.DoneDir)
	if err != nil || strings.TrimSpace(doneAbs) == "" {
		return
	}
	keepRoot := filepath.Join(doneAbs, "keep")
	ents, err := os.ReadDir(keepRoot)
	if err != nil {
		// keep may not exist yet -> nothing to do
		if os.IsNotExist(err) {
			return
		}
		fmt.Println("⚠️ keep scan failed:", err)
		return
	}
	moved := 0
	skipped := 0
	// a "video" is .mp4/.ts and not an in-progress .part/.tmp artifact
	isVideo := func(name string) bool {
		low := strings.ToLower(name)
		if strings.Contains(low, ".part") || strings.Contains(low, ".tmp") {
			return false
		}
		ext := strings.ToLower(filepath.Ext(name))
		return ext == ".mp4" || ext == ".ts"
	}
	for _, e := range ents {
		if e.IsDir() {
			continue
		}
		name := e.Name()
		if !isVideo(name) {
			continue
		}
		// source: /done/keep/<file>
		src := filepath.Join(keepRoot, name)
		// derive the model from the file name (like in the keep handler)
		stem := strings.TrimSuffix(name, filepath.Ext(name)) // ✅ without .mp4/.ts
		modelKey := sanitizeModelKey(strings.TrimSpace(modelNameFromFilename(stem)))
		// not derivable -> skip
		if modelKey == "" || modelKey == "—" || strings.ContainsAny(modelKey, `/\`) {
			skipped++
			continue
		}
		dstDir := filepath.Join(keepRoot, modelKey)
		if err := os.MkdirAll(dstDir, 0o755); err != nil {
			fmt.Println("⚠️ keep mkdir failed:", err)
			skipped++
			continue
		}
		dst := filepath.Join(dstDir, name)
		// if the target already exists -> find a unique name
		if _, err := os.Stat(dst); err == nil {
			ext := filepath.Ext(name)
			base := strings.TrimSuffix(name, ext)
			found := false
			for i := 2; i <= 200; i++ {
				alt := fmt.Sprintf("%s__dup%d%s", base, i, ext)
				cand := filepath.Join(dstDir, alt)
				if _, err := os.Stat(cand); os.IsNotExist(err) {
					dst = cand
					found = true
					break
				}
			}
			if !found {
				fmt.Println("⚠️ keep fix: too many duplicates, skip:", name)
				skipped++
				continue
			}
		} else if err != nil && !os.IsNotExist(err) {
			fmt.Println("⚠️ keep stat dst failed:", err)
			skipped++
			continue
		}
		// move (robust against Windows file locks)
		if err := renameWithRetry(src, dst); err != nil {
			fmt.Println("⚠️ keep fix rename failed:", err)
			skipped++
			continue
		}
		moved++
	}
	if moved > 0 || skipped > 0 {
		fmt.Printf("🧹 keep fix: moved=%d skipped=%d (root=%s)\n", moved, skipped, keepRoot)
	}
}

// findFinishedFileByID looks for a finished recording by id, trying .mp4/.ts
// with and without the "HOT " prefix in done/, done/keep/ and the record dir.
func findFinishedFileByID(id string) (string, error) {
	s := getSettings()
	recordAbs, _ := resolvePathRelativeToApp(s.RecordDir)
	doneAbs, _ := resolvePathRelativeToApp(s.DoneDir)
	base := stripHotPrefix(strings.TrimSpace(id))
	if base == "" {
		return "", fmt.Errorf("not found")
	}
	names := []string{
		base + ".mp4",
		"HOT " + base + ".mp4",
		base + ".ts",
		"HOT " + base + ".ts",
	}
	// done (root + /done/<model>/) + keep (root + /done/keep/<model>/)
	for _, name := range names {
		if p, _, ok := findFileInDirOrOneLevelSubdirs(doneAbs, name, "keep"); ok {
			return p, nil
		}
		if p, _, ok := findFileInDirOrOneLevelSubdirs(filepath.Join(doneAbs, "keep"), name, ""); ok {
			return p, nil
		}
		if p, _, ok := findFileInDirOrOneLevelSubdirs(recordAbs, name, ""); ok {
			return p, nil
		}
	}
	return "", fmt.Errorf("not found")
}

// maxInt64 is the largest value representable in an int64.
const maxInt64 = int64(^uint64(0) >> 1)

// removeJobsByOutputBasename drops all jobs whose output file name matches.
func removeJobsByOutputBasename(file string) {
	file = strings.TrimSpace(file)
	if file == "" {
		return
	}
	removed := false
	jobsMu.Lock()
	for id, j := range jobs {
		if j == nil {
			continue
		}
		out := strings.TrimSpace(j.Output)
		if out == "" {
			continue
		}
		if filepath.Base(out) == file {
			delete(jobs, id)
			removed = true
		}
	}
	jobsMu.Unlock()
	if removed {
		notifyJobsChanged()
	}
}

// renameJobsOutputBasename rewrites the output file name of all jobs whose
// output base name equals oldFile (the directory part is kept).
func renameJobsOutputBasename(oldFile, newFile string) {
	oldFile = strings.TrimSpace(oldFile)
	newFile = strings.TrimSpace(newFile)
	if oldFile == "" || newFile == "" {
		return
	}
	changed := false
	jobsMu.Lock()
	for _, j := range jobs {
		if j == nil {
			continue
		}
		out := strings.TrimSpace(j.Output)
		if out == "" {
			continue
		}
		if filepath.Base(out) == oldFile {
			j.Output = filepath.Join(filepath.Dir(out), newFile)
			changed = true
		}
	}
	jobsMu.Unlock()
	if changed {
		notifyJobsChanged()
	}
}

// FetchPlaylist now takes a *HTTPClient. It loads the HLS master playlist and
// picks the variant with the largest width (60fps preferred as a tie-breaker,
// detected via the "FPS:60.0" marker in the variant name).
func FetchPlaylist(ctx context.Context, hc *HTTPClient, hlsSource, httpCookie string) (*Playlist, error) {
	if hlsSource == "" {
		return nil, errors.New("HLS-URL leer")
	}
	req, err := hc.NewRequest(ctx, http.MethodGet, hlsSource, httpCookie)
	if err != nil {
		return nil, fmt.Errorf("Fehler beim Erstellen der Playlist-Request: %w", err)
	}
	resp, err := hc.client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("Fehler beim Laden der Playlist: %w", err)
	}
	defer resp.Body.Close()
	playlist, listType, err := m3u8.DecodeFrom(resp.Body, true)
	if err != nil || listType != m3u8.MASTER {
		return nil, errors.New("keine gültige Master-Playlist")
	}
	master := playlist.(*m3u8.MasterPlaylist)
	var bestURI string
	var bestWidth int
	var bestFramerate int
	for _, variant := range master.Variants {
		if variant == nil || variant.Resolution == "" {
			continue
		}
		parts := strings.Split(variant.Resolution, "x")
		if len(parts) != 2 {
			continue
		}
		width, err := strconv.Atoi(parts[0])
		if err != nil {
			continue
		}
		fr := 30
		if strings.Contains(variant.Name, "FPS:60.0") {
			fr = 60
		}
		if width > bestWidth || (width == bestWidth && fr > bestFramerate) {
			bestWidth = width
			bestFramerate = fr
			bestURI = variant.URI
		}
	}
	if bestURI == "" {
		return nil, errors.New("keine gültige Auflösung gefunden")
	}
	// variant URIs are relative to the master playlist's directory
	root := hlsSource[:strings.LastIndex(hlsSource, "/")+1]
	return &Playlist{
		PlaylistURL: root + bestURI,
		RootURL:     root,
		Resolution:  bestWidth,
		Framerate:   bestFramerate,
	}, nil
}

// readLine reads one line from stdin, trimming the trailing newline/CR.
func readLine() string {
	r := bufio.NewReader(os.Stdin)
	s, _ := r.ReadString('\n')
	return strings.TrimRight(s, "\r\n")
}

// fileExists reports whether a stat on path succeeds.
func fileExists(path string) bool {
	_, err := os.Stat(path)
	return err == nil
}