nsfwapp/backend/main.go
2026-02-06 09:35:43 +01:00

10012 lines
245 KiB
Go
Raw Blame History

This file contains invisible Unicode characters

This file contains invisible Unicode characters that are indistinguishable to humans but may be processed differently by a computer. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

// backend\main.go
package main
import (
"bufio"
"bytes"
"context"
"crypto/sha1"
"encoding/base64"
"encoding/binary"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"html"
"image"
"image/color"
"image/draw"
"image/jpeg"
"image/png"
"io"
"log"
"math"
"math/rand"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
"github.com/PuerkitoBio/goquery"
"github.com/google/uuid"
"github.com/grafov/m3u8"
gocpu "github.com/shirou/gopsutil/v3/cpu"
godisk "github.com/shirou/gopsutil/v3/disk"
"github.com/sqweek/dialog"
"golang.org/x/image/font"
"golang.org/x/image/font/basicfont"
"golang.org/x/image/math/fixed"
)
// roomDossierRegexp extracts the JSON-escaped initialRoomDossier blob from a
// room HTML page (non-greedy match inside the quoted assignment; presumably
// a Chaturbate page — see the UseChaturbateAPI setting).
var roomDossierRegexp = regexp.MustCompile(`window\.initialRoomDossier = "(.*?)"`)
// JobStatus is the lifecycle state of a RecordJob.
type JobStatus string

const (
	JobRunning  JobStatus = "running"
	JobPostwork JobStatus = "postwork" // recording finished, post-processing still running
	JobFinished JobStatus = "finished"
	JobStopped  JobStatus = "stopped"
	JobFailed   JobStatus = "failed"
)
// RecordJob describes one recording (download) task, including live-preview
// state and the progress data shown while the job is being finalized.
// Unexported fields and fields tagged `json:"-"` never reach the UI.
type RecordJob struct {
	ID              string     `json:"id"`
	SourceURL       string     `json:"sourceUrl"`
	Output          string     `json:"output"` // file currently being written
	Status          JobStatus  `json:"status"`
	StartedAt       time.Time  `json:"startedAt"`
	EndedAt         *time.Time `json:"endedAt,omitempty"`
	DurationSeconds float64    `json:"durationSeconds,omitempty"`
	SizeBytes       int64      `json:"sizeBytes,omitempty"`
	VideoWidth      int        `json:"videoWidth,omitempty"`
	VideoHeight     int        `json:"videoHeight,omitempty"`
	FPS             float64    `json:"fps,omitempty"`
	Hidden          bool       `json:"-"` // hidden jobs are filtered out of every UI snapshot
	Error           string     `json:"error,omitempty"`
	PreviewDir      string     `json:"-"`
	PreviewImage    string     `json:"-"`
	previewCmd      *exec.Cmd  `json:"-"` // running preview ffmpeg process, if any
	LiveThumbStarted bool      `json:"-"`
	// Preview status (e.g. private/offline, derived from ffmpeg HTTP errors).
	PreviewState    string `json:"previewState,omitempty"`    // "", "private", "offline", "error"
	PreviewStateAt  string `json:"previewStateAt,omitempty"`  // RFC3339Nano
	PreviewStateMsg string `json:"previewStateMsg,omitempty"` // short info text
	// Thumbnail cache (prevents spawning ffmpeg for every HTTP request).
	previewMu     sync.Mutex `json:"-"`
	previewJpeg   []byte     `json:"-"`
	previewJpegAt time.Time  `json:"-"`
	previewGen    bool       `json:"-"`
	PreviewM3U8   string     `json:"-"` // HLS URL fed into ffmpeg
	PreviewCookie string     `json:"-"` // Cookie header (if required)
	PreviewUA     string     `json:"-"` // user agent
	previewCancel context.CancelFunc `json:"-"`
	previewLastHit time.Time         `json:"-"` // last time the preview was requested
	previewStartMu sync.Mutex        `json:"-"`
	// Frontend progress while stopping/finalizing.
	Phase       string             `json:"phase,omitempty"`    // stopping | remuxing | moving
	Progress    int                `json:"progress,omitempty"` // 0..100
	PostWorkKey string             `json:"postWorkKey,omitempty"`
	PostWork    *PostWorkKeyStatus `json:"postWork,omitempty"`
	cancel      context.CancelFunc `json:"-"` // cancels the recording context
}
// dummyResponseWriter is an http.ResponseWriter that discards everything;
// useful for invoking handlers internally without a real client.
type dummyResponseWriter struct {
	h http.Header
}

// ffprobeStreamInfo mirrors the per-stream fields requested from
// `ffprobe -show_entries stream=...` JSON output.
type ffprobeStreamInfo struct {
	Width        int    `json:"width"`
	Height       int    `json:"height"`
	AvgFrameRate string `json:"avg_frame_rate"`
	RFrameRate   string `json:"r_frame_rate"`
}

// ffprobeInfo is the top-level ffprobe JSON document.
type ffprobeInfo struct {
	Streams []ffprobeStreamInfo `json:"streams"`
}
// parseFFRate converts an ffprobe frame-rate string into a float.
// Accepted forms are a plain number ("25") or a fraction ("30000/1001");
// blank input, "0/0", a zero denominator, or unparseable text yield 0.
func parseFFRate(s string) float64 {
	s = strings.TrimSpace(s)
	if s == "" || s == "0/0" {
		return 0
	}
	// Fractional form, e.g. "30000/1001".
	if numStr, denStr, isFrac := strings.Cut(s, "/"); isFrac {
		num, errN := strconv.ParseFloat(strings.TrimSpace(numStr), 64)
		den, errD := strconv.ParseFloat(strings.TrimSpace(denStr), 64)
		if errN != nil || errD != nil || den == 0 {
			return 0
		}
		return num / den
	}
	// Plain decimal form, e.g. "25".
	if v, err := strconv.ParseFloat(s, 64); err == nil {
		return v
	}
	return 0
}
// probeVideoProps runs ffprobe on filePath and returns width, height and
// frame rate of the first video stream. avg_frame_rate is preferred;
// r_frame_rate is used only when the average is missing or zero.
func probeVideoProps(ctx context.Context, filePath string) (w int, h int, fps float64, err error) {
	filePath = strings.TrimSpace(filePath)
	if filePath == "" {
		return 0, 0, 0, fmt.Errorf("empty path")
	}
	cmd := exec.CommandContext(ctx, ffprobePath,
		"-v", "error",
		"-select_streams", "v:0",
		"-show_entries", "stream=width,height,avg_frame_rate,r_frame_rate",
		"-of", "json",
		filePath,
	)
	out, err := cmd.Output()
	if err != nil {
		return 0, 0, 0, err
	}
	var info ffprobeInfo
	if err := json.Unmarshal(out, &info); err != nil {
		return 0, 0, 0, err
	}
	if len(info.Streams) == 0 {
		return 0, 0, 0, fmt.Errorf("no video stream")
	}
	s := info.Streams[0]
	w, h = s.Width, s.Height
	// Prefer avg_frame_rate, fall back to r_frame_rate.
	fps = parseFFRate(s.AvgFrameRate)
	if fps <= 0 {
		fps = parseFFRate(s.RFrameRate)
	}
	return w, h, fps, nil
}
// Header lazily allocates and returns the header map (http.ResponseWriter).
func (d *dummyResponseWriter) Header() http.Header {
	if d.h == nil {
		d.h = make(http.Header)
	}
	return d.h
}

// Write reports the whole payload as written and discards it.
func (d *dummyResponseWriter) Write(b []byte) (int, error) { return len(b), nil }

// WriteHeader ignores the status code.
func (d *dummyResponseWriter) WriteHeader(statusCode int) {}
// jobs holds every known RecordJob keyed by ID; guarded by jobsMu.
var (
	jobs   = map[string]*RecordJob{}
	jobsMu = sync.Mutex{}
)

// serverStartedAt feeds the uptime value of perf snapshots.
var serverStartedAt = time.Now()

// lastCPUUsageBits stores the newest CPU usage percentage as atomic float64 bits.
var lastCPUUsageBits uint64

// setLastCPUUsage atomically publishes the latest CPU usage sample.
func setLastCPUUsage(v float64) { atomic.StoreUint64(&lastCPUUsageBits, math.Float64bits(v)) }

// getLastCPUUsage atomically reads the latest CPU usage sample.
func getLastCPUUsage() float64 { return math.Float64frombits(atomic.LoadUint64(&lastCPUUsageBits)) }
// -------------------- SSE: /api/record/stream --------------------
// sseHub fans out byte payloads to all connected SSE client channels.
type sseHub struct {
	mu      sync.Mutex
	clients map[chan []byte]struct{}
}
// newSSEHub creates an empty hub.
func newSSEHub() *sseHub {
	return &sseHub{clients: map[chan []byte]struct{}{}}
}

// add registers a client channel.
func (h *sseHub) add(ch chan []byte) {
	h.mu.Lock()
	h.clients[ch] = struct{}{}
	h.mu.Unlock()
}

// remove unregisters a client channel and closes it. The channel is taken
// out of the map under the lock first, so broadcast can no longer reach it
// by the time close runs.
func (h *sseHub) remove(ch chan []byte) {
	h.mu.Lock()
	delete(h.clients, ch)
	h.mu.Unlock()
	close(ch)
}

// broadcast sends b to every client without blocking.
func (h *sseHub) broadcast(b []byte) {
	h.mu.Lock()
	defer h.mu.Unlock()
	for ch := range h.clients {
		// Non-blocking: slow clients drop updates (they catch up on the next one).
		select {
		case ch <- b:
		default:
		}
	}
}
// doneHub notifies SSE clients that the "done" list changed; doneNotify is
// the 1-slot debounce signal consumed by the broadcaster in init().
var doneHub = newSSEHub()
var doneNotify = make(chan struct{}, 1)

// doneSeq is a monotonically increasing counter so clients can react even
// when two payloads would otherwise look identical.
var doneSeq uint64

// notifyDoneChanged queues a coalesced done-changed broadcast; a full
// buffer means one is already pending, so the signal is simply dropped.
func notifyDoneChanged() {
	select {
	case doneNotify <- struct{}{}:
	default:
	}
}

// recordJobsHub/recordJobsNotify use the same pattern for job list updates.
var recordJobsHub = newSSEHub()
var recordJobsNotify = make(chan struct{}, 1)
// startPreviewIdleKiller launches a background loop that, every 5s, stops
// live-preview ffmpeg processes whose job is no longer running or whose
// preview has not been requested for more than 10 minutes.
func startPreviewIdleKiller() {
	t := time.NewTicker(5 * time.Second)
	go func() {
		defer t.Stop()
		for range t.C {
			// Snapshot the job pointers under the lock, then re-lock briefly
			// per job so jobsMu is never held for long.
			jobsMu.Lock()
			list := make([]*RecordJob, 0, len(jobs))
			for _, j := range jobs {
				if j != nil {
					list = append(list, j)
				}
			}
			jobsMu.Unlock()
			for _, j := range list {
				jobsMu.Lock()
				cmdRunning := j.previewCmd != nil
				last := j.previewLastHit
				st := j.Status
				jobsMu.Unlock()
				if !cmdRunning {
					continue
				}
				// Job no longer running, or nobody hovered for a while.
				if st != JobRunning || (!last.IsZero() && time.Since(last) > 10*time.Minute) {
					stopPreview(j)
				}
			}
		}
	}()
}
// init wires up the background machinery: semaphores, the adaptive CPU
// controller, the preview idle killer, and two debounced SSE broadcasters.
// Each broadcaster waits 40ms after a signal, drains any further signals
// that arrived meanwhile, and then emits a single coalesced event.
func init() {
	initFFmpegSemaphores()
	startAdaptiveSemController(context.Background())
	startPreviewIdleKiller()
	// Debounced broadcaster (job list snapshots).
	go func() {
		for range recordJobsNotify {
			time.Sleep(40 * time.Millisecond)
			for {
				select {
				case <-recordJobsNotify:
				default:
					goto SEND
				}
			}
		SEND:
			recordJobsHub.broadcast(jobsSnapshotJSON())
		}
	}()
	// Debounced broadcaster ("done" list changed).
	go func() {
		for range doneNotify {
			time.Sleep(40 * time.Millisecond)
			for {
				select {
				case <-doneNotify:
				default:
					goto SEND
				}
			}
		SEND:
			seq := atomic.AddUint64(&doneSeq, 1)
			// Keep the payload deliberately small: clients just refetch "done".
			b := []byte(fmt.Sprintf(`{"type":"doneChanged","seq":%d,"ts":%d}`, seq, time.Now().UnixMilli()))
			doneHub.broadcast(b)
		}
	}()
}
// publishJob makes a previously hidden job visible to the UI and pushes a
// jobs update. Returns false when the job is unknown or already visible.
func publishJob(jobID string) bool {
	jobsMu.Lock()
	job := jobs[jobID]
	if job == nil || !job.Hidden {
		jobsMu.Unlock()
		return false
	}
	job.Hidden = false
	jobsMu.Unlock()
	notifyJobsChanged()
	return true
}
// notifyJobsChanged queues a coalesced jobs broadcast; the signal is
// dropped when one is already pending (1-slot buffer).
func notifyJobsChanged() {
	select {
	case recordJobsNotify <- struct{}{}:
	default:
	}
}
// jobsSnapshotJSON serializes all visible jobs (newest first) for SSE.
// Hidden jobs are skipped so the UI never "jumps".
func jobsSnapshotJSON() []byte {
	jobsMu.Lock()
	list := make([]*RecordJob, 0, len(jobs))
	for _, j := range jobs {
		// Never send hidden jobs to the UI.
		if j == nil || j.Hidden {
			continue
		}
		// NOTE(review): this shallow copy also copies the sync.Mutex fields
		// embedded in RecordJob (go vet copylocks). It is only safe while
		// those mutexes are unlocked here — confirm, or copy fields explicitly.
		c := *j
		c.cancel = nil // do not serialize
		list = append(list, &c)
	}
	jobsMu.Unlock()
	// Newest first.
	sort.Slice(list, func(i, j int) bool {
		return list[i].StartedAt.After(list[j].StartedAt)
	})
	b, _ := json.Marshal(list)
	return b
}
// recordStream is the SSE endpoint for job updates. It sends an initial
// "jobs" snapshot, then pushes coalesced snapshots on every change, plus a
// comment ping every 15s so proxies keep the connection open.
func recordStream(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Nur GET erlaubt", http.StatusMethodNotAllowed)
		return
	}
	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "Streaming nicht unterstützt", http.StatusInternalServerError)
		return
	}
	// SSE headers.
	h := w.Header()
	h.Set("Content-Type", "text/event-stream; charset=utf-8")
	h.Set("Cache-Control", "no-cache, no-transform")
	h.Set("Connection", "keep-alive")
	h.Set("X-Accel-Buffering", "no") // helpful behind reverse proxies
	// Start immediately.
	w.WriteHeader(http.StatusOK)
	// writeEvent returns false when the client is gone / a write failed.
	writeEvent := func(event string, data []byte) bool {
		if event != "" {
			if _, err := fmt.Fprintf(w, "event: %s\n", event); err != nil {
				return false
			}
		}
		if len(data) > 0 {
			if _, err := fmt.Fprintf(w, "data: %s\n\n", data); err != nil {
				return false
			}
		} else {
			// Empty payload is fine (terminator only).
			if _, err := io.WriteString(w, "\n"); err != nil {
				return false
			}
		}
		flusher.Flush()
		return true
	}
	writeComment := func(msg string) bool {
		if _, err := fmt.Fprintf(w, ": %s\n\n", msg); err != nil {
			return false
		}
		flusher.Flush()
		return true
	}
	// Reconnect hint for the browser.
	if _, err := fmt.Fprintf(w, "retry: 3000\n\n"); err != nil {
		return
	}
	flusher.Flush()
	// Register with the hub.
	ch := make(chan []byte, 32)
	recordJobsHub.add(ch)
	defer recordJobsHub.remove(ch)
	// Initial snapshot right away.
	if b := jobsSnapshotJSON(); len(b) > 0 {
		if !writeEvent("jobs", b) {
			return
		}
	}
	ctx := r.Context()
	// Ping/keepalive ticker.
	ping := time.NewTicker(15 * time.Second)
	defer ping.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case b, ok := <-ch:
			if !ok {
				return
			}
			if len(b) == 0 {
				continue
			}
			// Burst coalescing: when many updates arrive quickly, only the
			// newest one (draining up to 64) is sent.
			last := b
		drain:
			for i := 0; i < 64; i++ {
				select {
				case nb, ok := <-ch:
					if !ok {
						return
					}
					if len(nb) > 0 {
						last = nb
					}
				default:
					break drain
				}
			}
			if !writeEvent("jobs", last) {
				return
			}
		case <-ping.C:
			// Keepalive as an SSE comment (harmless, keeps connections open).
			if !writeComment(fmt.Sprintf("ping %d", time.Now().Unix())) {
				return
			}
		}
	}
}
// Resolved ffmpeg/ffprobe binary paths (env override, next to the EXE, or PATH).
var ffmpegPath = detectFFmpegPath()
var ffprobePath = detectFFprobePath()
// detectFFprobePath locates the ffprobe binary. Search order:
// 1) FFPROBE_PATH env override, 2) the directory of the detected ffmpeg
// binary, 3) the executable's directory, 4) $PATH, 5) bare "ffprobe".
func detectFFprobePath() string {
	// 1) Explicit override via environment variable.
	if override := strings.TrimSpace(os.Getenv("FFPROBE_PATH")); override != "" {
		abs, err := filepath.Abs(override)
		if err != nil {
			return override
		}
		return abs
	}
	// 2) Next to the resolved ffmpeg binary (same directory, same ".exe" suffix).
	if mp := strings.TrimSpace(ffmpegPath); mp != "" && mp != "ffmpeg" {
		suffix := ""
		if strings.HasSuffix(strings.ToLower(mp), ".exe") {
			suffix = ".exe"
		}
		candidate := filepath.Join(filepath.Dir(mp), "ffprobe"+suffix)
		if st, err := os.Stat(candidate); err == nil && !st.IsDir() {
			return candidate
		}
	}
	// 3) In the directory of the running executable.
	if exePath, err := os.Executable(); err == nil {
		base := filepath.Dir(exePath)
		for _, name := range []string{"ffprobe", "ffprobe.exe"} {
			candidate := filepath.Join(base, name)
			if st, err := os.Stat(candidate); err == nil && !st.IsDir() {
				return candidate
			}
		}
	}
	// 4) PATH lookup, made absolute when possible.
	if found, err := exec.LookPath("ffprobe"); err == nil {
		if abs, err2 := filepath.Abs(found); err2 == nil {
			return abs
		}
		return found
	}
	// 5) Last resort: rely on the OS to resolve a bare name at exec time.
	return "ffprobe"
}
// ---------- Dynamic Semaphore (resizeable by load controller) ----------
// DynSem is a counting semaphore whose limit can be resized at runtime.
// max is the current limit (adjustable via SetMax), cap is the hard upper
// bound fixed at construction, and in counts currently held slots.
type DynSem struct {
	mu  sync.Mutex
	in  int
	max int
	cap int
}

// NewDynSem builds a semaphore with the given starting limit and hard cap.
// Both values are forced to at least 1, and initial is clamped to cap.
func NewDynSem(initial, cap int) *DynSem {
	if cap < 1 {
		cap = 1
	}
	switch {
	case initial < 1:
		initial = 1
	case initial > cap:
		initial = cap
	}
	return &DynSem{max: initial, cap: cap}
}

// Acquire blocks (polling every 25ms) until a slot is free or ctx is
// cancelled. A nil ctx means "wait forever".
func (s *DynSem) Acquire(ctx context.Context) error {
	for {
		if ctx != nil {
			if err := ctx.Err(); err != nil {
				return err
			}
		}
		s.mu.Lock()
		got := s.in < s.max
		if got {
			s.in++
		}
		s.mu.Unlock()
		if got {
			return nil
		}
		time.Sleep(25 * time.Millisecond)
	}
}

// Release frees one slot; calls beyond the number acquired are ignored.
func (s *DynSem) Release() {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.in > 0 {
		s.in--
	}
}

// SetMax changes the current limit, clamped to [1, cap].
func (s *DynSem) SetMax(n int) {
	switch {
	case n < 1:
		n = 1
	case n > s.cap:
		n = s.cap
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	s.max = n
}

// Max reports the current limit.
func (s *DynSem) Max() int {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.max
}

// Cap reports the hard upper bound.
func (s *DynSem) Cap() int {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.cap
}

// InUse reports how many slots are currently held.
func (s *DynSem) InUse() int {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.in
}
// Worker pools (dynamic semaphores) for the various ffmpeg/ffprobe tasks;
// created in initFFmpegSemaphores, resized by startAdaptiveSemController.
var (
	genSem     *DynSem // preview.mp4 clip generation
	previewSem *DynSem // live preview encodes
	thumbSem   *DynSem // thumbnail frame extraction
	durSem     *DynSem // ffprobe duration probes
)
// clamp restricts n to the inclusive range [lo, hi].
func clamp(n, lo, hi int) int {
	switch {
	case n < lo:
		return lo
	case n > hi:
		return hi
	default:
		return n
	}
}
// envInt reads the named environment variable as an int. The boolean is
// false when the variable is unset, blank, or not a valid integer.
func envInt(name string) (int, bool) {
	raw := strings.TrimSpace(os.Getenv(name))
	if raw == "" {
		return 0, false
	}
	if n, err := strconv.Atoi(raw); err == nil {
		return n, true
	}
	return 0, false
}
// initFFmpegSemaphores sizes the four ffmpeg/ffprobe worker semaphores from
// the CPU count. Start values can be overridden via PREVIEW_WORKERS,
// THUMB_WORKERS, GEN_WORKERS, DUR_WORKERS; hard caps via PREVIEW_CAP,
// THUMB_CAP, GEN_CAP and DUR_CAP.
//
// Fix: the old second fmt.Printf duplicated the first startup line (same
// values, fewer details) and has been removed.
func initFFmpegSemaphores() {
	cpu := runtime.NumCPU()
	if cpu <= 0 {
		cpu = 2
	}
	// Heuristic defaults.
	previewN := clamp((cpu+1)/2, 1, 6) // live x264 -> conservative
	thumbN := clamp(cpu, 2, 12)        // single frames -> may run wider
	genN := clamp((cpu+3)/4, 1, 4)     // preview.mp4 clips -> keep small
	durN := clamp(cpu, 2, 16)          // ffprobe: cheap, but not unbounded
	// Optional ENV overrides for the start values.
	if n, ok := envInt("PREVIEW_WORKERS"); ok {
		previewN = clamp(n, 1, 32)
	}
	if n, ok := envInt("THUMB_WORKERS"); ok {
		thumbN = clamp(n, 1, 64)
	}
	if n, ok := envInt("GEN_WORKERS"); ok {
		genN = clamp(n, 1, 16)
	}
	if n, ok := envInt("DUR_WORKERS"); ok {
		durN = clamp(n, 1, 64)
	}
	// Hard caps (upper bounds), also overridable via ENV.
	previewCap := clamp(cpu, 2, 12)
	thumbCap := clamp(cpu*2, 4, 32)
	genCap := clamp((cpu+1)/2, 2, 12)
	durCap := clamp(cpu*2, 4, 32)
	if n, ok := envInt("PREVIEW_CAP"); ok {
		previewCap = clamp(n, 1, 64)
	}
	if n, ok := envInt("THUMB_CAP"); ok {
		thumbCap = clamp(n, 1, 128)
	}
	if n, ok := envInt("GEN_CAP"); ok {
		genCap = clamp(n, 1, 64)
	}
	if n, ok := envInt("DUR_CAP"); ok {
		durCap = clamp(n, 1, 128)
	}
	// Initial max (start values).
	previewSem = NewDynSem(previewN, previewCap)
	thumbSem = NewDynSem(thumbN, thumbCap)
	genSem = NewDynSem(genN, genCap)
	durSem = NewDynSem(durN, durCap)
	fmt.Printf(
		"🔧 semaphores(init): preview=%d/%d thumb=%d/%d gen=%d/%d dur=%d/%d (cpu=%d)\n",
		previewSem.Max(), previewSem.Cap(),
		thumbSem.Max(), thumbSem.Cap(),
		genSem.Max(), genSem.Cap(),
		durSem.Max(), durSem.Cap(),
		cpu,
	)
}
// startAdaptiveSemController launches a loop that samples aggregate CPU
// usage every 2s and nudges the preview/gen/thumb semaphore limits down
// when usage exceeds CPU_TARGET_HI (default 85) and up when it drops below
// CPU_TARGET_LO (default 65). DynSem clamps keep limits within [1, cap].
// The ffprobe (dur) semaphore is not adjusted here.
func startAdaptiveSemController(ctx context.Context) {
	targetHi := 85.0
	targetLo := 65.0
	if v := strings.TrimSpace(os.Getenv("CPU_TARGET_HI")); v != "" {
		if f, err := strconv.ParseFloat(v, 64); err == nil {
			targetHi = f
		}
	}
	if v := strings.TrimSpace(os.Getenv("CPU_TARGET_LO")); v != "" {
		if f, err := strconv.ParseFloat(v, 64); err == nil {
			targetLo = f
		}
	}
	// Warmup (the first measurement can be 0).
	_, _ = gocpu.Percent(200*time.Millisecond, false)
	t := time.NewTicker(2 * time.Second)
	go func() {
		defer t.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-t.C:
				p, err := gocpu.Percent(0, false)
				if err != nil || len(p) == 0 {
					continue
				}
				usage := p[0]
				setLastCPUUsage(usage)
				// Preview encodes are the most expensive -> adjust conservatively.
				if usage > targetHi {
					previewSem.SetMax(previewSem.Max() - 1)
					genSem.SetMax(genSem.Max() - 1)
					thumbSem.SetMax(thumbSem.Max() - 1)
				} else if usage < targetLo {
					previewSem.SetMax(previewSem.Max() + 1)
					genSem.SetMax(genSem.Max() + 1)
					thumbSem.SetMax(thumbSem.Max() + 1)
				}
				// Optional debug:
				// fmt.Printf("CPU %.1f%% -> preview=%d thumb=%d gen=%d\n", usage, previewSem.Max(), thumbSem.Max(), genSem.Max())
			}
		}
	}()
}
// durEntry caches a probed media duration together with the file size and
// mtime it was measured at (used as the cache-invalidation key).
type durEntry struct {
	size int64
	mod  time.Time
	sec  float64
}

// durCache memoizes durationSecondsCached results per path.
var durCache = struct {
	mu sync.Mutex
	m  map[string]durEntry
}{m: map[string]durEntry{}}

// startedAtFromFilenameRe parses "<name>_d_m_yyyy__h-mm-ss"-style file names:
// a base name, three numeric date groups, then a time separated by "__".
// (Day/month order is not verifiable from here — confirm against the writer.)
var startedAtFromFilenameRe = regexp.MustCompile(
	`^(.+)_([0-9]{1,2})_([0-9]{1,2})_([0-9]{4})__([0-9]{1,2})-([0-9]{2})-([0-9]{2})$`,
)
// buildPerfSnapshot assembles the payload served by the perf endpoints:
// uptime, CPU, disk usage of the record dir, the dynamic disk thresholds,
// goroutine/memory stats and current semaphore occupancy.
func buildPerfSnapshot() map[string]any {
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	s := getSettings()
	recordDir, _ := resolvePathRelativeToApp(s.RecordDir)
	var diskFreeBytes uint64
	var diskTotalBytes uint64
	var diskUsedPercent float64
	diskPath := recordDir
	if recordDir != "" {
		if u, err := godisk.Usage(recordDir); err == nil && u != nil {
			diskFreeBytes = u.Free
			diskTotalBytes = u.Total
			diskUsedPercent = u.UsedPercent
		}
	}
	// Dynamic disk thresholds (2x in-flight bytes, resume = +3GB).
	pauseGB, resumeGB, inFlight, pauseNeed, resumeNeed := computeDiskThresholds()
	resp := map[string]any{
		"ts":        time.Now().UTC().Format(time.RFC3339Nano),
		"serverMs":  time.Now().UTC().UnixMilli(), // lets the frontend approximate its "ping"
		"uptimeSec": time.Since(serverStartedAt).Seconds(),
		// Sanitize NaN/Inf/negative readings to 0 before JSON encoding.
		"cpuPercent": func() float64 {
			v := getLastCPUUsage()
			if math.IsNaN(v) || math.IsInf(v, 0) || v < 0 {
				return 0
			}
			return v
		}(),
		"diskPath":        diskPath,
		"diskFreeBytes":   diskFreeBytes,
		"diskTotalBytes":  diskTotalBytes,
		"diskUsedPercent": diskUsedPercent,
		"diskEmergency":   atomic.LoadInt32(&diskEmergency) == 1,
		// Dynamic values replacing a LowDiskPauseBelowGB setting.
		"diskPauseBelowGB":  pauseGB,
		"diskResumeAboveGB": resumeGB,
		// Optional, but very helpful for debugging/UI.
		"diskInFlightBytes":   inFlight,
		"diskInFlightHuman":   formatBytesSI(u64ToI64(inFlight)),
		"diskPauseNeedBytes":  pauseNeed,
		"diskPauseNeedHuman":  formatBytesSI(u64ToI64(pauseNeed)),
		"diskResumeNeedBytes": resumeNeed,
		"diskResumeNeedHuman": formatBytesSI(u64ToI64(resumeNeed)),
		"goroutines":          runtime.NumGoroutine(),
		"mem": map[string]any{
			"alloc":     ms.Alloc,
			"heapAlloc": ms.HeapAlloc,
			"heapInuse": ms.HeapInuse,
			"sys":       ms.Sys,
			"numGC":     ms.NumGC,
		},
	}
	// Semaphore occupancy, only for the pools that exist.
	sem := map[string]any{}
	if genSem != nil {
		sem["gen"] = map[string]any{"inUse": genSem.InUse(), "cap": genSem.Cap(), "max": genSem.Max()}
	}
	if previewSem != nil {
		sem["preview"] = map[string]any{"inUse": previewSem.InUse(), "cap": previewSem.Cap(), "max": previewSem.Max()}
	}
	if thumbSem != nil {
		sem["thumb"] = map[string]any{"inUse": thumbSem.InUse(), "cap": thumbSem.Cap(), "max": thumbSem.Max()}
	}
	if durSem != nil {
		sem["dur"] = map[string]any{"inUse": durSem.InUse(), "cap": durSem.Cap(), "max": durSem.Max()}
	}
	if len(sem) > 0 {
		resp["sem"] = sem
	}
	return resp
}
// pingHandler answers GET/HEAD liveness checks with 204 No Content
// (uncacheable) and rejects every other method with 405.
func pingHandler(w http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case http.MethodGet, http.MethodHead:
		w.Header().Set("Cache-Control", "no-store")
		w.WriteHeader(http.StatusNoContent)
	default:
		http.Error(w, "Nur GET/HEAD erlaubt", http.StatusMethodNotAllowed)
	}
}
// perfHandler serves a single perf snapshot as JSON (GET only).
func perfHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Nur GET erlaubt", http.StatusMethodNotAllowed)
		return
	}
	resp := buildPerfSnapshot()
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Cache-Control", "no-store")
	_ = json.NewEncoder(w).Encode(resp)
}
// perfStreamHandler streams perf snapshots as SSE "perf" events. The
// interval is client-configurable via ?ms= (clamped to 1000..30000,
// default 5000); a comment heartbeat every 15s keeps proxies from timing
// out the connection.
func perfStreamHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Nur GET erlaubt", http.StatusMethodNotAllowed)
		return
	}
	fl, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "Streaming nicht unterstützt", http.StatusInternalServerError)
		return
	}
	// Optional client-supplied interval: /api/perf/stream?ms=5000
	ms := 5000
	if q := r.URL.Query().Get("ms"); q != "" {
		if v, err := strconv.Atoi(q); err == nil {
			// clamp: 1000..30000
			if v < 1000 {
				v = 1000
			}
			if v > 30000 {
				v = 30000
			}
			ms = v
		}
	}
	w.Header().Set("Content-Type", "text/event-stream; charset=utf-8")
	w.Header().Set("Cache-Control", "no-store")
	w.Header().Set("Connection", "keep-alive")
	// Helpful behind nginx/proxies:
	w.Header().Set("X-Accel-Buffering", "no")
	ctx := r.Context()
	// send emits one "perf" event; Encode appends the trailing newline.
	send := func() error {
		payload := buildPerfSnapshot()
		var buf bytes.Buffer
		if err := json.NewEncoder(&buf).Encode(payload); err != nil {
			return err
		}
		// event: perf
		_, _ = io.WriteString(w, "event: perf\n")
		_, _ = io.WriteString(w, "data: ")
		_, _ = w.Write(buf.Bytes())
		_, _ = io.WriteString(w, "\n")
		fl.Flush()
		return nil
	}
	// Initial event immediately.
	_ = send()
	t := time.NewTicker(time.Duration(ms) * time.Millisecond)
	hb := time.NewTicker(15 * time.Second) // heartbeat against proxy timeouts
	defer t.Stop()
	defer hb.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-t.C:
			_ = send()
		case <-hb.C:
			// SSE comment as heartbeat.
			_, _ = io.WriteString(w, ": keep-alive\n\n")
			fl.Flush()
		}
	}
}
// -------------------------
// Low disk space guard
// - pausiert Autostart
// - stoppt laufende Downloads
// -------------------------
const (
	// diskGuardInterval is how often the low-disk guard re-checks free space.
	diskGuardInterval = 5 * time.Second
)

// diskEmergency is the global low-disk flag (0=false, 1=true; atomic access).
var diskEmergency int32

// diskStatusResp is the JSON body served by diskStatusHandler.
type diskStatusResp struct {
	Emergency      bool   `json:"emergency"`
	PauseGB        int    `json:"pauseGB"`
	ResumeGB       int    `json:"resumeGB"`
	FreeBytes      uint64 `json:"freeBytes"`
	FreeBytesHuman string `json:"freeBytesHuman"`
	RecordPath     string `json:"recordPath"`
}
// diskStatusHandler serves GET/HEAD disk status: the emergency flag, the
// dynamic pause/resume thresholds in GB, free bytes on the record volume
// and the resolved record path.
func diskStatusHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet && r.Method != http.MethodHead {
		http.Error(w, "Nur GET/HEAD erlaubt", http.StatusMethodNotAllowed)
		return
	}
	s := getSettings()
	pauseGB, resumeGB, _, _, _ := computeDiskThresholds()
	recordDirAbs, _ := resolvePathRelativeToApp(s.RecordDir)
	dir := strings.TrimSpace(recordDirAbs)
	if dir == "" {
		// Fall back to the raw setting when resolution yields nothing.
		dir = strings.TrimSpace(s.RecordDir)
	}
	free := uint64(0)
	if dir != "" {
		if u, err := godisk.Usage(dir); err == nil && u != nil {
			free = u.Free
		}
	}
	resp := diskStatusResp{
		Emergency:      atomic.LoadInt32(&diskEmergency) == 1,
		PauseGB:        pauseGB,
		ResumeGB:       resumeGB,
		FreeBytes:      free,
		FreeBytesHuman: formatBytesSI(int64(free)),
		RecordPath:     dir,
	}
	w.Header().Set("Cache-Control", "no-store")
	writeJSON(w, http.StatusOK, resp)
}
// stopJobsInternal marks the given jobs as "stopping" and cancels them,
// killing any live-preview ffmpeg process. It pushes two notifications so
// the UI sees the phase/progress immediately and again after the
// kill/cancel work has run.
func stopJobsInternal(list []*RecordJob) {
	if len(list) == 0 {
		return
	}
	type payload struct {
		cmd    *exec.Cmd
		cancel context.CancelFunc
	}
	pl := make([]payload, 0, len(list))
	// Collect cmd/cancel under the lock; do the actual kill/cancel outside it.
	jobsMu.Lock()
	for _, job := range list {
		if job == nil {
			continue
		}
		job.Phase = "stopping"
		job.Progress = 10
		pl = append(pl, payload{cmd: job.previewCmd, cancel: job.cancel})
		job.previewCmd = nil
	}
	jobsMu.Unlock()
	notifyJobsChanged() // 1) update the UI immediately (phase/progress)
	for _, p := range pl {
		if p.cmd != nil && p.cmd.Process != nil {
			_ = p.cmd.Process.Kill()
		}
		if p.cancel != nil {
			p.cancel()
		}
	}
	notifyJobsChanged() // 2) push once more after cancel/kill
}
// stopAllStoppableJobs requests a stop for every running job that is not
// already in the "stopping" phase and returns how many were targeted.
// In a disk emergency everything still writing must be halted; only jobs
// already shutting down are skipped.
func stopAllStoppableJobs() int {
	targets := make([]*RecordJob, 0, 16)
	jobsMu.Lock()
	for _, job := range jobs {
		if job == nil || job.Status != JobRunning {
			continue
		}
		// Skip jobs that are already shutting down.
		if strings.ToLower(strings.TrimSpace(job.Phase)) == "stopping" {
			continue
		}
		targets = append(targets, job)
	}
	jobsMu.Unlock()
	stopJobsInternal(targets)
	return len(targets)
}
// sizeOfPathBestEffort returns the size in bytes of the regular file at p,
// or 0 on any problem (blank path, stat failure, directory, empty file).
// Relative paths are resolved against the app directory when possible.
func sizeOfPathBestEffort(p string) uint64 {
	p = strings.TrimSpace(p)
	if p == "" {
		return 0
	}
	// Best effort: turn a relative path into an absolute one.
	if !filepath.IsAbs(p) {
		abs, err := resolvePathRelativeToApp(p)
		if err == nil && strings.TrimSpace(abs) != "" {
			p = abs
		}
	}
	st, err := os.Stat(p)
	if err != nil || st.IsDir() || st.Size() <= 0 {
		return 0
	}
	return uint64(st.Size())
}
// inFlightBytesForJob estimates how many bytes a job has written so far.
// Prefers the live-tracked SizeBytes; falls back to stat'ing the output file.
func inFlightBytesForJob(j *RecordJob) uint64 {
	if j == nil {
		return 0
	}
	// Prefer live-tracked bytes if available (accurate & cheap).
	if j.SizeBytes > 0 {
		return uint64(j.SizeBytes)
	}
	return sizeOfPathBestEffort(j.Output)
}
// giB is one gibibyte in bytes.
const giB = uint64(1024 * 1024 * 1024)

// computeDiskThresholds derives the dynamic low-disk thresholds from the
// bytes currently being written:
//
//	pauseGB  = ceil((2 * inFlightBytes) / GiB)
//	resumeGB = pauseGB + 3   (hysteresis)
//
// With zero in-flight bytes every result is 0. pauseNeed/resumeNeed are
// the same thresholds in bytes. Both GB values are capped at 10000 as a
// safety limit in case something escalates.
func computeDiskThresholds() (pauseGB int, resumeGB int, inFlight uint64, pauseNeed uint64, resumeNeed uint64) {
	inFlight = sumInFlightBytes()
	if inFlight == 0 {
		return 0, 0, 0, 0, 0
	}
	const hardCap = 10_000
	// ceil(2*inFlight / GiB)
	pauseGB = int((inFlight*2 + giB - 1) / giB)
	if pauseGB > hardCap {
		pauseGB = hardCap
	}
	resumeGB = pauseGB + 3
	if resumeGB > hardCap {
		resumeGB = hardCap
	}
	return pauseGB, resumeGB, inFlight, uint64(pauseGB) * giB, uint64(resumeGB) * giB
}
// sumInFlightBytes totals the bytes of all files currently growing
// (jobs with Status == running). Rationale: the TS->MP4 remux peak needs
// roughly the size of the current file again as reserve.
func sumInFlightBytes() uint64 {
	var sum uint64
	jobsMu.Lock()
	defer jobsMu.Unlock()
	for _, j := range jobs {
		if j == nil {
			continue
		}
		if j.Status != JobRunning {
			continue
		}
		// Use the file that is currently growing — typically j.Output
		// (TS or temporary target). If a separate "TempTS" field is ever
		// added, include it here as well.
		sum += inFlightBytesForJob(j)
	}
	return sum
}
// startDiskSpaceGuard runs in the backend (works without an open browser)
// and checks free space on the record volume every diskGuardInterval.
// Below the dynamic pause threshold it sets diskEmergency (blocking
// autostart) and stops all running jobs; the flag is cleared again once
// free space passes the resume threshold (hysteresis) or no jobs are
// active. Intended to run in its own goroutine — the loop never returns.
func startDiskSpaceGuard() {
	t := time.NewTicker(diskGuardInterval)
	defer t.Stop()
	for range t.C {
		s := getSettings()
		// Determine the path whose free space we check.
		recordDirAbs, _ := resolvePathRelativeToApp(s.RecordDir)
		dir := strings.TrimSpace(recordDirAbs)
		if dir == "" {
			dir = strings.TrimSpace(s.RecordDir)
		}
		if dir == "" {
			continue
		}
		u, err := godisk.Usage(dir)
		if err != nil || u == nil {
			continue
		}
		free := u.Free
		// Dynamic thresholds:
		//   pause  = ceil((2 * inFlight) / GiB)
		//   resume = pause + 3 GB
		// pauseNeed/resumeNeed are the required free bytes.
		pauseGB, resumeGB, inFlight, pauseNeed, resumeNeed := computeDiskThresholds()
		// Nothing running -> nothing to reserve.
		// (Also clear the emergency so autostart becomes available again.)
		if inFlight == 0 {
			if atomic.LoadInt32(&diskEmergency) == 1 {
				atomic.StoreInt32(&diskEmergency, 0)
				fmt.Printf(
					"✅ [disk] No active jobs: emergency cleared (free=%s, path=%s)\n",
					formatBytesSI(u64ToI64(free)),
					dir,
				)
			}
			continue
		}
		// Hysteresis: only "ready" again once free >= resumeNeed.
		if atomic.LoadInt32(&diskEmergency) == 1 {
			if free >= resumeNeed {
				atomic.StoreInt32(&diskEmergency, 0)
				fmt.Printf(
					"✅ [disk] Recovered: free=%s (%dB) (>= %s, %dB) emergency cleared (pause=%dGB resume=%dGB inFlight=%s, %dB)\n",
					formatBytesSI(u64ToI64(free)), free,
					formatBytesSI(u64ToI64(resumeNeed)), resumeNeed,
					pauseGB, resumeGB,
					formatBytesSI(u64ToI64(inFlight)), inFlight,
				)
			}
			continue
		}
		// Normal state: as long as free >= pauseNeed there is nothing to do.
		if free >= pauseNeed {
			continue
		}
		// Trigger: pull the emergency brake and stop jobs.
		atomic.StoreInt32(&diskEmergency, 1)
		fmt.Printf(
			"🛑 [disk] Low space: free=%s (%dB) (< %s, %dB, pause=%dGB resume=%dGB, inFlight=%s, %dB) -> stop jobs + block autostart via diskEmergency (path=%s)\n",
			formatBytesSI(u64ToI64(free)), free,
			formatBytesSI(u64ToI64(pauseNeed)), pauseNeed,
			pauseGB, resumeGB,
			formatBytesSI(u64ToI64(inFlight)), inFlight,
			dir,
		)
		stopped := stopAllStoppableJobs()
		if stopped > 0 {
			fmt.Printf("🛑 [disk] Stop requested for %d job(s)\n", stopped)
		}
	}
}
// setJobPhase records the finalize phase and a progress value (clamped to
// [0, 100]) on the job, then pushes a jobs update to SSE clients.
func setJobPhase(job *RecordJob, phase string, progress int) {
	switch {
	case progress < 0:
		progress = 0
	case progress > 100:
		progress = 100
	}
	jobsMu.Lock()
	job.Phase = phase
	job.Progress = progress
	jobsMu.Unlock()
	notifyJobsChanged()
}
// ffDurationRe matches the "Duration: HH:MM:SS.xx" header in `ffmpeg -i`
// output. Hoisted to package scope so it is compiled once, not on every
// fallback probe (the original compiled it inside the function).
var ffDurationRe = regexp.MustCompile(`Duration:\s*(\d+):(\d+):(\d+(?:\.\d+)?)`)

// durationSecondsCached returns the media duration of path in seconds.
// Results are memoized in durCache keyed by (size, mtime), so unchanged
// files are never probed twice. ffprobe is tried first; if that fails, the
// "Duration:" line of `ffmpeg -i` output is parsed as a fallback.
func durationSecondsCached(ctx context.Context, path string) (float64, error) {
	fi, err := os.Stat(path)
	if err != nil {
		return 0, err
	}
	// Cache hit only when size and mtime still match.
	durCache.mu.Lock()
	if e, ok := durCache.m[path]; ok && e.size == fi.Size() && e.mod.Equal(fi.ModTime()) && e.sec > 0 {
		durCache.mu.Unlock()
		return e.sec, nil
	}
	durCache.mu.Unlock()
	// 1) ffprobe (preferred).
	cmd := exec.CommandContext(ctx, ffprobePath,
		"-v", "error",
		"-show_entries", "format=duration",
		"-of", "default=noprint_wrappers=1:nokey=1",
		path,
	)
	out, err := cmd.Output()
	if err == nil {
		s := strings.TrimSpace(string(out))
		sec, err2 := strconv.ParseFloat(s, 64)
		if err2 == nil && sec > 0 {
			durCache.mu.Lock()
			durCache.m[path] = durEntry{size: fi.Size(), mod: fi.ModTime(), sec: sec}
			durCache.mu.Unlock()
			return sec, nil
		}
	}
	// 2) Fallback: parse "Duration: HH:MM:SS.xx" from `ffmpeg -i`.
	cmd2 := exec.CommandContext(ctx, ffmpegPath, "-i", path)
	b, _ := cmd2.CombinedOutput() // ffmpeg usually exits non-zero here; the output is still usable
	m := ffDurationRe.FindStringSubmatch(string(b))
	if len(m) != 4 {
		return 0, fmt.Errorf("duration not found")
	}
	hh, _ := strconv.ParseFloat(m[1], 64)
	mm, _ := strconv.ParseFloat(m[2], 64)
	ss, _ := strconv.ParseFloat(m[3], 64)
	sec := hh*3600 + mm*60 + ss
	if sec <= 0 {
		return 0, fmt.Errorf("invalid duration")
	}
	durCache.mu.Lock()
	durCache.m[path] = durEntry{size: fi.Size(), mod: fi.ModTime(), sec: sec}
	durCache.mu.Unlock()
	return sec, nil
}
// main.go
// RecorderSettings is the persisted application configuration
// (recorder_settings.json). The JSON tags define the on-disk/UI format.
type RecorderSettings struct {
	RecordDir               string `json:"recordDir"`
	DoneDir                 string `json:"doneDir"`
	FFmpegPath              string `json:"ffmpegPath"`
	AutoAddToDownloadList   bool   `json:"autoAddToDownloadList"`
	AutoStartAddedDownloads bool   `json:"autoStartAddedDownloads"`
	UseChaturbateAPI        bool   `json:"useChaturbateApi"`
	UseMyFreeCamsWatcher    bool   `json:"useMyFreeCamsWatcher"`
	// When enabled, finished downloads smaller than the threshold below are deleted automatically.
	AutoDeleteSmallDownloads        bool   `json:"autoDeleteSmallDownloads"`
	AutoDeleteSmallDownloadsBelowMB int    `json:"autoDeleteSmallDownloadsBelowMB"`
	BlurPreviews                    bool   `json:"blurPreviews"`
	TeaserPlayback                  string `json:"teaserPlayback"` // still | hover | all
	TeaserAudio                     bool   `json:"teaserAudio"`    // play preview/teaser with sound
	// EncryptedCookies contains base64(nonce+ciphertext) of a JSON cookie map.
	EncryptedCookies string `json:"encryptedCookies"`
}
var (
	settingsMu sync.Mutex
	// settings holds the current configuration (guarded by settingsMu).
	// These are the defaults used until the settings JSON is loaded.
	settings = RecorderSettings{
		RecordDir:                       "/records",
		DoneDir:                         "/records/done",
		FFmpegPath:                      "",
		AutoAddToDownloadList:           false,
		AutoStartAddedDownloads:         false,
		UseChaturbateAPI:                false,
		UseMyFreeCamsWatcher:            false,
		AutoDeleteSmallDownloads:        false,
		AutoDeleteSmallDownloadsBelowMB: 50,
		BlurPreviews:                    false,
		TeaserPlayback:                  "hover",
		TeaserAudio:                     false,
		EncryptedCookies:                "",
	}
	// settingsFile is the default file name; overridable via RECORDER_SETTINGS_FILE.
	settingsFile = "recorder_settings.json"
)
// settingsFilePath returns the location of the settings JSON. The
// RECORDER_SETTINGS_FILE env var overrides the default file name; the name
// is then resolved relative to the EXE/app directory when possible.
func settingsFilePath() string {
	name := strings.TrimSpace(os.Getenv("RECORDER_SETTINGS_FILE"))
	if name == "" {
		name = settingsFile
	}
	resolved, err := resolvePathRelativeToApp(name)
	if err == nil && strings.TrimSpace(resolved) != "" {
		return resolved
	}
	// Fall back to the name as-is (e.g. during "go run" without an app dir).
	return name
}
// getSettings returns a snapshot copy of the current settings under the lock.
func getSettings() RecorderSettings {
	settingsMu.Lock()
	defer settingsMu.Unlock()
	return settings
}
// detectFFmpegPath locates the ffmpeg binary. Search order:
// 0) settings override, 1) FFMPEG_PATH env, 2) executable directory,
// 3) $PATH, 4) bare "ffmpeg" as last resort.
//
// NOTE(review): the package var ffmpegPath is initialized from this function
// at package-init time, i.e. before a settings file could have been loaded,
// so the settings override only takes effect if this is called again later
// — confirm against loadSettings/startup order.
func detectFFmpegPath() string {
	// 0. Settings override (ffmpegPath in recorder_settings.json / UI).
	s := getSettings()
	if p := strings.TrimSpace(s.FFmpegPath); p != "" {
		// Resolve relative to the app dir if needed.
		if !filepath.IsAbs(p) {
			if abs, err := resolvePathRelativeToApp(p); err == nil {
				p = abs
			}
		}
		return p
	}
	// 1. FFMPEG_PATH environment variable override.
	if p := strings.TrimSpace(os.Getenv("FFMPEG_PATH")); p != "" {
		if abs, err := filepath.Abs(p); err == nil {
			return abs
		}
		return p
	}
	// 2. ffmpeg / ffmpeg.exe in the same directory as this executable.
	if exe, err := os.Executable(); err == nil {
		exeDir := filepath.Dir(exe)
		candidates := []string{
			filepath.Join(exeDir, "ffmpeg"),
			filepath.Join(exeDir, "ffmpeg.exe"),
		}
		for _, c := range candidates {
			if fi, err := os.Stat(c); err == nil && !fi.IsDir() {
				return c
			}
		}
	}
	// 3. Look up ffmpeg on PATH and make it absolute.
	if lp, err := exec.LookPath("ffmpeg"); err == nil {
		if abs, err2 := filepath.Abs(lp); err2 == nil {
			return abs
		}
		return lp
	}
	// 4. Fallback: a bare "ffmpeg" may still fail at exec time.
	return "ffmpeg"
}
// removeGeneratedForID deletes every generated asset (meta dir, legacy
// dirs, temp HLS preview segments, legacy thumbs/teasers) belonging to a
// recording ID. The ID is normalized exactly like when the assets were
// created so the folder names match.
func removeGeneratedForID(id string) {
	// Canonical ID: same normalization as when the generated folders were created.
	id = strings.TrimSpace(id)
	if id == "" {
		return
	}
	// In case someone passes "file.mp4".
	id = strings.TrimSuffix(id, filepath.Ext(id))
	// Strip the HOT prefix.
	id = stripHotPrefix(id)
	// Important: exactly the same normalization as everywhere else (folder names!).
	var err error
	id, err = sanitizeID(id)
	if err != nil || id == "" {
		return
	}
	// 1) New layout: generated/meta/<id>/ ...
	if root, _ := generatedMetaRoot(); strings.TrimSpace(root) != "" {
		_ = os.RemoveAll(filepath.Join(root, id))
	}
	// 1b) Legacy layout: generated/<id>/ (old assets may still exist).
	if root, _ := generatedRoot(); strings.TrimSpace(root) != "" {
		_ = os.RemoveAll(filepath.Join(root, id))
	}
	// 2) Clean up temp HLS preview segments (%TEMP%/rec_preview/<assetID>).
	_ = os.RemoveAll(filepath.Join(os.TempDir(), "rec_preview", id))
	// 3) Legacy cleanup (best effort).
	thumbsLegacy, _ := generatedThumbsRoot()
	teaserLegacy, _ := generatedTeaserRoot()
	if strings.TrimSpace(thumbsLegacy) != "" {
		_ = os.RemoveAll(filepath.Join(thumbsLegacy, id))
		_ = os.Remove(filepath.Join(thumbsLegacy, id+".jpg"))
	}
	if strings.TrimSpace(teaserLegacy) != "" {
		_ = os.Remove(filepath.Join(teaserLegacy, id+".mp4"))
		_ = os.Remove(filepath.Join(teaserLegacy, id+"_teaser.mp4"))
	}
}
// purgeDurationCacheForPath drops the cached media duration for the given
// path; blank paths are a no-op.
func purgeDurationCacheForPath(p string) {
	key := strings.TrimSpace(p)
	if key == "" {
		return
	}
	durCache.mu.Lock()
	defer durCache.mu.Unlock()
	delete(durCache.m, key)
}
// renameGenerated moves legacy generated assets (thumbs directory, teaser
// mp4) from oldID to newID. When an asset already exists under newID, the
// old one is removed instead of overwritten. Unlike the previous version,
// root-resolution errors / empty roots are skipped instead of silently
// joining against "" and operating on bogus relative paths.
func renameGenerated(oldID, newID string) {
	if thumbsRoot, err := generatedThumbsRoot(); err == nil && strings.TrimSpace(thumbsRoot) != "" {
		renameOrDropGenerated(
			filepath.Join(thumbsRoot, oldID),
			filepath.Join(thumbsRoot, newID),
			true,
		)
	}
	if teaserRoot, err := generatedTeaserRoot(); err == nil && strings.TrimSpace(teaserRoot) != "" {
		renameOrDropGenerated(
			filepath.Join(teaserRoot, oldID+".mp4"),
			filepath.Join(teaserRoot, newID+".mp4"),
			false,
		)
	}
}

// renameOrDropGenerated renames src to dst when dst does not exist yet;
// otherwise src is removed (recursively when isDir is true). Best effort:
// all errors are ignored, matching the previous behavior.
func renameOrDropGenerated(src, dst string, isDir bool) {
	if _, err := os.Stat(src); err != nil {
		return // nothing to move
	}
	if _, err := os.Stat(dst); os.IsNotExist(err) {
		_ = os.Rename(src, dst)
		return
	}
	if isDir {
		_ = os.RemoveAll(src)
	} else {
		_ = os.Remove(src)
	}
}
// loadSettings reads the settings file (best effort: on any read/parse error
// the compiled-in defaults are kept), normalizes a few fields, ensures the
// record/done directories exist and (re)detects the ffmpeg/ffprobe binaries.
func loadSettings() {
	p := settingsFilePath()
	b, err := os.ReadFile(p)
	fmt.Println("🔧 settingsFile:", p)
	if err == nil {
		s := getSettings() // ✅ start from the current defaults
		if json.Unmarshal(b, &s) == nil {
			if strings.TrimSpace(s.RecordDir) != "" {
				s.RecordDir = filepath.Clean(strings.TrimSpace(s.RecordDir))
			}
			if strings.TrimSpace(s.DoneDir) != "" {
				s.DoneDir = filepath.Clean(strings.TrimSpace(s.DoneDir))
			}
			if strings.TrimSpace(s.FFmpegPath) != "" {
				s.FFmpegPath = strings.TrimSpace(s.FFmpegPath)
			}
			// teaser playback: only "still" | "hover" | "all", default "hover"
			s.TeaserPlayback = strings.ToLower(strings.TrimSpace(s.TeaserPlayback))
			if s.TeaserPlayback == "" {
				s.TeaserPlayback = "hover"
			}
			if s.TeaserPlayback != "still" && s.TeaserPlayback != "hover" && s.TeaserPlayback != "all" {
				s.TeaserPlayback = "hover"
			}
			// auto-delete threshold: clamp to [0, 100000] MB
			if s.AutoDeleteSmallDownloadsBelowMB < 0 {
				s.AutoDeleteSmallDownloadsBelowMB = 0
			}
			if s.AutoDeleteSmallDownloadsBelowMB > 100_000 {
				s.AutoDeleteSmallDownloadsBelowMB = 100_000
			}
			settingsMu.Lock()
			settings = s
			settingsMu.Unlock()
		}
	}
	// make sure the configured directories exist
	s := getSettings()
	recordAbs, _ := resolvePathRelativeToApp(s.RecordDir)
	doneAbs, _ := resolvePathRelativeToApp(s.DoneDir)
	if strings.TrimSpace(recordAbs) != "" {
		_ = os.MkdirAll(recordAbs, 0o755)
	}
	if strings.TrimSpace(doneAbs) != "" {
		_ = os.MkdirAll(doneAbs, 0o755)
	}
	// determine the ffmpeg path from settings/env/PATH
	ffmpegPath = detectFFmpegPath()
	fmt.Println("🔍 ffmpegPath:", ffmpegPath)
	ffprobePath = detectFFprobePath()
	fmt.Println("🔍 ffprobePath:", ffprobePath)
}
// saveSettingsToDisk serializes the current settings as indented JSON and
// writes them atomically to the settings file. Failures are only logged.
func saveSettingsToDisk() {
	data, err := json.MarshalIndent(getSettings(), "", " ")
	if err != nil {
		fmt.Println("⚠️ settings marshal:", err)
		return
	}
	data = append(data, '\n')
	if err = atomicWriteFile(settingsFilePath(), data); err != nil {
		fmt.Println("⚠️ settings write:", err)
	}
}
// recordSettingsHandler serves the recorder settings endpoint:
// GET returns the current settings as JSON; POST validates and normalizes
// the payload, creates the target directories, persists the settings and
// re-resolves ffmpeg/ffprobe. Any other method gets a 405.
func recordSettingsHandler(w http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case http.MethodGet:
		w.Header().Set("Content-Type", "application/json")
		w.Header().Set("Cache-Control", "no-store")
		_ = json.NewEncoder(w).Encode(getSettings())
		return
	case http.MethodPost:
		var in RecorderSettings
		if err := json.NewDecoder(r.Body).Decode(&in); err != nil {
			http.Error(w, "invalid json: "+err.Error(), http.StatusBadRequest)
			return
		}
		// --- normalize (IMPORTANT: trim first, then empty-check, then clean) ---
		recRaw := strings.TrimSpace(in.RecordDir)
		doneRaw := strings.TrimSpace(in.DoneDir)
		if recRaw == "" || doneRaw == "" {
			http.Error(w, "recordDir und doneDir dürfen nicht leer sein", http.StatusBadRequest)
			return
		}
		in.RecordDir = filepath.Clean(recRaw)
		in.DoneDir = filepath.Clean(doneRaw)
		// optional but strongly recommended: forbid "."
		if in.RecordDir == "." || in.DoneDir == "." {
			http.Error(w, "recordDir/doneDir dürfen nicht '.' sein", http.StatusBadRequest)
			return
		}
		in.FFmpegPath = strings.TrimSpace(in.FFmpegPath)
		// teaser playback: only "still" | "hover" | "all", default "hover"
		in.TeaserPlayback = strings.ToLower(strings.TrimSpace(in.TeaserPlayback))
		if in.TeaserPlayback == "" {
			in.TeaserPlayback = "hover"
		}
		if in.TeaserPlayback != "still" && in.TeaserPlayback != "hover" && in.TeaserPlayback != "all" {
			in.TeaserPlayback = "hover"
		}
		// auto-delete threshold: clamp to [0, 100000] MB
		if in.AutoDeleteSmallDownloadsBelowMB < 0 {
			in.AutoDeleteSmallDownloadsBelowMB = 0
		}
		if in.AutoDeleteSmallDownloadsBelowMB > 100_000 {
			in.AutoDeleteSmallDownloadsBelowMB = 100_000
		}
		// --- ensure folders (report errors, e.g. missing permissions) ---
		recAbs, err := resolvePathRelativeToApp(in.RecordDir)
		if err != nil {
			http.Error(w, "ungültiger recordDir: "+err.Error(), http.StatusBadRequest)
			return
		}
		doneAbs, err := resolvePathRelativeToApp(in.DoneDir)
		if err != nil {
			http.Error(w, "ungültiger doneDir: "+err.Error(), http.StatusBadRequest)
			return
		}
		if err := os.MkdirAll(recAbs, 0o755); err != nil {
			http.Error(w, "konnte recordDir nicht erstellen: "+err.Error(), http.StatusBadRequest)
			return
		}
		if err := os.MkdirAll(doneAbs, 0o755); err != nil {
			http.Error(w, "konnte doneDir nicht erstellen: "+err.Error(), http.StatusBadRequest)
			return
		}
		// ✅ update the in-memory settings
		settingsMu.Lock()
		settings = in
		settingsMu.Unlock()
		// ✅ persist the settings to disk
		saveSettingsToDisk()
		// ✅ re-resolve ffmpeg/ffprobe after changes
		// Tip: when the user sets FFmpegPath explicitly, use it directly.
		if strings.TrimSpace(in.FFmpegPath) != "" {
			ffmpegPath = in.FFmpegPath
		} else {
			ffmpegPath = detectFFmpegPath()
		}
		fmt.Println("🔍 ffmpegPath:", ffmpegPath)
		ffprobePath = detectFFprobePath()
		fmt.Println("🔍 ffprobePath:", ffprobePath)
		w.Header().Set("Content-Type", "application/json")
		w.Header().Set("Cache-Control", "no-store")
		_ = json.NewEncoder(w).Encode(getSettings())
		return
	default:
		http.Error(w, "Nur GET/POST erlaubt", http.StatusMethodNotAllowed)
		return
	}
}
// settingsBrowse opens a native picker for the requested target:
// "ffmpeg" uses a file dialog, "record"/"done" a directory dialog. A user
// cancel answers 204 No Content (easy for the frontend to detect); success
// returns {"path": ...}, rewritten relative to the exe dir when possible.
func settingsBrowse(w http.ResponseWriter, r *http.Request) {
	target := r.URL.Query().Get("target")
	switch target {
	case "record", "done", "ffmpeg":
		// valid
	default:
		http.Error(w, "target muss record, done oder ffmpeg sein", http.StatusBadRequest)
		return
	}
	var picked string
	var err error
	if target == "ffmpeg" {
		// file picker for ffmpeg.exe
		picked, err = dialog.File().Title("ffmpeg.exe auswählen").Load()
	} else {
		// directory picker for record/done
		picked, err = dialog.Directory().Title("Ordner auswählen").Browse()
	}
	if err != nil {
		if strings.Contains(strings.ToLower(err.Error()), "cancel") {
			w.WriteHeader(http.StatusNoContent)
			return
		}
		http.Error(w, "auswahl fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	// optional: inside the exe dir -> report as a RELATIVE path
	picked = maybeMakeRelativeToExe(picked)
	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(map[string]string{"path": picked})
}
// maybeMakeRelativeToExe converts abs into a path relative to the directory
// of the running executable when abs lies inside it; otherwise (or on any
// error) the input is returned untouched. Relative results use forward
// slashes, which is friendlier for the frontend.
func maybeMakeRelativeToExe(abs string) string {
	exe, err := os.Executable()
	if err != nil {
		return abs
	}
	rel, err := filepath.Rel(filepath.Dir(exe), abs)
	if err != nil {
		return abs
	}
	switch {
	case rel == "." || rel == "..":
		return abs
	case strings.HasPrefix(rel, ".."+string(os.PathSeparator)):
		// target lies outside the exe dir -> keep the absolute path
		return abs
	}
	return filepath.ToSlash(rel)
}
// Status models the reachability of an MFC room as seen by the checker.
type Status int

const (
	StatusUnknown Status = iota // not determined (yet)
	StatusPublic                // room is live and public
	StatusPrivate               // room is in a private show
	StatusOffline               // room exists but is offline
	StatusNotExist              // no such room
)

// String returns the canonical upper-case token for the status; any value
// outside the known set maps to "UNKNOWN".
func (s Status) String() string {
	switch s {
	case StatusNotExist:
		return "NOTEXIST"
	case StatusOffline:
		return "OFFLINE"
	case StatusPrivate:
		return "PRIVATE"
	case StatusPublic:
		return "PUBLIC"
	}
	return "UNKNOWN"
}
// HTTPClient bundles an http.Client with a default User-Agent header
// (mirrors internal.Req from the DVR code).
type HTTPClient struct {
	client    *http.Client
	userAgent string
}

// NewHTTPClient builds the shared HTTP client with a 10-second timeout.
// An empty userAgent falls back to a generic desktop UA string.
func NewHTTPClient(userAgent string) *HTTPClient {
	ua := userAgent
	if ua == "" {
		// default when no UA is supplied
		ua = "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"
	}
	httpClient := &http.Client{Timeout: 10 * time.Second}
	return &HTTPClient{client: httpClient, userAgent: ua}
}
// NewRequest creates a context-bound request carrying the client's
// User-Agent, a permissive Accept header and cookies parsed from a
// "name=value; foo=bar" string.
func (h *HTTPClient) NewRequest(ctx context.Context, method, url, cookieStr string) (*http.Request, error) {
	req, err := http.NewRequestWithContext(ctx, method, url, nil)
	if err != nil {
		return nil, err
	}
	// base headers that are always set
	ua := h.userAgent
	if ua == "" {
		ua = "Mozilla/5.0"
	}
	req.Header.Set("User-Agent", ua)
	req.Header.Set("Accept", "*/*")
	// cookie string like "name=value; foo=bar"
	addCookiesFromString(req, cookieStr)
	return req, nil
}
// FetchPage loads a page via GET (with UA + cookies) and returns the body as
// a string. Known protection pages (Cloudflare interstitial, age gate) yield
// descriptive errors instead of the generic downstream "room dossier not
// found" failure.
// NOTE(review): the body is read and the protection-page checks run BEFORE
// the HTTP status check — presumably so protection pages (often served with
// non-200 codes) produce the more specific error; confirm before reordering.
func (h *HTTPClient) FetchPage(ctx context.Context, url, cookieStr string) (string, error) {
	req, err := h.NewRequest(ctx, http.MethodGet, url, cookieStr)
	if err != nil {
		return "", err
	}
	resp, err := h.client.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	body := string(data)
	// slightly more telling errors than just "room dossier not found"
	if strings.Contains(body, "<title>Just a moment...</title>") {
		return "", errors.New("Schutzseite von Cloudflare erhalten (\"Just a moment...\") kein Room-HTML")
	}
	if strings.Contains(body, "Verify your age") {
		return "", errors.New("Altersverifikationsseite erhalten kein Room-HTML")
	}
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("HTTP %d beim Laden von %s", resp.StatusCode, url)
	}
	return body, nil
}
// remuxTSToMP4 losslessly rewraps an MPEG-TS recording into an MP4 with the
// moov atom up front, equivalent to:
//
//	ffmpeg -y -i in.ts -c copy -movflags +faststart out.mp4
func remuxTSToMP4(tsPath, mp4Path string) error {
	args := []string{
		"-y",
		"-i", tsPath,
		"-c", "copy",
		"-movflags", "+faststart",
		mp4Path,
	}
	cmd := exec.Command(ffmpegPath, args...)
	var errBuf bytes.Buffer
	cmd.Stderr = &errBuf
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("ffmpeg remux failed: %v (%s)", err, errBuf.String())
	}
	return nil
}
// parseFFmpegOutTime converts an ffmpeg "HH:MM:SS(.frac)" out_time value to
// seconds; any malformed input yields 0.
func parseFFmpegOutTime(v string) float64 {
	fields := strings.Split(strings.TrimSpace(v), ":")
	if len(fields) != 3 {
		return 0
	}
	hours, errH := strconv.Atoi(fields[0])
	mins, errM := strconv.Atoi(fields[1])
	secs, errS := strconv.ParseFloat(fields[2], 64) // seconds may carry decimals
	if errH != nil || errM != nil || errS != nil {
		return 0
	}
	return float64(hours)*3600 + float64(mins)*60 + secs
}
// remuxTSToMP4WithProgress remuxes tsPath into mp4Path (stream copy +
// faststart) while reporting progress. ffmpeg's "-progress pipe:1" stream is
// parsed line by line (key=value); the ratio is preferably derived from
// out-time vs. durationSec, with written-bytes vs. inSize as fallback.
//
// Parameters:
//
//	ctx         cancels the ffmpeg process
//	durationSec expected input duration (0 = unknown)
//	inSize      input size in bytes (0 = unknown), byte-ratio fallback
//	onRatio     optional callback receiving a value clamped to [0,1]
func remuxTSToMP4WithProgress(
	ctx context.Context,
	tsPath, mp4Path string,
	durationSec float64,
	inSize int64,
	onRatio func(r float64),
) error {
	// ffmpeg progress arrives on stdout as key=value lines
	cmd := exec.CommandContext(ctx, ffmpegPath,
		"-y",
		"-nostats",
		"-progress", "pipe:1",
		"-i", tsPath,
		"-c", "copy",
		"-movflags", "+faststart",
		mp4Path,
	)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}
	var stderr bytes.Buffer
	cmd.Stderr = &stderr
	if err := cmd.Start(); err != nil {
		return err
	}
	sc := bufio.NewScanner(stdout)
	sc.Buffer(make([]byte, 0, 64*1024), 1024*1024)
	var (
		lastOutSec  float64
		lastTotalSz int64
	)
	send := func(outSec float64, totalSize int64, force bool) {
		// preferred: time/duration ratio
		if durationSec > 0 && outSec > 0 {
			r := outSec / durationSec
			if r < 0 {
				r = 0
			}
			if r > 1 {
				r = 1
			}
			if onRatio != nil {
				onRatio(r)
			}
			return
		}
		// fallback: bytes (usually okay-ish for a remux)
		if inSize > 0 && totalSize > 0 {
			r := float64(totalSize) / float64(inSize)
			if r < 0 {
				r = 0
			}
			if r > 1 {
				r = 1
			}
			if onRatio != nil {
				onRatio(r)
			}
			return
		}
		// force (e.g. progress=end)
		if force && onRatio != nil {
			onRatio(1)
		}
	}
	for sc.Scan() {
		line := strings.TrimSpace(sc.Text())
		if line == "" {
			continue
		}
		k, v, ok := strings.Cut(line, "=")
		if !ok {
			continue
		}
		switch k {
		case "out_time_us", "out_time_ms":
			// BUGFIX: despite its name, ffmpeg emits out_time_ms in
			// MICROseconds (same unit as out_time_us). Treating it as
			// milliseconds inflated the intermediate ratio 1000x, which
			// made the reported progress jump straight to 100%.
			if n, err := strconv.ParseInt(strings.TrimSpace(v), 10, 64); err == nil && n > 0 {
				lastOutSec = float64(n) / 1_000_000.0
				send(lastOutSec, lastTotalSz, false)
			}
		case "out_time":
			if s := parseFFmpegOutTime(v); s > 0 {
				lastOutSec = s
				send(lastOutSec, lastTotalSz, false)
			}
		case "total_size":
			if n, err := strconv.ParseInt(strings.TrimSpace(v), 10, 64); err == nil && n > 0 {
				lastTotalSz = n
				send(lastOutSec, lastTotalSz, false)
			}
		case "progress":
			if strings.TrimSpace(v) == "end" {
				send(lastOutSec, lastTotalSz, true)
			}
		}
	}
	if err := cmd.Wait(); err != nil {
		return fmt.Errorf("ffmpeg remux failed: %v (%s)", err, strings.TrimSpace(stderr.String()))
	}
	return nil
}
// --- MP4 streaming optimization (fast start) ---
// "Fast start" means: moov before mdat (the browser can read metadata
// immediately without downloading the whole file).

// isFastStartMP4 reports whether the MP4 at path has its moov box before the
// mdat box. It walks up to 256 top-level boxes; on ANY read/parse ambiguity
// (short file, size-0 box, malformed size) it returns (true, nil) so callers
// leave the file untouched rather than rewriting something unparseable.
func isFastStartMP4(path string) (bool, error) {
	f, err := os.Open(path)
	if err != nil {
		return false, err
	}
	defer f.Close()
	for i := 0; i < 256; i++ {
		var hdr [8]byte
		if _, err := io.ReadFull(f, hdr[:]); err != nil {
			// unclear/truncated -> do not touch
			return true, nil
		}
		sz32 := binary.BigEndian.Uint32(hdr[0:4])
		typ := string(hdr[4:8])
		var boxSize int64
		headerSize := int64(8)
		if sz32 == 0 {
			// size 0 means "box extends to EOF" -> cannot skip reliably
			return true, nil
		}
		if sz32 == 1 {
			// large-size box: the real 64-bit size follows the header
			var ext [8]byte
			if _, err := io.ReadFull(f, ext[:]); err != nil {
				return true, nil
			}
			boxSize = int64(binary.BigEndian.Uint64(ext[:]))
			headerSize = 16
		} else {
			boxSize = int64(sz32)
		}
		if boxSize < headerSize {
			// malformed box -> do not touch
			return true, nil
		}
		switch typ {
		case "moov":
			return true, nil
		case "mdat":
			return false, nil
		}
		if _, err := f.Seek(boxSize-headerSize, io.SeekCurrent); err != nil {
			return true, nil
		}
	}
	return true, nil
}
// ensureFastStartMP4 rewrites the MP4 at path with "-movflags +faststart"
// (stream copy) when its moov box is not already in front of mdat. No-op for
// non-.mp4 paths, when ffmpeg is unavailable, or when the file already
// qualifies. The replacement uses a tmp/bak rename dance so a failed final
// rename can be rolled back to the original.
func ensureFastStartMP4(path string) error {
	path = strings.TrimSpace(path)
	if path == "" || !strings.EqualFold(filepath.Ext(path), ".mp4") {
		return nil
	}
	if strings.TrimSpace(ffmpegPath) == "" {
		return nil
	}
	ok, err := isFastStartMP4(path)
	if err == nil && ok {
		return nil
	}
	dir := filepath.Dir(path)
	base := filepath.Base(path)
	tmp := filepath.Join(dir, ".__faststart__"+base+".tmp")
	bak := filepath.Join(dir, ".__faststart__"+base+".bak")
	_ = os.Remove(tmp)
	_ = os.Remove(bak)
	cmd := exec.Command(ffmpegPath,
		"-y",
		"-i", path,
		"-c", "copy",
		"-movflags", "+faststart",
		tmp,
	)
	var stderr bytes.Buffer
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		_ = os.Remove(tmp)
		return fmt.Errorf("ffmpeg faststart failed: %v (%s)", err, strings.TrimSpace(stderr.String()))
	}
	// swap atomically: original -> .bak, tmp -> original, then drop .bak
	if err := os.Rename(path, bak); err != nil {
		_ = os.Remove(tmp)
		return fmt.Errorf("rename original to bak failed: %w", err)
	}
	if err := os.Rename(tmp, path); err != nil {
		// roll back: restore the original from .bak
		_ = os.Rename(bak, path)
		_ = os.Remove(tmp)
		return fmt.Errorf("rename tmp to original failed: %w", err)
	}
	_ = os.Remove(bak)
	return nil
}
// extractLastFrameJPEG grabs one frame ~0.1s before EOF as a 720px-wide JPEG
// via ffmpeg's image2pipe output and returns the raw bytes.
func extractLastFrameJPEG(path string) ([]byte, error) {
	var stdout, errBuf bytes.Buffer
	cmd := exec.Command(
		ffmpegPath,
		"-hide_banner",
		"-loglevel", "error",
		"-sseof", "-0.1",
		"-i", path,
		"-frames:v", "1",
		"-vf", "scale=720:-2",
		"-q:v", "10",
		"-f", "image2pipe",
		"-vcodec", "mjpeg",
		"pipe:1",
	)
	cmd.Stdout = &stdout
	cmd.Stderr = &errBuf
	if err := cmd.Run(); err != nil {
		return nil, fmt.Errorf("ffmpeg last-frame: %w (%s)", err, strings.TrimSpace(errBuf.String()))
	}
	return stdout.Bytes(), nil
}
// extractFrameAtTimeJPEG grabs a single 720px-wide JPEG frame at the given
// offset in seconds; negative offsets are clamped to 0.
func extractFrameAtTimeJPEG(path string, seconds float64) ([]byte, error) {
	offset := seconds
	if offset < 0 {
		offset = 0
	}
	var stdout, errBuf bytes.Buffer
	cmd := exec.Command(
		ffmpegPath,
		"-hide_banner",
		"-loglevel", "error",
		"-ss", fmt.Sprintf("%.3f", offset),
		"-i", path,
		"-frames:v", "1",
		"-vf", "scale=720:-2",
		"-q:v", "10",
		"-f", "image2pipe",
		"-vcodec", "mjpeg",
		"pipe:1",
	)
	cmd.Stdout = &stdout
	cmd.Stderr = &errBuf
	if err := cmd.Run(); err != nil {
		return nil, fmt.Errorf("ffmpeg frame-at-time: %w (%s)", err, strings.TrimSpace(errBuf.String()))
	}
	return stdout.Bytes(), nil
}
// extractLastFrameJPEGScaled grabs a low-res frame near EOF (-0.25s) with a
// configurable width (default 320) and JPEG quality factor (default 14).
// An error is returned when ffmpeg fails or produces no output bytes.
func extractLastFrameJPEGScaled(path string, width int, q int) ([]byte, error) {
	if width <= 0 {
		width = 320
	}
	if q <= 0 {
		q = 14
	}
	var stdout, errBuf bytes.Buffer
	cmd := exec.Command(
		ffmpegPath,
		"-hide_banner", "-loglevel", "error",
		"-sseof", "-0.25",
		"-i", path,
		"-frames:v", "1",
		"-vf", fmt.Sprintf("scale=%d:-2", width),
		"-q:v", strconv.Itoa(q),
		"-f", "image2pipe",
		"pipe:1",
	)
	cmd.Stdout = &stdout
	cmd.Stderr = &errBuf
	if err := cmd.Run(); err != nil {
		return nil, fmt.Errorf("ffmpeg last-frame scaled: %w (%s)", err, strings.TrimSpace(errBuf.String()))
	}
	data := stdout.Bytes()
	if len(data) == 0 {
		return nil, fmt.Errorf("ffmpeg last-frame scaled: empty output")
	}
	return data, nil
}
// extractFirstFrameJPEGScaled grabs the first frame as a low-res JPEG with a
// configurable width (default 320) and quality factor (default 14). Fails
// when ffmpeg errors or produces no output bytes.
func extractFirstFrameJPEGScaled(path string, width int, q int) ([]byte, error) {
	if width <= 0 {
		width = 320
	}
	if q <= 0 {
		q = 14
	}
	var stdout, errBuf bytes.Buffer
	cmd := exec.Command(
		ffmpegPath,
		"-hide_banner", "-loglevel", "error",
		"-ss", "0",
		"-i", path,
		"-frames:v", "1",
		"-vf", fmt.Sprintf("scale=%d:-2", width),
		"-q:v", strconv.Itoa(q),
		"-f", "image2pipe",
		"pipe:1",
	)
	cmd.Stdout = &stdout
	cmd.Stderr = &errBuf
	if err := cmd.Run(); err != nil {
		return nil, fmt.Errorf("ffmpeg first-frame scaled: %w (%s)", err, strings.TrimSpace(errBuf.String()))
	}
	data := stdout.Bytes()
	if len(data) == 0 {
		return nil, fmt.Errorf("ffmpeg first-frame scaled: empty output")
	}
	return data, nil
}
// extractLastFrameFromPreviewDirThumb renders a small (320px) JPEG thumb
// from the newest preview segment, falling back from the last frame to the
// first frame when the last-frame grab fails or is empty.
func extractLastFrameFromPreviewDirThumb(previewDir string) ([]byte, error) {
	seg, err := latestPreviewSegment(previewDir)
	if err != nil {
		return nil, err
	}
	if img, lastErr := extractLastFrameJPEGScaled(seg, 320, 14); lastErr == nil && len(img) > 0 {
		return img, nil
	}
	return extractFirstFrameJPEGScaled(seg, 320, 14)
}
// latestPreviewSegment finds the "newest" preview segment
// (seg_low_XXXXX.ts / seg_hq_XXXXX.ts) in previewDir by taking the
// lexicographically greatest matching file name (segment counters are
// assumed zero-padded so this matches the most recent one).
func latestPreviewSegment(previewDir string) (string, error) {
	entries, err := os.ReadDir(previewDir)
	if err != nil {
		return "", err
	}
	best := ""
	for _, ent := range entries {
		if ent.IsDir() {
			continue
		}
		name := ent.Name()
		isSegment := strings.HasPrefix(name, "seg_low_") || strings.HasPrefix(name, "seg_hq_")
		if !isSegment {
			continue
		}
		if best == "" || name > best {
			best = name
		}
	}
	if best == "" {
		return "", fmt.Errorf("kein Preview-Segment in %s", previewDir)
	}
	return filepath.Join(previewDir, best), nil
}
// extractLastFrameFromPreviewDir produces a JPEG from the newest finished
// preview segment. Segments are small and complete, so one last-frame
// attempt with a first-frame fallback is sufficient.
func extractLastFrameFromPreviewDir(previewDir string) ([]byte, error) {
	seg, err := latestPreviewSegment(previewDir)
	if err != nil {
		return nil, err
	}
	if img, lastErr := extractLastFrameJPEG(seg); lastErr == nil {
		return img, nil
	}
	return extractFirstFrameJPEG(seg)
}
// stripHotPrefix removes a leading "HOT " marker (any letter case) from a
// name and trims the surrounding whitespace.
func stripHotPrefix(s string) string {
	trimmed := strings.TrimSpace(s)
	if len(trimmed) < 4 || !strings.EqualFold(trimmed[:4], "HOT ") {
		return trimmed
	}
	return strings.TrimSpace(trimmed[4:])
}
// --------------------------
// Covers: generated/covers/<category>.<ext>
// --------------------------

// coverInfo is the sidecar metadata stored next to a generated cover image
// (<key>.info.json, see writeCoverInfoBestEffort).
type coverInfo struct {
	Category    string `json:"category"`        // category/tag key the cover belongs to
	Model       string `json:"model,omitempty"` // optional: model the image came from
	Src         string `json:"src,omitempty"`   // optional: source path/URL of the image
	GeneratedAt string `json:"generatedAt"`     // timestamp string set at generation time
}
// normalizeCoverSrc turns an arbitrary cover source (local Windows path, web
// path or URL) into a web-friendly form: backslashes become forward slashes,
// already-web-ish values pass through, and local paths are reduced to their
// "/generated/..." suffix when present.
func normalizeCoverSrc(s string) string {
	trimmed := strings.TrimSpace(s)
	if trimmed == "" {
		return ""
	}
	// Windows path -> URL-ish
	web := strings.ReplaceAll(trimmed, "\\", "/")
	switch {
	case strings.HasPrefix(web, "/generated/"),
		strings.HasPrefix(web, "http://"),
		strings.HasPrefix(web, "https://"):
		// already looks like a web path -> keep
		return web
	}
	// local path: extract the /generated/ part if there is one, e.g.
	// ".../generated/meta/<id>/thumbs.jpg" -> "/generated/meta/<id>/thumbs.jpg"
	if i := strings.Index(web, "/generated/"); i >= 0 {
		return web[i:]
	}
	return web
}
// coverInfoPathForKey returns generated/covers/<key>.info.json.
func coverInfoPathForKey(key string) (string, error) {
	root, err := coversRoot()
	if err != nil {
		return "", err
	}
	name := key + ".info.json"
	return filepath.Join(root, name), nil
}
// writeCoverInfoBestEffort persists the sidecar info JSON for a cover key,
// silently ignoring every failure (best effort).
func writeCoverInfoBestEffort(key string, info coverInfo) {
	target, err := coverInfoPathForKey(key)
	if err != nil {
		return
	}
	data, err := json.MarshalIndent(info, "", " ")
	if err != nil {
		return
	}
	_ = os.MkdirAll(filepath.Dir(target), 0o755)
	_ = os.WriteFile(target, data, 0o644)
}
// readCoverInfoBestEffort loads the sidecar info JSON for a cover key. The
// boolean is false when the file is missing, empty or not valid JSON.
func readCoverInfoBestEffort(key string) (coverInfo, bool) {
	p, err := coverInfoPathForKey(key)
	if err != nil {
		return coverInfo{}, false
	}
	raw, err := os.ReadFile(p)
	if err != nil || len(raw) == 0 {
		return coverInfo{}, false
	}
	var info coverInfo
	if unmarshalErr := json.Unmarshal(raw, &info); unmarshalErr != nil {
		return coverInfo{}, false
	}
	return info, true
}
// drawLabel renders a text badge (semi-transparent black box, white text,
// thin border, small drop shadow) into the bottom-left corner of img. Text
// that does not fit the available width is shortened with a trailing
// ellipsis found via binary search. Blank text, or an image too small to
// hold the badge, draws nothing.
func drawLabel(img draw.Image, text string) {
	text = strings.TrimSpace(text)
	if text == "" {
		return
	}
	face := basicfont.Face7x13
	// layout constants
	const margin = 10
	const padX = 10
	const padY = 8
	b := img.Bounds()
	// maximum text width available (without padding/margins)
	maxTextW := (b.Dx() - 2*margin) - 2*padX
	if maxTextW <= 0 {
		return
	}
	// shorten the text if needed so it fits inside the badge
	measure := func(s string) int {
		d := &font.Drawer{Face: face}
		return d.MeasureString(s).Ceil()
	}
	label := text
	if w := measure(label); w > maxTextW {
		ellipsis := "…"
		rs := []rune(text)
		// hard guard against degenerate input
		if len(rs) == 0 {
			return
		}
		// binary search for the longest prefix that still fits with ellipsis
		lo, hi := 0, len(rs)
		best := ""
		for lo <= hi {
			mid := (lo + hi) / 2
			cand := string(rs[:mid]) + ellipsis
			if measure(cand) <= maxTextW {
				best = cand
				lo = mid + 1
			} else {
				hi = mid - 1
			}
		}
		if best == "" {
			// worst case: just the ellipsis
			label = ellipsis
		} else {
			label = best
		}
	}
	// text metrics
	d := &font.Drawer{Face: face}
	textW := d.MeasureString(label).Ceil()
	textH := face.Metrics().Height.Ceil()
	ascent := face.Metrics().Ascent.Ceil()
	// badge box (bottom left)
	x0 := b.Min.X + margin
	y1 := b.Max.Y - margin
	y0 := y1 - (textH + 2*padY)
	x1 := x0 + (textW + 2*padX)
	// clamp to the right (very narrow images)
	maxX1 := b.Max.X - margin
	if x1 > maxX1 {
		shift := x1 - maxX1
		x0 -= shift
		x1 -= shift
		if x0 < b.Min.X+margin {
			x0 = b.Min.X + margin
			x1 = maxX1
		}
	}
	// clamp to the top (very short images)
	minY0 := b.Min.Y + margin
	if y0 < minY0 {
		y0 = minY0
		y1 = y0 + (textH + 2*padY)
		if y1 > b.Max.Y-margin {
			// not enough room overall
			return
		}
	}
	rect := image.Rect(x0, y0, x1, y1)
	// background
	bg := image.NewUniform(color.RGBA{0, 0, 0, 170})
	draw.Draw(img, rect, bg, image.Point{}, draw.Over)
	// optional: thin border for extra contrast
	border := image.NewUniform(color.RGBA{255, 255, 255, 35})
	// top
	draw.Draw(img, image.Rect(rect.Min.X, rect.Min.Y, rect.Max.X, rect.Min.Y+1), border, image.Point{}, draw.Over)
	// bottom
	draw.Draw(img, image.Rect(rect.Min.X, rect.Max.Y-1, rect.Max.X, rect.Max.Y), border, image.Point{}, draw.Over)
	// left
	draw.Draw(img, image.Rect(rect.Min.X, rect.Min.Y, rect.Min.X+1, rect.Max.Y), border, image.Point{}, draw.Over)
	// right
	draw.Draw(img, image.Rect(rect.Max.X-1, rect.Min.Y, rect.Max.X, rect.Max.Y), border, image.Point{}, draw.Over)
	// text baseline
	tx := x0 + padX
	ty := y0 + padY + ascent
	// tiny drop shadow (readability)
	shadow := &font.Drawer{
		Dst:  img,
		Src:  image.NewUniform(color.RGBA{0, 0, 0, 200}),
		Face: face,
		Dot:  fixed.P(tx+1, ty+1),
	}
	shadow.DrawString(label)
	// text
	fg := &font.Drawer{
		Dst:  img,
		Src:  image.NewUniform(color.RGBA{255, 255, 255, 235}),
		Face: face,
		Dot:  fixed.P(tx, ty),
	}
	fg.DrawString(label)
}
// splitTagsLoose tokenizes a raw tag string on newlines, commas, semicolons
// and pipes, trims each token and drops case-insensitive duplicates while
// keeping first-seen order and original casing. Blank input yields nil.
func splitTagsLoose(raw string) []string {
	trimmed := strings.TrimSpace(raw)
	if trimmed == "" {
		return nil
	}
	isSep := func(r rune) bool {
		return r == '\n' || r == ',' || r == ';' || r == '|'
	}
	tokens := strings.FieldsFunc(trimmed, isSep)
	seen := make(map[string]struct{}, len(tokens))
	result := make([]string, 0, len(tokens))
	for _, tok := range tokens {
		tag := strings.TrimSpace(tok)
		if tag == "" {
			continue
		}
		key := strings.ToLower(tag)
		if _, dup := seen[key]; dup {
			continue
		}
		seen[key] = struct{}{}
		result = append(result, tag)
	}
	return result
}
// hasTag reports whether the raw tag string contains want, compared
// case-insensitively after trimming; a blank want never matches.
func hasTag(tagsRaw string, want string) bool {
	needle := strings.ToLower(strings.TrimSpace(want))
	if needle == "" {
		return false
	}
	for _, tag := range splitTagsLoose(tagsRaw) {
		if strings.ToLower(strings.TrimSpace(tag)) == needle {
			return true
		}
	}
	return false
}
// ✅ Adapt this struct/these helpers to your real ModelStore API.

// coverModel is the minimal model view the cover picker works with.
type coverModel struct {
	Key  string // model key/name, e.g. as used for the done/<model> folders
	Tags string // raw tag string (csv/newline/... separated)
}
// listModelsForCovers adapts the global model store into the minimal
// (key, tags) view needed by the cover picker, skipping entries without a
// key. Errors when no store is wired up.
func listModelsForCovers() ([]coverModel, error) {
	if coverModelStore == nil {
		return nil, fmt.Errorf("model store not set")
	}
	models := coverModelStore.List()
	result := make([]coverModel, 0, len(models))
	for _, m := range models {
		k := strings.TrimSpace(m.ModelKey)
		if k == "" {
			continue
		}
		result = append(result, coverModel{Key: k, Tags: m.Tags})
	}
	return result, nil
}
// pickRandomThumbForCategory picks a random thumbnail for a category: it
// filters the known models by tag, shuffles them, then for each candidate
// model collects finished downloads from done/<model>/ and
// done/keep/<model>/, picks one at random, (best effort) generates its
// assets and returns the thumb path when a non-empty thumb file exists.
// Errors when no model carries the tag or no usable download/thumb exists.
func pickRandomThumbForCategory(ctx context.Context, category string) (thumbPath string, err error) {
	category = strings.TrimSpace(category)
	if category == "" {
		return "", fmt.Errorf("category empty")
	}
	// optional: bail out early when the request is already gone
	select {
	case <-ctx.Done():
		return "", ctx.Err()
	default:
	}
	models, err := listModelsForCovers()
	if err != nil {
		return "", err
	}
	// 1) filter candidate models by tag
	cands := make([]coverModel, 0, 64)
	for _, m := range models {
		key := strings.TrimSpace(m.Key)
		if key == "" {
			continue
		}
		if hasTag(m.Tags, category) {
			cands = append(cands, coverModel{Key: key, Tags: m.Tags})
		}
	}
	if len(cands) == 0 {
		return "", fmt.Errorf("no model with tag")
	}
	// 2) shuffle the candidates and try them one after another (more robust
	// than a single random pick)
	rand.Shuffle(len(cands), func(i, j int) { cands[i], cands[j] = cands[j], cands[i] })
	// 3) resolve the done dir once
	s := getSettings()
	doneAbs, derr := resolvePathRelativeToApp(s.DoneDir)
	if derr != nil || strings.TrimSpace(doneAbs) == "" {
		return "", fmt.Errorf("doneDir resolve failed: %v", derr)
	}
	type candFile struct {
		videoPath string
		id        string
	}
	// finished video files only (skip .part/.tmp in-progress downloads)
	isVideo := func(name string) bool {
		low := strings.ToLower(name)
		if strings.Contains(low, ".part") || strings.Contains(low, ".tmp") {
			return false
		}
		ext := strings.ToLower(filepath.Ext(name))
		return ext == ".mp4" || ext == ".ts"
	}
	// 4) per matching model: collect files, pick one at random, check thumb
	for _, m := range cands {
		// context check per iteration
		select {
		case <-ctx.Done():
			return "", ctx.Err()
		default:
		}
		modelKey := strings.TrimSpace(m.Key)
		if modelKey == "" {
			continue
		}
		// candidates: done/<model>/ and done/keep/<model>/
		dirs := []string{
			filepath.Join(doneAbs, modelKey),
			filepath.Join(doneAbs, "keep", modelKey),
		}
		files := make([]candFile, 0, 128)
		for _, d := range dirs {
			ents, err := os.ReadDir(d)
			if err != nil {
				continue
			}
			for _, e := range ents {
				if e.IsDir() {
					continue
				}
				name := e.Name()
				if !isVideo(name) {
					continue
				}
				full := filepath.Join(d, name)
				stem := strings.TrimSuffix(name, filepath.Ext(name))
				id := stripHotPrefix(strings.TrimSpace(stem))
				if id == "" {
					continue
				}
				files = append(files, candFile{videoPath: full, id: id})
			}
		}
		if len(files) == 0 {
			// ✅ this model has no downloads (yet) -> try the next model
			continue
		}
		// random file within this model
		cf := files[rand.Intn(len(files))]
		// make sure thumbs exist (best effort)
		_ = ensureAssetsForVideo(cf.videoPath)
		tp, terr := generatedThumbFile(cf.id)
		if terr != nil {
			// try the next model
			continue
		}
		if fi, serr := os.Stat(tp); serr == nil && !fi.IsDir() && fi.Size() > 0 {
			return tp, nil
		}
		// ✅ thumb missing -> try the next model
	}
	return "", fmt.Errorf("no downloads/thumbs for category")
}
// coversRoot resolves the generated/covers directory relative to the app.
func coversRoot() (string, error) {
	rel := filepath.Join("generated", "covers")
	return resolvePathRelativeToApp(rel)
}
// ensureCoversDir resolves the covers root and makes sure the directory
// exists, returning its path.
func ensureCoversDir() (string, error) {
	root, err := coversRoot()
	if err != nil {
		return "", err
	}
	if strings.TrimSpace(root) == "" {
		return "", fmt.Errorf("covers root ist leer")
	}
	if mkErr := os.MkdirAll(root, 0o755); mkErr != nil {
		return "", mkErr
	}
	return root, nil
}
// coverKeyRe matches every run of characters that is not safe in a cover
// file name.
var coverKeyRe = regexp.MustCompile(`[^a-z0-9._-]+`)

// sanitizeCoverKey converts a category/tag into a filesystem-safe cover key:
// lower-cased, spaces and disallowed characters replaced by "_", leading/
// trailing punctuation trimmed and the result capped at 120 characters.
// A blank category gets a stable sha1-based fallback key ("tag_<16 hex>");
// a category that sanitizes down to nothing is rejected.
func sanitizeCoverKey(category string) (string, error) {
	c := strings.ToLower(strings.TrimSpace(category))
	if c == "" {
		// stable fallback derived from the raw input; the hex output is
		// never empty, so no further empty-check is needed here (the old
		// second check was unreachable dead code)
		sum := sha1.Sum([]byte(category))
		c = "tag_" + hex.EncodeToString(sum[:8]) // 16 hex chars suffice
	}
	// Windows & FS safe
	c = strings.ReplaceAll(c, " ", "_")
	c = coverKeyRe.ReplaceAllString(c, "_")
	c = strings.Trim(c, "._-")
	if c == "" {
		return "", fmt.Errorf("category ungültig")
	}
	if len(c) > 120 {
		c = c[:120]
	}
	return c, nil
}
// detectImageExt decides the file extension and canonical content type for
// image bytes: first by the server-supplied Content-Type, then by magic
// bytes, defaulting to JPEG.
func detectImageExt(contentType string, b []byte) (ext string, ct string) {
	lower := strings.ToLower(strings.TrimSpace(contentType))
	// server-supplied content type wins when recognized
	probes := []struct {
		needle, ext, ct string
	}{
		{"image/jpeg", ".jpg", "image/jpeg"},
		{"image/jpg", ".jpg", "image/jpeg"},
		{"image/png", ".png", "image/png"},
		{"image/webp", ".webp", "image/webp"},
		{"image/gif", ".gif", "image/gif"},
	}
	for _, p := range probes {
		if strings.Contains(lower, p.needle) {
			return p.ext, p.ct
		}
	}
	// magic-bytes fallback
	switch {
	case len(b) >= 3 && b[0] == 0xFF && b[1] == 0xD8 && b[2] == 0xFF:
		return ".jpg", "image/jpeg"
	case len(b) >= 8 && bytes.Equal(b[:8], []byte{0x89, 'P', 'N', 'G', 0x0D, 0x0A, 0x1A, 0x0A}):
		return ".png", "image/png"
	case len(b) >= 12 && string(b[:4]) == "RIFF" && string(b[8:12]) == "WEBP":
		return ".webp", "image/webp"
	case len(b) >= 6 && (string(b[:6]) == "GIF87a" || string(b[:6]) == "GIF89a"):
		return ".gif", "image/gif"
	}
	// default: jpg
	return ".jpg", "image/jpeg"
}
// coverPathForCategory returns generated/covers/<key><ext>; an empty ext
// defaults to ".jpg".
func coverPathForCategory(key string, ext string) (string, error) {
	root, err := coversRoot()
	switch {
	case err != nil:
		return "", err
	case strings.TrimSpace(root) == "":
		return "", fmt.Errorf("covers root ist leer")
	}
	suffix := ext
	if suffix == "" {
		suffix = ".jpg"
	}
	return filepath.Join(root, key+suffix), nil
}
// findExistingCoverFile looks for an existing, non-empty cover image for the
// given key, trying the common extensions in a fixed order.
func findExistingCoverFile(key string) (string, os.FileInfo, bool) {
	root, err := coversRoot()
	if err != nil || strings.TrimSpace(root) == "" {
		return "", nil, false
	}
	for _, ext := range [...]string{".jpg", ".png", ".webp", ".gif"} {
		candidate := filepath.Join(root, key+ext)
		fi, statErr := os.Stat(candidate)
		if statErr != nil || fi.IsDir() || fi.Size() == 0 {
			continue
		}
		return candidate, fi, true
	}
	return "", nil, false
}
// downloadBytes fetches an image either from a local "/generated/..." path
// (only that prefix is allowed, with traversal blocked) or from an http(s)
// URL. It returns the raw bytes plus a content type (by file extension for
// local files, the server header for remote ones) and caps reads at 10MB.
// Any other source form is rejected.
func downloadBytes(ctx context.Context, rawURL string, ua string) ([]byte, string, error) {
	rawURL = strings.TrimSpace(rawURL)
	if rawURL == "" {
		return nil, "", fmt.Errorf("src fehlt")
	}
	// ✅ 1) local path: only /generated/... is allowed
	if strings.HasPrefix(rawURL, "/") {
		// normalize the URL path and prevent traversal
		clean := path.Clean(rawURL) // URL-style cleaning
		if !strings.HasPrefix(clean, "/generated/") {
			return nil, "", fmt.Errorf("src ungültig")
		}
		if strings.Contains(clean, "..") {
			return nil, "", fmt.Errorf("src ungültig")
		}
		// "/generated/..." -> "generated/..." (relative to the app)
		rel := strings.TrimPrefix(clean, "/")
		abs, err := resolvePathRelativeToApp(rel)
		if err != nil || strings.TrimSpace(abs) == "" {
			return nil, "", fmt.Errorf("src ungültig")
		}
		f, err := os.Open(abs)
		if err != nil {
			return nil, "", fmt.Errorf("download failed: %v", err)
		}
		defer f.Close()
		// read at most 10MB
		b, err := io.ReadAll(io.LimitReader(f, 10*1024*1024))
		if err != nil {
			return nil, "", fmt.Errorf("download failed: %v", err)
		}
		if len(b) == 0 {
			return nil, "", fmt.Errorf("download empty")
		}
		// rough content type by extension (detectImageExt does magic-byte
		// sniffing later anyway)
		ext := strings.ToLower(filepath.Ext(abs))
		ct := "application/octet-stream"
		switch ext {
		case ".jpg", ".jpeg":
			ct = "image/jpeg"
		case ".png":
			ct = "image/png"
		case ".webp":
			ct = "image/webp"
		case ".gif":
			ct = "image/gif"
		}
		return b, ct, nil
	}
	// ✅ 2) remote URL: as before, http/https only
	u, err := url.Parse(rawURL)
	if err != nil || u.Scheme == "" || u.Host == "" {
		return nil, "", fmt.Errorf("src ungültig")
	}
	if u.Scheme != "http" && u.Scheme != "https" {
		return nil, "", fmt.Errorf("src schema nicht erlaubt")
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, rawURL, nil)
	if err != nil {
		return nil, "", err
	}
	if strings.TrimSpace(ua) == "" {
		ua = "Mozilla/5.0"
	}
	req.Header.Set("User-Agent", ua)
	req.Header.Set("Accept", "image/*,*/*;q=0.8")
	client := &http.Client{Timeout: 12 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		return nil, "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, "", fmt.Errorf("download failed: HTTP %d", resp.StatusCode)
	}
	b, err := io.ReadAll(io.LimitReader(resp.Body, 10*1024*1024)) // max 10MB
	if err != nil {
		return nil, "", err
	}
	if len(b) == 0 {
		return nil, "", fmt.Errorf("download empty")
	}
	return b, resp.Header.Get("Content-Type"), nil
}
// generatedRoot returns the app-relative "generated" asset root.
func generatedRoot() (string, error) {
	return resolvePathRelativeToApp("generated")
}
// generatedMetaRoot returns the app-relative "generated/meta" root that holds
// the per-asset directories.
func generatedMetaRoot() (string, error) {
	return resolvePathRelativeToApp(filepath.Join("generated", "meta"))
}
// Legacy (falls noch alte Assets liegen):
// generatedThumbsRoot returns the legacy "generated/thumbs" root.
func generatedThumbsRoot() (string, error) {
	return resolvePathRelativeToApp(filepath.Join("generated", "thumbs"))
}
// generatedTeaserRoot returns the legacy "generated/teaser" root.
func generatedTeaserRoot() (string, error) {
	return resolvePathRelativeToApp(filepath.Join("generated", "teaser"))
}
// irgendwo auf Package-Level (z.B. in derselben Datei über generatedCover)
// coverBatchMu guards all coverBatch* counters below.
var coverBatchMu sync.Mutex

// Aggregate stats for one burst ("batch") of concurrent cover generations.
// A batch begins when the first request enters (inflight 0 -> 1) and ends
// when the in-flight count drops back to zero; see coverBatchEnter/Leave.
var (
	coverBatchInflight  int       // currently running cover generations
	coverBatchStarted   time.Time // start time of the current batch
	coverBatchTotal     int       // total requests in this batch
	coverBatchForced    int       // requests with refresh/force set
	coverBatchMiss      int       // plain cache misses
	coverBatchErrors    int       // requests that ended with HTTP >= 400
	coverBatchNoThumb   int       // outcome "no-thumb" (no source thumb found)
	coverBatchDecodeErr int       // outcome "decode-failed-no-overlay"
)
// coverBatchEnter registers one cover-generation request with the global
// batch statistics. The first entrant (re)initializes all counters and logs
// the batch start; forced refreshes and plain cache misses are counted
// separately.
func coverBatchEnter(force bool) {
	coverBatchMu.Lock()
	defer coverBatchMu.Unlock()
	if coverBatchInflight == 0 {
		// First request of a new batch: reset every aggregate counter.
		coverBatchStarted = time.Now()
		coverBatchTotal = 0
		coverBatchForced = 0
		coverBatchMiss = 0
		coverBatchErrors = 0
		coverBatchNoThumb = 0
		coverBatchDecodeErr = 0
		log.Printf("[cover] BATCH START")
	}
	coverBatchInflight++
	coverBatchTotal++
	switch {
	case force:
		coverBatchForced++
	default:
		coverBatchMiss++
	}
}
// coverBatchLeave records the outcome of one cover-generation request and,
// when the last in-flight request finishes, logs a summary line for the whole
// batch. status >= 400 counts as an error; the two special outcomes
// "no-thumb" and "decode-failed-no-overlay" get dedicated counters.
func coverBatchLeave(outcome string, status int) {
	coverBatchMu.Lock()
	defer coverBatchMu.Unlock()
	// Coarse but useful outcome stats.
	if status >= 400 {
		coverBatchErrors++
	}
	if outcome == "no-thumb" {
		coverBatchNoThumb++
	} else if outcome == "decode-failed-no-overlay" {
		coverBatchDecodeErr++
	}
	coverBatchInflight--
	if coverBatchInflight > 0 {
		return
	}
	elapsed := time.Since(coverBatchStarted).Round(time.Millisecond)
	log.Printf(
		"[cover] BATCH END total=%d miss=%d forced=%d errors=%d noThumb=%d decodeFail=%d took=%s",
		coverBatchTotal,
		coverBatchMiss,
		coverBatchForced,
		coverBatchErrors,
		coverBatchNoThumb,
		coverBatchDecodeErr,
		elapsed,
	)
	// Clamp: never leave a negative in-flight count behind.
	coverBatchInflight = 0
}
// ----------------------------------------------------------------------
// Helpers for inferring the model name from file stems and thumb paths
var (
	// reModelFromStem captures the model name from stems shaped like
	// "<model>_MM_DD_YYYY__HH-MM-SS", e.g. "sigmasian_01_21_2026__07-28-13".
	reModelFromStem = regexp.MustCompile(`^(.*?)_\d{1,2}_\d{1,2}_\d{4}__\d{1,2}-\d{2}-\d{2}`)
)
// stem is e.g. "sigmasian_01_21_2026__07-28-13" or a parent directory name
// inferModelFromStem extracts the model name from a recording file stem such
// as "sigmasian_01_21_2026__07-28-13" (pattern: <model>_MM_DD_YYYY__HH-MM-SS).
// The "HOT " prefix is stripped first; returns "" when nothing matches.
func inferModelFromStem(stem string) string {
	cleaned := stripHotPrefix(strings.TrimSpace(stem))
	if cleaned == "" {
		return ""
	}
	if m := reModelFromStem.FindStringSubmatch(cleaned); len(m) >= 2 {
		return strings.TrimSpace(m[1])
	}
	return ""
}
// accepts:
// - "/generated/meta/<id>/thumbs.jpg"
// - "C:\...\generated\meta\<id>\thumbs.jpg"
// - "http(s)://host/generated/meta/<id>/thumbs.jpg"
// - (fallback) any filename stem that looks like "<model>_MM_DD_YYYY__HH-MM-ss"
// inferModelFromThumbLike derives a model name from anything that looks like
// a thumbnail reference: a local or absolute path to ".../<id>/thumbs.jpg",
// a full http(s) URL to such a file, or — as a fallback — a bare filename
// whose stem follows the "<model>_MM_DD_YYYY__HH-MM-SS" convention.
func inferModelFromThumbLike(srcOrPath string) string {
	s := strings.TrimSpace(srcOrPath)
	if s == "" {
		return ""
	}
	// Normalize Windows separators to forward slashes.
	s = strings.ReplaceAll(s, `\`, `/`)
	// For full URLs only the path component matters.
	if u, err := url.Parse(s); err == nil && u != nil && u.Scheme != "" && u.Host != "" {
		s = u.Path
	}
	base := path.Base(s)
	// ".../<id>/thumbs.jpg": the parent directory name is the asset id.
	if strings.HasPrefix(strings.ToLower(base), "thumbs.") {
		return inferModelFromStem(path.Base(path.Dir(s)))
	}
	// Otherwise try the basename without its extension.
	return inferModelFromStem(strings.TrimSuffix(base, path.Ext(base)))
}
// ----------------------------------------------------------------------
// Cover info list + cover generation handlers
// coverInfoListItem is one row of the cover info list response: a category
// key, whether a cover image exists on disk, and optional metadata taken from
// the <key>.info.json sidecar.
type coverInfoListItem struct {
	Category    string `json:"category"`
	Model       string `json:"model,omitempty"`
	GeneratedAt string `json:"generatedAt,omitempty"`
	HasCover    bool   `json:"hasCover"`
}
// generatedCoverInfoList (GET/HEAD) lists every category key found in the
// covers directory as JSON: whether a cover image exists and, when a readable
// <key>.info.json sidecar is present, the category/model/generatedAt recorded
// there. Model and GeneratedAt are blanked when no cover image exists so
// stale sidecars never surface metadata for missing covers.
func generatedCoverInfoList(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet && r.Method != http.MethodHead {
		http.Error(w, "Nur GET/HEAD erlaubt", http.StatusMethodNotAllowed)
		return
	}
	root, err := coversRoot()
	if err != nil {
		http.Error(w, "covers root: "+err.Error(), http.StatusInternalServerError)
		return
	}
	entries, err := os.ReadDir(root)
	if err != nil {
		http.Error(w, "covers dir: "+err.Error(), http.StatusInternalServerError)
		return
	}
	byKey := map[string]*coverInfoListItem{}
	// ensure returns the item for key, creating it on first use.
	ensure := func(key string) *coverInfoListItem {
		if v, ok := byKey[key]; ok {
			return v
		}
		v := &coverInfoListItem{Category: key}
		byKey[key] = v
		return v
	}
	isCoverExt := func(ext string) bool {
		switch strings.ToLower(ext) {
		case ".jpg", ".jpeg", ".png", ".webp", ".gif":
			return true
		default:
			return false
		}
	}
	for _, e := range entries {
		name := e.Name()
		lower := strings.ToLower(name)
		// info.json sidecar
		if strings.HasSuffix(lower, ".info.json") {
			key := strings.TrimSuffix(name, ".info.json")
			if ci, ok := readCoverInfoBestEffort(key); ok {
				v := ensure(key)
				if strings.TrimSpace(ci.Category) != "" {
					v.Category = strings.TrimSpace(ci.Category)
				}
				if strings.TrimSpace(ci.Model) != "" {
					v.Model = strings.TrimSpace(ci.Model)
				}
				if strings.TrimSpace(ci.GeneratedAt) != "" {
					v.GeneratedAt = strings.TrimSpace(ci.GeneratedAt)
				}
			}
			continue
		}
		// cover image
		ext := filepath.Ext(name)
		if isCoverExt(ext) {
			key := strings.TrimSuffix(name, ext)
			v := ensure(key)
			v.HasCover = true
		}
	}
	// IMPORTANT: only report a model when a cover image actually exists
	for _, v := range byKey {
		if !v.HasCover {
			v.Model = ""
			v.GeneratedAt = ""
		}
		if strings.TrimSpace(v.Category) == "" {
			v.Category = ""
		}
	}
	// emit sorted (stable order)
	keys := make([]string, 0, len(byKey))
	for k := range byKey {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	out := make([]coverInfoListItem, 0, len(keys))
	for _, k := range keys {
		out = append(out, *byKey[k])
	}
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.Header().Set("Cache-Control", "no-store")
	if r.Method == http.MethodHead {
		w.WriteHeader(http.StatusOK)
		return
	}
	_ = json.NewEncoder(w).Encode(out)
}
// generatedCover (GET/HEAD) serves the persisted cover image for a category,
// generating it on demand. Query parameters:
//
//	category - required; sanitized into the on-disk cover key
//	refresh  - "1"/"true"/"yes" forces regeneration
//	model    - optional explicit model name persisted into <key>.info.json
//	src      - optional explicit thumb source (/generated/... or http(s))
//
// Cache hits are served straight from disk with long-lived caching headers;
// otherwise a source thumb is downloaded or picked for the category,
// re-encoded, written atomically, and the info.json sidecar is refreshed.
// X-Cover-* debug headers describe the decision taken.
func generatedCover(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet && r.Method != http.MethodHead {
		http.Error(w, "Nur GET/HEAD erlaubt", http.StatusMethodNotAllowed)
		return
	}
	category := r.URL.Query().Get("category")
	key, err := sanitizeCoverKey(category)
	if err != nil {
		http.Error(w, "category ungültig: "+err.Error(), http.StatusBadRequest)
		return
	}
	refresh := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("refresh")))
	force := refresh == "1" || refresh == "true" || refresh == "yes"
	// Optional: model overlay
	modelQ := strings.TrimSpace(r.URL.Query().Get("model"))
	modelExplicit := modelQ != ""
	model := modelQ
	// Optional: src (the frontend may dictate a concrete thumb)
	src := strings.TrimSpace(r.URL.Query().Get("src"))
	fallbackModel := ""
	if ci, ok := readCoverInfoBestEffort(key); ok {
		if m := strings.TrimSpace(ci.Model); m != "" {
			fallbackModel = m
		}
	}
	// No explicit model in the request: start with the persisted fallback
	if model == "" {
		model = fallbackModel
	}
	// If src is set and model is NOT explicit: derive it from src right away
	if !modelExplicit && src != "" {
		if m := inferModelFromThumbLike(src); m != "" {
			model = m
		}
	}
	// small request id (header/debug only; no per-request log)
	reqID := strconv.FormatInt(time.Now().UnixNano(), 36)
	setDebugHeaders := func(cache string) {
		w.Header().Set("X-Cover-Key", key)
		w.Header().Set("X-Cover-Category", category)
		if model != "" {
			w.Header().Set("X-Cover-Model", model)
		}
		w.Header().Set("X-Cover-Cache", cache) // HIT | MISS | FORCED
		w.Header().Set("X-Request-Id", reqID)
	}
	// 1) Cache hit: serve straight from disk (only when not forced)
	if !force {
		// With a known/derived model: info.json must exist and match, else regenerate
		if model != "" {
			if ci, ok := readCoverInfoBestEffort(key); ok {
				if strings.TrimSpace(ci.Model) != model { // also when ci.Model is empty
					force = true
				}
			} else {
				// no info.json -> force so the model gets persisted
				force = true
			}
		}
		if !force {
			if p, fi, ok := findExistingCoverFile(key); ok {
				setDebugHeaders("HIT")
				// Cache hit: the cover really exists -> update info.json best-effort,
				// but ONLY when a model is known (so "no cover" never shows a model)
				if model != "" {
					ci, ok := readCoverInfoBestEffort(key)
					if !ok {
						ci = coverInfo{Category: category}
					}
					ci.Category = category
					ci.Model = strings.TrimSpace(model)
					// do NOT overwrite Src on a HIT where it is unknown
					// ci.Src stays as-is
					ci.GeneratedAt = time.Now().UTC().Format(time.RFC3339Nano)
					writeCoverInfoBestEffort(key, ci)
				}
				w.Header().Set("Cache-Control", "public, max-age=31536000")
				w.Header().Set("X-Content-Type-Options", "nosniff")
				ext := strings.ToLower(filepath.Ext(p))
				switch ext {
				case ".png":
					w.Header().Set("Content-Type", "image/png")
				case ".webp":
					w.Header().Set("Content-Type", "image/webp")
				case ".gif":
					w.Header().Set("Content-Type", "image/gif")
				default:
					w.Header().Set("Content-Type", "image/jpeg")
				}
				if r.Method == http.MethodHead {
					w.WriteHeader(http.StatusOK)
					return
				}
				f, err := os.Open(p)
				if err != nil {
					http.NotFound(w, r)
					return
				}
				defer f.Close()
				http.ServeContent(w, r, filepath.Base(p), fi.ModTime(), f)
				return
			}
		}
	}
	// From here on: MISS or FORCED => we generate (or try to)
	cacheStatus := "MISS"
	if force {
		cacheStatus = "FORCED"
	}
	setDebugHeaders(cacheStatus)
	// Global batch logging: only on the real generate path
	coverBatchEnter(force)
	start := time.Now()
	status := http.StatusOK
	outcome := "ok"
	defer func() {
		w.Header().Set("X-Cover-Gen-Ms", strconv.FormatInt(time.Since(start).Milliseconds(), 10))
		coverBatchLeave(outcome, status)
	}()
	// 2) No cache (or forced): generate the cover and persist it
	if _, err := ensureCoversDir(); err != nil {
		status = http.StatusInternalServerError
		outcome = "covers-dir"
		http.Error(w, "covers-dir nicht verfügbar: "+err.Error(), status)
		return
	}
	ctx, cancel := context.WithTimeout(r.Context(), 20*time.Second)
	defer cancel()
	var (
		raw      []byte
		mimeType string
		ext      string
	)
	thumbPath := "" // only used to derive the model when src is empty
	usedSrc := ""   // persisted later into coverInfo
	// 2a) With src: build from it (local /generated/... or http(s))
	if src != "" {
		var derr error
		raw, mimeType, derr = downloadBytes(ctx, src, r.Header.Get("User-Agent"))
		usedSrc = normalizeCoverSrc(src)
		if derr != nil {
			status = http.StatusBadRequest
			outcome = "src-download"
			http.Error(w, "src download failed: "+derr.Error(), status)
			return
		}
		ext, mimeType = detectImageExt(mimeType, raw)
		if len(raw) == 0 {
			status = http.StatusBadRequest
			outcome = "src-empty"
			http.Error(w, "src leer", status)
			return
		}
		// model still empty? derive it from src NOW
		if model == "" {
			if m := inferModelFromThumbLike(src); m != "" {
				model = m
				// refresh the header (model is known now)
				w.Header().Set("X-Cover-Model", model)
			}
		}
	} else {
		// 2b) Otherwise: the backend picks a random thumb matching the category
		var perr error
		thumbPath, perr = pickRandomThumbForCategory(ctx, category)
		if perr != nil {
			// If a cover already exists, prefer serving it (even when forced)
			if p, fi, ok := findExistingCoverFile(key); ok {
				outcome = "fallback-existing-cover"
				status = http.StatusOK
				w.Header().Set("Cache-Control", "public, max-age=600")
				w.Header().Set("X-Content-Type-Options", "nosniff")
				ext2 := strings.ToLower(filepath.Ext(p))
				switch ext2 {
				case ".png":
					w.Header().Set("Content-Type", "image/png")
				case ".webp":
					w.Header().Set("Content-Type", "image/webp")
				case ".gif":
					w.Header().Set("Content-Type", "image/gif")
				default:
					w.Header().Set("Content-Type", "image/jpeg")
				}
				if r.Method == http.MethodHead {
					w.WriteHeader(http.StatusOK)
					return
				}
				f, err := os.Open(p)
				if err != nil {
					servePreviewStatusSVG(w, "No Cover", status)
					return
				}
				defer f.Close()
				http.ServeContent(w, r, filepath.Base(p), fi.ModTime(), f)
				return
			}
			outcome = "no-thumb"
			status = http.StatusNotFound
			if r.Method == http.MethodHead {
				w.WriteHeader(status)
				return
			}
			servePreviewStatusSVG(w, "No Cover", status)
			return
		}
		usedSrc = normalizeCoverSrc(thumbPath)
		raw, err = os.ReadFile(thumbPath)
		if err != nil || len(raw) == 0 {
			status = http.StatusInternalServerError
			outcome = "thumb-read"
			http.Error(w, "cover read fehlgeschlagen", status)
			return
		}
		// thumbs here are JPEG
		ext = ".jpg"
		mimeType = "image/jpeg"
		// model empty? derive it from thumbPath
		// thumbPath is usually ".../generated/meta/<id>/thumbs.jpg"
		if model == "" {
			if m := inferModelFromThumbLike(thumbPath); m != "" {
				model = m
				w.Header().Set("X-Cover-Model", model)
			}
		}
	}
	// Final: when the model was not explicit, bind it to usedSrc
	if !modelExplicit {
		if m := inferModelFromThumbLike(usedSrc); m != "" {
			model = m
			w.Header().Set("X-Cover-Model", model)
		}
	}
	// 3) Overlay + re-encode
	img, _, derr := image.Decode(bytes.NewReader(raw))
	if derr == nil && img != nil {
		rgba := image.NewRGBA(img.Bounds())
		draw.Draw(rgba, rgba.Bounds(), img, img.Bounds().Min, draw.Src)
		var buf bytes.Buffer
		switch strings.ToLower(ext) {
		case ".png":
			_ = png.Encode(&buf, rgba)
			raw = buf.Bytes()
			ext = ".png"
			mimeType = "image/png"
		default:
			_ = jpeg.Encode(&buf, rgba, &jpeg.Options{Quality: 85})
			raw = buf.Bytes()
			ext = ".jpg"
			mimeType = "image/jpeg"
		}
	} else {
		outcome = "decode-failed-no-overlay"
	}
	// 4) Remove previous cover files
	root, _ := coversRoot()
	for _, e := range []string{".jpg", ".png", ".webp", ".gif"} {
		_ = os.Remove(filepath.Join(root, key+e))
	}
	_ = os.Remove(filepath.Join(root, key+".info.json"))
	// 5) Persist
	dst, err := coverPathForCategory(key, ext)
	if err != nil {
		status = http.StatusInternalServerError
		outcome = "cover-path"
		http.Error(w, "cover path: "+err.Error(), status)
		return
	}
	if err := atomicWriteFile(dst, raw); err != nil {
		status = http.StatusInternalServerError
		outcome = "cover-write"
		http.Error(w, "cover write: "+err.Error(), status)
		return
	}
	// 6) Write info.json (best-effort)
	// model is now: query param OR from info.json OR derived from src/thumbPath
	writeCoverInfoBestEffort(key, coverInfo{
		Category:    category,
		Model:       strings.TrimSpace(model),
		Src:         strings.TrimSpace(usedSrc),
		GeneratedAt: time.Now().UTC().Format(time.RFC3339Nano),
	})
	// 7) Serve
	w.Header().Set("Cache-Control", "public, max-age=600")
	w.Header().Set("Content-Type", mimeType)
	w.Header().Set("X-Content-Type-Options", "nosniff")
	w.Header().Set("X-Cover-Bytes", strconv.Itoa(len(raw)))
	if r.Method == http.MethodHead {
		w.WriteHeader(http.StatusOK)
		return
	}
	w.WriteHeader(http.StatusOK)
	_, _ = w.Write(raw)
}
// --------------------------
// generated/meta/<id>/meta.json
// --------------------------
// videoMeta is the cached per-video metadata persisted as JSON next to the
// generated assets. FileSize/FileModUnix tie the entry to a concrete file
// state; readers treat the entry as stale when either differs.
type videoMeta struct {
	Version         int     `json:"version"`
	DurationSeconds float64 `json:"durationSeconds"`
	FileSize        int64   `json:"fileSize"`
	FileModUnix     int64   `json:"fileModUnix"`
	VideoWidth      int     `json:"videoWidth,omitempty"`
	VideoHeight     int     `json:"videoHeight,omitempty"`
	FPS             float64 `json:"fps,omitempty"`
	SourceURL       string  `json:"sourceUrl,omitempty"`
	UpdatedAtUnix   int64   `json:"updatedAtUnix"`
}
// readVideoMeta loads cached metadata (current layout, version 2 or 1, OR the
// very old v1 duration-only layout) from metaPath and validates it against
// the actual file state fi (size + mtime). On any mismatch, non-positive
// duration, or parse problem it reports ok=false so the caller re-probes.
func readVideoMeta(metaPath string, fi os.FileInfo) (dur float64, w int, h int, fps float64, ok bool) {
	raw, readErr := os.ReadFile(metaPath)
	if readErr != nil || len(raw) == 0 {
		return 0, 0, 0, 0, false
	}
	// 1) Current layout (v1 entries share these field names too).
	var meta videoMeta
	if json.Unmarshal(raw, &meta) == nil && (meta.Version == 2 || meta.Version == 1) {
		stale := meta.FileSize != fi.Size() || meta.FileModUnix != fi.ModTime().Unix()
		if stale || meta.DurationSeconds <= 0 {
			return 0, 0, 0, 0, false
		}
		return meta.DurationSeconds, meta.VideoWidth, meta.VideoHeight, meta.FPS, true
	}
	// 2) Fallback: very old v1 layout (duration only, no video props).
	var legacy struct {
		Version         int     `json:"version"`
		DurationSeconds float64 `json:"durationSeconds"`
		FileSize        int64   `json:"fileSize"`
		FileModUnix     int64   `json:"fileModUnix"`
		UpdatedAtUnix   int64   `json:"updatedAtUnix"`
	}
	if json.Unmarshal(raw, &legacy) != nil || legacy.Version != 1 {
		return 0, 0, 0, 0, false
	}
	if legacy.FileSize != fi.Size() || legacy.FileModUnix != fi.ModTime().Unix() {
		return 0, 0, 0, 0, false
	}
	if legacy.DurationSeconds <= 0 {
		return 0, 0, 0, 0, false
	}
	return legacy.DurationSeconds, 0, 0, 0, true
}
// readVideoMetaDuration is a convenience wrapper around readVideoMeta that
// only reports the cached duration.
func readVideoMetaDuration(metaPath string, fi os.FileInfo) (float64, bool) {
	d, _, _, _, ok := readVideoMeta(metaPath, fi)
	return d, ok
}
// readVideoMetaSourceURL returns the recorded source URL from the metadata
// file, but only when the entry still matches the actual file state (size and
// mtime) and actually carries a non-empty URL. Legacy v1 entries without a
// SourceURL yield ("", false).
func readVideoMetaSourceURL(metaPath string, fi os.FileInfo) (string, bool) {
	raw, readErr := os.ReadFile(metaPath)
	if readErr != nil || len(raw) == 0 {
		return "", false
	}
	var meta videoMeta
	if json.Unmarshal(raw, &meta) != nil || (meta.Version != 2 && meta.Version != 1) {
		// old v1 without SourceURL (or unparsable) -> no URL
		return "", false
	}
	if meta.FileSize != fi.Size() || meta.FileModUnix != fi.ModTime().Unix() {
		return "", false
	}
	u := strings.TrimSpace(meta.SourceURL)
	if u == "" {
		return "", false
	}
	return u, true
}
// writeVideoMeta persists the full metadata record (duration plus optional
// width/height/fps and source URL) for the file described by fi. It is a
// no-op when metaPath is empty or dur is not positive; the file is written
// atomically via atomicWriteFile.
func writeVideoMeta(metaPath string, fi os.FileInfo, dur float64, w int, h int, fps float64, sourceURL string) error {
	if strings.TrimSpace(metaPath) == "" || dur <= 0 {
		return nil
	}
	record := videoMeta{
		Version:         2, // layout version; the old "v2" naming is gone
		DurationSeconds: dur,
		FileSize:        fi.Size(),
		FileModUnix:     fi.ModTime().Unix(),
		VideoWidth:      w,
		VideoHeight:     h,
		FPS:             fps,
		SourceURL:       strings.TrimSpace(sourceURL),
		UpdatedAtUnix:   time.Now().Unix(),
	}
	payload, err := json.Marshal(record)
	if err != nil {
		return err
	}
	// trailing newline keeps the file friendly for cat/diff
	return atomicWriteFile(metaPath, append(payload, '\n'))
}
// writeVideoMetaDuration persists a duration-only record (no video props).
func writeVideoMetaDuration(metaPath string, fi os.FileInfo, dur float64, sourceURL string) error {
	return writeVideoMeta(metaPath, fi, dur, 0, 0, 0, sourceURL)
}
// generatedMetaFile returns the path generated/meta/<id>/meta.json without
// creating any directories.
func generatedMetaFile(id string) (string, error) {
	dir, err := generatedDirForID(id) // creates NO directory
	if err != nil {
		return "", err
	}
	return filepath.Join(dir, "meta.json"), nil
}
// generatedDirForID maps an asset id to its per-asset directory
// generated/meta/<id>. The id is sanitized first; nothing is created on disk.
func generatedDirForID(id string) (string, error) {
	cleanID, err := sanitizeID(id)
	if err != nil {
		return "", err
	}
	root, err := generatedMetaRoot()
	if err != nil {
		return "", err
	}
	if strings.TrimSpace(root) == "" {
		return "", fmt.Errorf("generated meta root ist leer")
	}
	return filepath.Join(root, cleanID), nil
}
// ensureGeneratedDir resolves the per-asset directory for id and makes sure
// it exists on disk, returning its absolute path.
func ensureGeneratedDir(id string) (string, error) {
	dir, err := generatedDirForID(id)
	if err != nil {
		return "", err
	}
	if mkErr := os.MkdirAll(dir, 0o755); mkErr != nil {
		return "", mkErr
	}
	return dir, nil
}
// generatedThumbFile returns the path generated/meta/<id>/thumbs.jpg
// (path only; nothing is created).
func generatedThumbFile(id string) (string, error) {
	dir, err := generatedDirForID(id)
	if err != nil {
		return "", err
	}
	return filepath.Join(dir, "thumbs.jpg"), nil
}
// generatedPreviewFile returns the path generated/meta/<id>/preview.mp4
// (path only; nothing is created).
func generatedPreviewFile(id string) (string, error) {
	dir, err := generatedDirForID(id)
	if err != nil {
		return "", err
	}
	return filepath.Join(dir, "preview.mp4"), nil
}
// ensureGeneratedDirs creates the generated/meta root directory if needed.
func ensureGeneratedDirs() error {
	root, err := generatedMetaRoot()
	switch {
	case err != nil:
		return err
	case strings.TrimSpace(root) == "":
		return fmt.Errorf("generated meta root ist leer")
	default:
		return os.MkdirAll(root, 0o755)
	}
}
// sanitizeID validates an asset id: it must be non-empty after trimming and
// must not contain path separators (guards against directory traversal).
func sanitizeID(id string) (string, error) {
	trimmed := strings.TrimSpace(id)
	switch {
	case trimmed == "":
		return "", fmt.Errorf("id fehlt")
	case strings.ContainsAny(trimmed, `/\`):
		return "", fmt.Errorf("ungültige id")
	}
	return trimmed, nil
}
// sanitizeModelKey normalizes a model key: the "HOT " prefix is stripped and
// empty values, the "—" placeholder, and anything containing path separators
// are rejected. Returns "" for invalid keys.
func sanitizeModelKey(k string) string {
	key := stripHotPrefix(strings.TrimSpace(k))
	switch {
	case key == "", key == "—", strings.ContainsAny(key, `/\`):
		return ""
	}
	return key
}
// modelKeyFromFilenameOrPath derives the model key for a finished file.
// Preference order:
//  1. the filename stem (via modelNameFromFilename), extension stripped;
//  2. the parent directory name when the file lived in done/<model>/...
//     (but never the "keep" directory itself).
//
// Returns "" when no usable key can be derived.
func modelKeyFromFilenameOrPath(file string, srcPath string, doneAbs string) string {
	// 1) preferred: from the filename (WITHOUT the extension!)
	stem := strings.TrimSuffix(filepath.Base(strings.TrimSpace(file)), filepath.Ext(file))
	if key := sanitizeModelKey(strings.TrimSpace(modelNameFromFilename(stem))); key != "" {
		return key
	}
	// 2) fallback: derive from the source path if the file sat in done/<model>/...
	if strings.TrimSpace(srcPath) == "" || strings.TrimSpace(doneAbs) == "" {
		return ""
	}
	srcDir := filepath.Clean(filepath.Dir(srcPath))
	doneClean := filepath.Clean(doneAbs)
	// srcDir == done root means no subfolder; otherwise the subfolder name is
	// usually the model.
	if strings.EqualFold(srcDir, doneClean) {
		return ""
	}
	key := sanitizeModelKey(filepath.Base(srcDir))
	if key == "" || strings.EqualFold(key, "keep") {
		return ""
	}
	return key
}
// uniqueDestPath returns a destination path inside dstDir for file that does
// not collide with an existing entry. On collision it probes "name__dup2.ext"
// through "name__dup200.ext" and gives up with an error beyond that.
func uniqueDestPath(dstDir, file string) (string, error) {
	dst := filepath.Join(dstDir, file)
	_, statErr := os.Stat(dst)
	if os.IsNotExist(statErr) {
		return dst, nil
	}
	if statErr != nil {
		return "", statErr
	}
	// dst exists -> probe numbered alternatives
	ext := filepath.Ext(file)
	base := strings.TrimSuffix(file, ext)
	for i := 2; i <= 200; i++ {
		cand := filepath.Join(dstDir, fmt.Sprintf("%s__dup%d%s", base, i, ext))
		if _, err := os.Stat(cand); os.IsNotExist(err) {
			return cand, nil
		}
	}
	return "", fmt.Errorf("too many duplicates for %s", file)
}
// idFromVideoPath derives the asset id — the filename stem without its
// extension — from a video path.
func idFromVideoPath(videoPath string) string {
	base := filepath.Base(strings.TrimSpace(videoPath))
	return strings.TrimSuffix(base, filepath.Ext(base))
}
// assetIDForJob returns the stable asset id used for a job's generated
// artifacts (thumbs/preview/meta): the output filename stem without its
// extension and without the "HOT " prefix, falling back to the job id when
// no usable output name exists. Nil-safe.
func assetIDForJob(job *RecordJob) string {
	if job == nil {
		return ""
	}
	// Prefer: filename without extension (and without the HOT prefix)
	out := strings.TrimSpace(job.Output)
	if out != "" {
		id := stripHotPrefix(idFromVideoPath(out))
		if strings.TrimSpace(id) != "" {
			return id
		}
	}
	// Fallback: JobID (should practically never be needed)
	return strings.TrimSpace(job.ID)
}
// atomicWriteFile writes data to dst atomically: it writes a temp file in the
// destination directory and renames it into place, so readers never observe a
// partially written file. Parent directories are created as needed.
//
// Fix: the temp file is now removed on every failure path — including a
// failed os.Rename, where the original leaked a ".tmp-*" file.
func atomicWriteFile(dst string, data []byte) error {
	dir := filepath.Dir(dst)
	if err := os.MkdirAll(dir, 0o755); err != nil {
		return err
	}
	tmp, err := os.CreateTemp(dir, ".tmp-*")
	if err != nil {
		return err
	}
	tmpName := tmp.Name()
	_ = tmp.Chmod(0o644) // best-effort: CreateTemp defaults to 0600
	_, werr := tmp.Write(data)
	cerr := tmp.Close()
	if werr != nil {
		_ = os.Remove(tmpName)
		return werr
	}
	if cerr != nil {
		_ = os.Remove(tmpName)
		return cerr
	}
	// Rename into place; clean up the orphaned temp file on failure.
	if err := os.Rename(tmpName, dst); err != nil {
		_ = os.Remove(tmpName)
		return err
	}
	return nil
}
// On startup: sort loose files in /done/keep (root) into /done/keep/<model>/.
// Best-effort: files are skipped (or renamed) when the model cannot be
// derived or the destination collides.
// fixKeepRootFilesIntoModelSubdirs moves loose video files sitting directly
// in /done/keep into /done/keep/<model>/ subdirectories at startup.
// Best-effort: files whose model cannot be derived are skipped, and name
// collisions are resolved with "__dupN" suffixes.
//
// Consistency fix: the hand-rolled "__dupN" probing loop duplicated
// uniqueDestPath; it now calls that shared helper instead.
func fixKeepRootFilesIntoModelSubdirs() {
	s := getSettings()
	doneAbs, err := resolvePathRelativeToApp(s.DoneDir)
	if err != nil || strings.TrimSpace(doneAbs) == "" {
		return
	}
	keepRoot := filepath.Join(doneAbs, "keep")
	ents, err := os.ReadDir(keepRoot)
	if err != nil {
		// keep may not exist yet -> nothing to do
		if os.IsNotExist(err) {
			return
		}
		fmt.Println("⚠️ keep scan failed:", err)
		return
	}
	moved := 0
	skipped := 0
	// isVideo: finished .mp4/.ts files only; partial/temp downloads are ignored.
	isVideo := func(name string) bool {
		low := strings.ToLower(name)
		if strings.Contains(low, ".part") || strings.Contains(low, ".tmp") {
			return false
		}
		ext := strings.ToLower(filepath.Ext(name))
		return ext == ".mp4" || ext == ".ts"
	}
	for _, e := range ents {
		if e.IsDir() {
			continue
		}
		name := e.Name()
		if !isVideo(name) {
			continue
		}
		// Source: /done/keep/<file>
		src := filepath.Join(keepRoot, name)
		// Derive the model from the filename stem (same rule as the keep handler)
		stem := strings.TrimSuffix(name, filepath.Ext(name)) // without .mp4/.ts
		modelKey := sanitizeModelKey(strings.TrimSpace(modelNameFromFilename(stem)))
		// not derivable -> skip
		if modelKey == "" || modelKey == "—" || strings.ContainsAny(modelKey, `/\`) {
			skipped++
			continue
		}
		dstDir := filepath.Join(keepRoot, modelKey)
		if err := os.MkdirAll(dstDir, 0o755); err != nil {
			fmt.Println("⚠️ keep mkdir failed:", err)
			skipped++
			continue
		}
		// Collision-safe destination via the shared helper (instead of an
		// inline copy of the "__dupN" probing loop).
		dst, derr := uniqueDestPath(dstDir, name)
		if derr != nil {
			fmt.Println("⚠️ keep fix dest failed:", derr)
			skipped++
			continue
		}
		// Move (robust against transient Windows file locks)
		if err := renameWithRetry(src, dst); err != nil {
			fmt.Println("⚠️ keep fix rename failed:", err)
			skipped++
			continue
		}
		moved++
	}
	if moved > 0 || skipped > 0 {
		fmt.Printf("🧹 keep fix: moved=%d skipped=%d (root=%s)\n", moved, skipped, keepRoot)
	}
}
// findFinishedFileByID locates the finished recording file for an asset id.
// It tries the ".mp4"/".ts" variants with and without the "HOT " prefix,
// searching the done dir (one subdir level, excluding "keep"), then the keep
// tree, and finally the record dir.
func findFinishedFileByID(id string) (string, error) {
	s := getSettings()
	recordAbs, _ := resolvePathRelativeToApp(s.RecordDir)
	doneAbs, _ := resolvePathRelativeToApp(s.DoneDir)
	base := stripHotPrefix(strings.TrimSpace(id))
	if base == "" {
		return "", fmt.Errorf("not found")
	}
	names := []string{
		base + ".mp4",
		"HOT " + base + ".mp4",
		base + ".ts",
		"HOT " + base + ".ts",
	}
	// done (root + /done/<subdir>/) + keep (root + /done/keep/<subdir>/)
	for _, name := range names {
		if p, _, ok := findFileInDirOrOneLevelSubdirs(doneAbs, name, "keep"); ok {
			return p, nil
		}
		if p, _, ok := findFileInDirOrOneLevelSubdirs(filepath.Join(doneAbs, "keep"), name, ""); ok {
			return p, nil
		}
		if p, _, ok := findFileInDirOrOneLevelSubdirs(recordAbs, name, ""); ok {
			return p, nil
		}
	}
	return "", fmt.Errorf("not found")
}
// servePreviewThumbAlias serves "thumbs.jpg"/"preview.jpg" requests for a job
// id or asset id. Known jobs get their generated thumbnail (no-store while
// running), then the in-memory preview JPEG, then a placeholder SVG; unknown
// ids are treated as finished-asset ids (e.g. after a backend restart).
func servePreviewThumbAlias(w http.ResponseWriter, r *http.Request, id, file string) {
	// 1) Known job (id = job.ID): derive the assetID from the output path
	jobsMu.Lock()
	job := jobs[id]
	jobsMu.Unlock()
	if job != nil {
		assetID := assetIDForJob(job)
		if assetID != "" {
			if thumbPath, err := generatedThumbFile(assetID); err == nil {
				if st, err := os.Stat(thumbPath); err == nil && !st.IsDir() && st.Size() > 0 {
					// running => no-store, finished => caching ok (live prefers no-store)
					if job.Status == JobRunning {
						serveLivePreviewJPEGFile(w, r, thumbPath)
					} else {
						servePreviewJPEGFile(w, r, thumbPath)
					}
					return
				}
			}
		}
		// Optional in-memory fallback while running:
		if job.Status == JobRunning {
			job.previewMu.Lock()
			cached := job.previewJpeg
			job.previewMu.Unlock()
			if len(cached) > 0 {
				serveLivePreviewJPEGBytes(w, cached)
				return
			}
		}
		// placeholder instead of a hard 404
		servePreviewStatusSVG(w, "Preview", http.StatusOK)
		return
	}
	// 2) No job in RAM: treat id as an assetID (finished files after restart)
	// "preview.jpg" is an alias for thumbs.jpg
	assetID := stripHotPrefix(strings.TrimSpace(id))
	if assetID == "" {
		http.NotFound(w, r)
		return
	}
	if thumbPath, err := generatedThumbFile(assetID); err == nil {
		if st, err := os.Stat(thumbPath); err == nil && !st.IsDir() && st.Size() > 0 {
			servePreviewJPEGFile(w, r, thumbPath)
			return
		}
	}
	http.NotFound(w, r)
}
// isHover reports whether the request carries a truthy "hover" query
// parameter ("1", "true" or "yes", case-insensitive).
func isHover(r *http.Request) bool {
	switch strings.ToLower(strings.TrimSpace(r.URL.Query().Get("hover"))) {
	case "1", "true", "yes":
		return true
	}
	return false
}
// touchPreview bumps the job's preview last-hit timestamp so the preview
// pipeline is not reaped as idle. Nil-safe.
func touchPreview(job *RecordJob) {
	if job == nil {
		return
	}
	jobsMu.Lock()
	job.previewLastHit = time.Now()
	jobsMu.Unlock()
}
// ensurePreviewStarted lazily starts the HLS preview pipeline for a job on
// first access. It is idempotent (serialized by previewStartMu) and runs the
// preview under its own background context — deliberately NOT the recording
// context — so the two lifetimes stay independent.
func ensurePreviewStarted(r *http.Request, job *RecordJob) {
	if job == nil {
		return
	}
	job.previewStartMu.Lock()
	defer job.previewStartMu.Unlock()
	jobsMu.Lock()
	// already running?
	if job.previewCmd != nil && job.PreviewDir != "" {
		job.previewLastHit = time.Now()
		jobsMu.Unlock()
		return
	}
	// we need the M3U8 URL
	m3u8 := strings.TrimSpace(job.PreviewM3U8)
	cookie := strings.TrimSpace(job.PreviewCookie)
	ua := strings.TrimSpace(job.PreviewUA)
	jobsMu.Unlock()
	if m3u8 == "" {
		return
	}
	// dedicated context for the preview (IMPORTANT: not the recording ctx)
	pctx, cancel := context.WithCancel(context.Background())
	// temp PreviewDir
	assetID := assetIDForJob(job)
	pdir := filepath.Join(os.TempDir(), "rec_preview", assetID)
	jobsMu.Lock()
	job.PreviewDir = pdir
	job.previewCancel = cancel
	job.previewLastHit = time.Now()
	jobsMu.Unlock()
	_ = startPreviewHLS(pctx, job, m3u8, pdir, cookie, ua)
}
// recordPreview (GET/HEAD) is the preview endpoint for a recording job or a
// finished file. "id" (alias "name") selects the target; an optional "file"
// parameter requests a specific asset (thumbs.jpg/preview.jpg alias, or an
// HLS playlist/segment). For running jobs it serves a live thumbnail from
// disk when available, otherwise an in-memory JPEG cache that is refreshed by
// at most one background goroutine at a time.
func recordPreview(w http.ResponseWriter, r *http.Request) {
	// optional but sensible: allow GET/HEAD only
	if r.Method != http.MethodGet && r.Method != http.MethodHead {
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
		return
	}
	id := strings.TrimSpace(r.URL.Query().Get("id"))
	if id == "" {
		// alias: the frontend sends "name"
		id = strings.TrimSpace(r.URL.Query().Get("name"))
	}
	if id == "" {
		http.Error(w, "id fehlt", http.StatusBadRequest)
		return
	}
	// intercept JPEG requests
	if file := strings.TrimSpace(r.URL.Query().Get("file")); file != "" {
		if file == "thumbs.jpg" || file == "preview.jpg" {
			servePreviewThumbAlias(w, r, id, file)
			return
		}
		// HLS as before
		servePreviewHLSFile(w, r, id, file)
		return
	}
	// Check whether a job with this id is known (running or just finished)
	jobsMu.Lock()
	job, ok := jobs[id]
	jobsMu.Unlock()
	if ok {
		// 0) Running: if generated/<jobId>/thumbs.jpg exists -> serve it
		// immediately (no ffmpeg per HTTP request)
		if job.Status == "running" {
			assetID := assetIDForJob(job)
			if assetID != "" {
				if thumbPath, err := generatedThumbFile(assetID); err == nil {
					if st, err := os.Stat(thumbPath); err == nil && !st.IsDir() && st.Size() > 0 {
						serveLivePreviewJPEGFile(w, r, thumbPath)
						return
					}
				}
			}
		}
		// Fallback: legacy in-memory cache (when thumbs.jpg is not there yet)
		job.previewMu.Lock()
		cached := job.previewJpeg
		cachedAt := job.previewJpegAt
		freshWindow := 8 * time.Second
		fresh := len(cached) > 0 && !cachedAt.IsZero() && time.Since(cachedAt) < freshWindow
		// Not fresh: refresh in the background (only one generator at a time)
		if !fresh && !job.previewGen {
			job.previewGen = true
			go func(j *RecordJob, jobID string) {
				defer func() {
					j.previewMu.Lock()
					j.previewGen = false
					j.previewMu.Unlock()
				}()
				var img []byte
				var genErr error
				// 1) from the preview segments
				previewDir := strings.TrimSpace(j.PreviewDir)
				if previewDir != "" {
					img, genErr = extractLastFrameFromPreviewDir(previewDir)
				}
				// 2) fallback: from the output file
				if genErr != nil || len(img) == 0 {
					outPath := strings.TrimSpace(j.Output)
					if outPath != "" {
						outPath = filepath.Clean(outPath)
						if !filepath.IsAbs(outPath) {
							if abs, err := resolvePathRelativeToApp(outPath); err == nil {
								outPath = abs
							}
						}
						if fi, err := os.Stat(outPath); err == nil && !fi.IsDir() && fi.Size() > 0 {
							img, genErr = extractLastFrameJPEG(outPath)
							if genErr != nil {
								img, _ = extractFirstFrameJPEG(outPath)
							}
						}
					}
				}
				if len(img) > 0 {
					j.previewMu.Lock()
					j.previewJpeg = img
					j.previewJpegAt = time.Now()
					j.previewMu.Unlock()
				}
			}(job, id)
		}
		// Serve either a fresh image or the last cached one.
		out := cached
		job.previewMu.Unlock()
		if len(out) > 0 {
			serveLivePreviewJPEGBytes(w, out) // no-store for running jobs
			return
		}
		// Preview definitely unavailable -> placeholder instead of 204
		jobsMu.Lock()
		state := strings.TrimSpace(job.PreviewState)
		jobsMu.Unlock()
		if state == "private" {
			servePreviewStatusSVG(w, "Private", http.StatusOK)
			return
		}
		if state == "offline" {
			servePreviewStatusSVG(w, "Offline", http.StatusOK)
			return
		}
		// no image available yet -> 204 (frontend shows a placeholder and retries)
		w.Header().Set("Cache-Control", "no-store")
		w.WriteHeader(http.StatusNoContent)
		return
	}
	// 3) No job in RAM → treat id as the file stem of a finished download
	servePreviewForFinishedFile(w, r, id)
}
// serveLivePreviewJPEGFile streams a live-thumbnail JPEG from disk with
// caching disabled (the image keeps changing while the job records). Missing,
// directory, or empty files yield 404.
func serveLivePreviewJPEGFile(w http.ResponseWriter, r *http.Request, filePath string) {
	file, openErr := os.Open(filePath)
	if openErr != nil {
		http.NotFound(w, r)
		return
	}
	defer file.Close()
	info, statErr := file.Stat()
	if statErr != nil || info.IsDir() || info.Size() == 0 {
		http.NotFound(w, r)
		return
	}
	w.Header().Set("Content-Type", "image/jpeg")
	w.Header().Set("Cache-Control", "no-store")
	http.ServeContent(w, r, "thumbs.jpg", info.ModTime(), file)
}
// updateLiveThumbOnce refreshes generated/meta/<assetID>/thumbs.jpg for a
// running job, at most once per ~10s. It prefers grabbing a frame from the
// live HLS preview segments and falls back to the (partial) output file.
// Concurrency is bounded by thumbSem; a busy semaphore simply skips the tick.
//
// Consistency fix: the status is compared against the typed JobRunning
// constant instead of the raw "running" literal (matching the rest of the
// JobStatus handling).
func updateLiveThumbOnce(ctx context.Context, job *RecordJob) {
	// Snapshot mutable job fields under the lock.
	jobsMu.Lock()
	status := job.Status
	previewDir := job.PreviewDir
	out := job.Output
	jobsMu.Unlock()
	if status != JobRunning {
		return
	}
	// Target: generated/<jobId>/thumbs.jpg
	assetID := assetIDForJob(job)
	thumbPath, err := generatedThumbFile(assetID)
	if err != nil {
		return
	}
	// Still fresh enough: skip
	if st, err := os.Stat(thumbPath); err == nil && st.Size() > 0 {
		if time.Since(st.ModTime()) < 10*time.Second {
			return
		}
	}
	// Bound concurrency via thumbSem
	if thumbSem != nil {
		thumbCtx, cancel := context.WithTimeout(ctx, 3*time.Second)
		defer cancel()
		if err := thumbSem.Acquire(thumbCtx); err != nil {
			return
		}
		defer thumbSem.Release()
	}
	var img []byte
	// 1) prefer the live preview segments
	if previewDir != "" {
		if b, err := extractLastFrameFromPreviewDirThumb(previewDir); err == nil && len(b) > 0 {
			img = b
		}
	}
	// 2) fallback: the output file (can be slower on partial files)
	if len(img) == 0 && out != "" {
		if b, err := extractLastFrameJPEGScaled(out, 320, 14); err == nil && len(b) > 0 {
			img = b
		}
	}
	if len(img) == 0 {
		return
	}
	_ = atomicWriteFile(thumbPath, img)
}
// startLiveThumbLoop launches (at most once per job) a background goroutine
// that periodically refreshes the on-disk live thumbnail while the job is
// running. The refresh interval grows with the number of concurrently
// running jobs to bound overall load; the loop exits when ctx is cancelled
// or the job leaves the "running" state.
func startLiveThumbLoop(ctx context.Context, job *RecordJob) {
	// Guard: start only once per job.
	jobsMu.Lock()
	alreadyStarted := job.LiveThumbStarted
	job.LiveThumbStarted = true
	jobsMu.Unlock()
	if alreadyStarted {
		return
	}
	go func() {
		// First attempt right away.
		updateLiveThumbOnce(ctx, job)
		for {
			// Adaptive cadence: more running jobs -> longer delay (less load).
			jobsMu.Lock()
			running := 0
			for _, other := range jobs {
				if other != nil && other.Status == "running" {
					running++
				}
			}
			jobsMu.Unlock()
			var wait time.Duration
			switch {
			case running >= 12:
				wait = 25 * time.Second
			case running >= 6:
				wait = 18 * time.Second
			default:
				wait = 12 * time.Second
			}
			select {
			case <-ctx.Done():
				return
			case <-time.After(wait):
				// Stop as soon as the job is no longer running.
				jobsMu.Lock()
				cur := job.Status
				jobsMu.Unlock()
				if cur != "running" {
					return
				}
				updateLiveThumbOnce(ctx, job)
			}
		}
	}()
}
// Fallback: preview for finished files, keyed only by the file stem (id).
//
// servePreviewForFinishedFile serves a cached JPEG thumbnail for a finished
// recording, optionally at a specific timestamp (?t=seconds). Cache layout:
// generated/<asset>/thumbs.jpg and generated/<asset>/t_<sec>.jpg. Legacy
// thumbnail locations are migrated into the new layout on first hit; when
// nothing is cached, a frame is extracted (mid-point, falling back to the
// last and then the first frame).
func servePreviewForFinishedFile(w http.ResponseWriter, r *http.Request, id string) {
	var err error
	id, err = sanitizeID(id)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	outPath, err := findFinishedFileByID(id)
	if err != nil {
		http.Error(w, "preview nicht verfügbar", http.StatusNotFound)
		return
	}
	if err := ensureGeneratedDirs(); err != nil {
		http.Error(w, "generated-dir nicht verfügbar: "+err.Error(), http.StatusInternalServerError)
		return
	}
	// Assets are always keyed by the basename without the HOT prefix.
	assetID := stripHotPrefix(id)
	if assetID == "" {
		assetID = id
	}
	assetDir, err := ensureGeneratedDir(assetID)
	if err != nil {
		http.Error(w, "generated-dir nicht verfügbar: "+err.Error(), http.StatusInternalServerError)
		return
	}
	// Optional per-timestamp frame caching for t=...
	if tStr := strings.TrimSpace(r.URL.Query().Get("t")); tStr != "" {
		if sec, err := strconv.ParseFloat(tStr, 64); err == nil && sec >= 0 {
			// Round to whole seconds so cache keys stay stable.
			secI := int64(sec + 0.5)
			if secI < 0 {
				secI = 0
			}
			framePath := filepath.Join(assetDir, fmt.Sprintf("t_%d.jpg", secI))
			if fi, err := os.Stat(framePath); err == nil && !fi.IsDir() && fi.Size() > 0 {
				servePreviewJPEGFile(w, r, framePath)
				return
			}
			img, err := extractFrameAtTimeJPEG(outPath, float64(secI))
			if err == nil && len(img) > 0 {
				_ = atomicWriteFile(framePath, img)
				servePreviewJPEGBytes(w, img)
				return
			}
		}
	}
	thumbPath := filepath.Join(assetDir, "thumbs.jpg")
	// 1) Cache hit (new layout)
	if fi, err := os.Stat(thumbPath); err == nil && !fi.IsDir() && fi.Size() > 0 {
		servePreviewJPEGFile(w, r, thumbPath)
		return
	}
	// 2) Legacy migration (best effort)
	if thumbsLegacy, _ := generatedThumbsRoot(); strings.TrimSpace(thumbsLegacy) != "" {
		candidates := []string{
			filepath.Join(thumbsLegacy, assetID, "preview.jpg"),
			filepath.Join(thumbsLegacy, id, "preview.jpg"),
			filepath.Join(thumbsLegacy, assetID+".jpg"),
			filepath.Join(thumbsLegacy, id+".jpg"),
		}
		for _, c := range candidates {
			if fi, err := os.Stat(c); err == nil && !fi.IsDir() && fi.Size() > 0 {
				if b, rerr := os.ReadFile(c); rerr == nil && len(b) > 0 {
					_ = atomicWriteFile(thumbPath, b)
					servePreviewJPEGBytes(w, b)
					return
				}
			}
		}
	}
	// 3) Generate a fresh thumbnail (mid-point frame, then last/first frame).
	genCtx, cancel := context.WithTimeout(r.Context(), 30*time.Second)
	defer cancel()
	var t float64 = 0
	if dur, derr := durationSecondsCached(genCtx, outPath); derr == nil && dur > 0 {
		t = dur * 0.5
	}
	img, err := extractFrameAtTimeJPEG(outPath, t)
	if err != nil || len(img) == 0 {
		img, err = extractLastFrameJPEG(outPath)
		if err != nil || len(img) == 0 {
			img, err = extractFirstFrameJPEG(outPath)
			if err != nil || len(img) == 0 {
				http.Error(w, "konnte preview nicht erzeugen", http.StatusInternalServerError)
				return
			}
		}
	}
	_ = atomicWriteFile(thumbPath, img)
	servePreviewJPEGBytes(w, img)
}
// serveTeaserFile streams a teaser MP4 from disk with long-lived caching
// headers. Open errors map to 500; missing, empty, or directory paths to
// 404. The share-delete open mode keeps Windows deletions possible while
// the file is being served.
func serveTeaserFile(w http.ResponseWriter, r *http.Request, path string) {
	file, err := openForReadShareDelete(path)
	if err != nil {
		http.Error(w, "datei öffnen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	defer file.Close()
	info, statErr := file.Stat()
	if statErr != nil || info.IsDir() || info.Size() == 0 {
		http.Error(w, "datei nicht gefunden", http.StatusNotFound)
		return
	}
	hdr := w.Header()
	hdr.Set("Cache-Control", "public, max-age=31536000")
	hdr.Set("Content-Type", "video/mp4")
	http.ServeContent(w, r, filepath.Base(path), info.ModTime(), file)
}
// ffmpegInputTol holds tolerant ffmpeg input flags for broken/truncated
// H264/TS streams: drop corrupt packets, regenerate PTS, ignore decoder
// errors, and accept any error rate instead of aborting.
var ffmpegInputTol = []string{
	"-fflags", "+discardcorrupt+genpts",
	"-err_detect", "ignore_err",
	"-max_error_rate", "1.0",
}
var coverModelStore *ModelStore

// coverSeedOnce guarantees the global RNG is seeded exactly once, no
// matter how often setCoverModelStore is called.
var coverSeedOnce sync.Once

// setCoverModelStore wires the ModelStore used for cover generation and
// seeds math/rand. BUGFIX: the previous code re-seeded on every call even
// though the comment said "once" — sync.Once now enforces single seeding.
// (rand.Seed is deprecated since Go 1.20 but kept for compatibility with
// the rest of the file.)
func setCoverModelStore(s *ModelStore) {
	coverModelStore = s
	coverSeedOnce.Do(func() {
		rand.Seed(time.Now().UnixNano())
	})
}
// generateTeaserMP4 renders a single short teaser clip from srcPath into
// outPath, starting at startSec for durSec seconds (defaults: start 0,
// 8s). The clip is scaled to 720px width, encoded H.264/AAC with
// faststart, written to a temp file and atomically renamed on success.
// Returns an error including ffmpeg's combined output when encoding fails.
func generateTeaserMP4(ctx context.Context, srcPath, outPath string, startSec, durSec float64) error {
	if durSec <= 0 {
		durSec = 8
	}
	if startSec < 0 {
		startSec = 0
	}
	// Write to a temp file, then rename.
	tmp := outPath + ".tmp.mp4"
	args := []string{
		"-y",
		"-hide_banner",
		"-loglevel", "error",
	}
	args = append(args, ffmpegInputTol...)
	args = append(args,
		"-ss", fmt.Sprintf("%.3f", startSec),
		"-i", srcPath,
		"-t", fmt.Sprintf("%.3f", durSec),
		// Video
		"-vf", "scale=720:-2",
		"-map", "0:v:0",
		// Audio is optional. BUGFIX: the old hard mapping "0:a:0" made
		// ffmpeg ABORT on audio-less recordings, contradicting the intent;
		// the trailing "?" makes the mapping best-effort so sources without
		// an audio stream still produce a (silent) teaser.
		"-map", "0:a:0?",
		"-c:a", "aac",
		"-b:a", "128k",
		"-ac", "2",
		"-c:v", "libx264",
		"-preset", "veryfast",
		"-crf", "28",
		"-pix_fmt", "yuv420p",
		// End cleanly when audio is slightly shorter/longer than video.
		"-shortest",
		"-movflags", "+faststart",
		"-f", "mp4",
		tmp,
	)
	cmd := exec.CommandContext(ctx, ffmpegPath, args...)
	if out, err := cmd.CombinedOutput(); err != nil {
		_ = os.Remove(tmp)
		return fmt.Errorf("ffmpeg teaser failed: %v (%s)", err, strings.TrimSpace(string(out)))
	}
	_ = os.Remove(outPath)
	return os.Rename(tmp, outPath)
}
// generatedTeaser serves (and lazily generates) the teaser preview MP4 for
// a finished recording identified by ?id=<file stem>. Lookup order: cached
// generated/<asset>/preview.mp4, legacy teaser locations (migrated into the
// new layout on hit), then on-the-fly generation — which is skipped when
// ?noGenerate=1|true|yes is set (404 instead).
func generatedTeaser(w http.ResponseWriter, r *http.Request) {
	id := strings.TrimSpace(r.URL.Query().Get("id"))
	if id == "" {
		http.Error(w, "id fehlt", http.StatusBadRequest)
		return
	}
	var err error
	id, err = sanitizeID(id)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	outPath, err := findFinishedFileByID(id)
	if err != nil {
		http.Error(w, "preview nicht verfügbar", http.StatusNotFound)
		return
	}
	if err := ensureGeneratedDirs(); err != nil {
		http.Error(w, "generated-dir nicht verfügbar: "+err.Error(), http.StatusInternalServerError)
		return
	}
	// Assets are always keyed by the basename without the HOT prefix.
	assetID := stripHotPrefix(id)
	if assetID == "" {
		assetID = id
	}
	assetDir, err := ensureGeneratedDir(assetID)
	if err != nil {
		http.Error(w, "generated-dir nicht verfügbar: "+err.Error(), http.StatusInternalServerError)
		return
	}
	previewPath := filepath.Join(assetDir, "preview.mp4")
	// noGenerate=1 -> never generate on the fly, only serve what exists.
	qNoGen := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("noGenerate")))
	noGen := qNoGen == "1" || qNoGen == "true" || qNoGen == "yes"
	// Cache hit (new layout)
	if fi, err := os.Stat(previewPath); err == nil && !fi.IsDir() && fi.Size() > 0 {
		serveTeaserFile(w, r, previewPath)
		return
	}
	// Legacy layout: generated/teaser/<id>_teaser.mp4 or <id>.mp4
	if teaserLegacy, _ := generatedTeaserRoot(); strings.TrimSpace(teaserLegacy) != "" {
		cids := []string{assetID, id}
		for _, cid := range cids {
			candidates := []string{
				filepath.Join(teaserLegacy, cid+"_teaser.mp4"),
				filepath.Join(teaserLegacy, cid+".mp4"),
			}
			for _, c := range candidates {
				if fi, err := os.Stat(c); err == nil && !fi.IsDir() && fi.Size() > 0 {
					// Best-effort migration into the new location.
					if _, err2 := os.Stat(previewPath); os.IsNotExist(err2) {
						_ = os.MkdirAll(filepath.Dir(previewPath), 0o755)
						_ = os.Rename(c, previewPath)
					}
					if fi2, err2 := os.Stat(previewPath); err2 == nil && !fi2.IsDir() && fi2.Size() > 0 {
						serveTeaserFile(w, r, previewPath)
						return
					}
					serveTeaserFile(w, r, c)
					return
				}
			}
		}
	}
	// noGenerate active and nothing found so far -> 404.
	if noGen {
		http.Error(w, "preview nicht verfügbar", http.StatusNotFound)
		return
	}
	// Generate a fresh teaser (bounded by genSem and a 3-minute budget).
	if err := genSem.Acquire(r.Context()); err != nil {
		http.Error(w, "abgebrochen: "+err.Error(), http.StatusRequestTimeout)
		return
	}
	defer genSem.Release()
	genCtx, cancel := context.WithTimeout(r.Context(), 3*time.Minute)
	defer cancel()
	if err := generateTeaserClipsMP4(genCtx, outPath, previewPath, 1.0, 18); err != nil {
		// Fallback: a single short teaser from the start (less likely to hit broken spots).
		if err2 := generateTeaserMP4(genCtx, outPath, previewPath, 0, 8); err2 != nil {
			http.Error(w, "konnte preview nicht erzeugen: "+err.Error()+" (fallback ebenfalls fehlgeschlagen: "+err2.Error()+")", http.StatusInternalServerError)
			return
		}
	}
	serveTeaserFile(w, r, previewPath)
}
// ---------------------------
// Tasks: generate missing assets
// ---------------------------
// AssetsTaskState is the JSON-serializable progress snapshot of the
// background task that (re)creates missing thumbnails/teaser previews for
// finished recordings (see runGenerateMissingAssets).
type AssetsTaskState struct {
	Running           bool       `json:"running"`           // task goroutine currently active
	Total             int        `json:"total"`             // video files found by the scan
	Done              int        `json:"done"`              // items processed so far (incl. skips)
	GeneratedThumbs   int        `json:"generatedThumbs"`   // thumbs.jpg files newly written
	GeneratedPreviews int        `json:"generatedPreviews"` // preview.mp4 files newly written
	Skipped           int        `json:"skipped"`           // items whose assets already existed
	StartedAt         time.Time  `json:"startedAt"`
	FinishedAt        *time.Time `json:"finishedAt,omitempty"`
	Error             string     `json:"error,omitempty"` // last error, or "abgebrochen" on cancel
}

// assetsTaskMu guards assetsTaskState and assetsTaskCancel.
var assetsTaskMu sync.Mutex
var assetsTaskState AssetsTaskState

// assetsTaskCancel aborts the running task; nil when no task is active.
var assetsTaskCancel context.CancelFunc
// tasksGenerateAssets is the HTTP endpoint controlling the "generate
// missing assets" background task.
//
//	GET    -> current task state as JSON
//	POST   -> start the task if not already running; returns the (new) state
//	DELETE -> cancel a running task (204 when there is nothing to stop)
func tasksGenerateAssets(w http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case http.MethodGet:
		assetsTaskMu.Lock()
		st := assetsTaskState
		assetsTaskMu.Unlock()
		writeJSON(w, http.StatusOK, st)
		return
	case http.MethodPost:
		assetsTaskMu.Lock()
		if assetsTaskState.Running {
			st := assetsTaskState
			assetsTaskMu.Unlock()
			writeJSON(w, http.StatusOK, st)
			return
		}
		// Create a cancellable context so DELETE can abort the run.
		ctx, cancel := context.WithCancel(context.Background())
		assetsTaskCancel = cancel
		assetsTaskState = AssetsTaskState{
			Running:   true,
			StartedAt: time.Now(),
		}
		st := assetsTaskState
		assetsTaskMu.Unlock()
		go runGenerateMissingAssets(ctx)
		writeJSON(w, http.StatusOK, st)
		return
	case http.MethodDelete:
		assetsTaskMu.Lock()
		cancel := assetsTaskCancel
		running := assetsTaskState.Running
		assetsTaskMu.Unlock()
		if !running || cancel == nil {
			// Nothing to stop.
			w.WriteHeader(http.StatusNoContent)
			return
		}
		cancel()
		// Optional: immediate feedback via state.Error.
		assetsTaskMu.Lock()
		if assetsTaskState.Running {
			assetsTaskState.Error = "abgebrochen"
		}
		st := assetsTaskState
		assetsTaskMu.Unlock()
		writeJSON(w, http.StatusOK, st)
		return
	default:
		// BUGFIX: DELETE is supported too — advertise it via the Allow
		// header and mention it in the message (was "Nur GET/POST").
		w.Header().Set("Allow", "GET, POST, DELETE")
		http.Error(w, "Nur GET/POST/DELETE", http.StatusMethodNotAllowed)
		return
	}
}
// runGenerateMissingAssets is the worker behind tasksGenerateAssets. It
// scans the done directory (top level plus one subdirectory level, and the
// same under "keep"), skipping .trash and temporary files, and for every
// .mp4/.ts recording ensures thumbs.jpg, preview.mp4 and meta.json
// (duration, best-effort source URL) exist under generated/<asset>/.
//
// Progress is published via assetsTaskState under assetsTaskMu; ctx aborts
// the run. Per-item failures are logged/counted but do not stop the scan.
func runGenerateMissingAssets(ctx context.Context) {
	finishWithErr := func(err error) {
		now := time.Now()
		assetsTaskMu.Lock()
		assetsTaskState.Running = false
		assetsTaskState.FinishedAt = &now
		if err != nil {
			assetsTaskState.Error = err.Error()
		}
		assetsTaskMu.Unlock()
	}
	defer func() {
		assetsTaskMu.Lock()
		assetsTaskCancel = nil
		assetsTaskMu.Unlock()
	}()
	s := getSettings()
	doneAbs, err := resolvePathRelativeToApp(s.DoneDir)
	if err != nil || strings.TrimSpace(doneAbs) == "" {
		finishWithErr(fmt.Errorf("doneDir auflösung fehlgeschlagen: %v", err))
		return
	}
	type item struct {
		name string
		path string
	}
	// Never process anything inside .trash.
	isTrashPath := func(full string) bool {
		p := strings.ToLower(strings.ReplaceAll(full, "\\", "/"))
		return strings.Contains(p, "/.trash/") || strings.HasSuffix(p, "/.trash")
	}
	seen := map[string]struct{}{}
	items := make([]item, 0, 512)
	addIfVideo := func(full string) {
		if isTrashPath(full) {
			return
		}
		name := filepath.Base(full)
		low := strings.ToLower(name)
		if strings.Contains(low, ".part") || strings.Contains(low, ".tmp") {
			return
		}
		ext := strings.ToLower(filepath.Ext(name))
		if ext != ".mp4" && ext != ".ts" {
			return
		}
		// Dedupe by full path.
		if _, ok := seen[full]; ok {
			return
		}
		seen[full] = struct{}{}
		items = append(items, item{name: name, path: full})
	}
	scanOneLevel := func(dir string) {
		ents, err := os.ReadDir(dir)
		if err != nil {
			return
		}
		for _, e := range ents {
			// Never scan .trash folders.
			if e.IsDir() && strings.EqualFold(e.Name(), ".trash") {
				continue
			}
			full := filepath.Join(dir, e.Name())
			if e.IsDir() {
				sub, err := os.ReadDir(full)
				if err != nil {
					continue
				}
				for _, se := range sub {
					if se.IsDir() {
						continue
					}
					addIfVideo(filepath.Join(full, se.Name()))
				}
				continue
			}
			addIfVideo(full)
		}
	}
	// done + done/<model>/ + done/keep + done/keep/<model>/
	scanOneLevel(doneAbs)
	scanOneLevel(filepath.Join(doneAbs, "keep"))
	assetsTaskMu.Lock()
	assetsTaskState.Total = len(items)
	assetsTaskState.Done = 0
	assetsTaskState.GeneratedThumbs = 0
	assetsTaskState.GeneratedPreviews = 0
	assetsTaskState.Skipped = 0
	assetsTaskState.Error = ""
	assetsTaskMu.Unlock()
	for i, it := range items {
		if err := ctx.Err(); err != nil {
			finishWithErr(err)
			return
		}
		base := strings.TrimSuffix(it.name, filepath.Ext(it.name))
		id := stripHotPrefix(base)
		if strings.TrimSpace(id) == "" {
			assetsTaskMu.Lock()
			assetsTaskState.Done = i + 1
			assetsTaskMu.Unlock()
			continue
		}
		assetDir, derr := ensureGeneratedDir(id)
		if derr != nil {
			assetsTaskMu.Lock()
			assetsTaskState.Error = "mindestens ein Eintrag konnte nicht verarbeitet werden (siehe Logs)"
			assetsTaskState.Done = i + 1
			assetsTaskMu.Unlock()
			fmt.Println("⚠️ ensureGeneratedDir:", derr)
			continue
		}
		thumbPath := filepath.Join(assetDir, "thumbs.jpg")
		previewPath := filepath.Join(assetDir, "preview.mp4")
		metaPath := filepath.Join(assetDir, "meta.json")
		thumbOK := func() bool {
			fi, err := os.Stat(thumbPath)
			return err == nil && !fi.IsDir() && fi.Size() > 0
		}()
		previewOK := func() bool {
			fi, err := os.Stat(previewPath)
			return err == nil && !fi.IsDir() && fi.Size() > 0
		}()
		// File info (needed for meta validation).
		vfi, verr := os.Stat(it.path)
		if verr != nil || vfi.IsDir() || vfi.Size() <= 0 {
			assetsTaskMu.Lock()
			assetsTaskState.Done = i + 1
			assetsTaskMu.Unlock()
			continue
		}
		// Best-effort SourceURL from an existing, valid meta.json.
		sourceURL := ""
		if u, ok := readVideoMetaSourceURL(metaPath, vfi); ok {
			sourceURL = u
		}
		// Duration: prefer meta.json, else probe once and persist to meta.json.
		durSec := 0.0
		metaOK := false
		if d, ok := readVideoMetaDuration(metaPath, vfi); ok {
			durSec = d
			metaOK = true
			// Meta is valid (duration present); if we hold a SourceURL that
			// meta is still missing, enrich meta without another ffprobe run.
			if strings.TrimSpace(sourceURL) != "" {
				if u, ok := readVideoMetaSourceURL(metaPath, vfi); !ok || strings.TrimSpace(u) == "" {
					_ = writeVideoMetaDuration(metaPath, vfi, durSec, sourceURL)
				}
			}
		} else {
			dctx, cancel := context.WithTimeout(ctx, 6*time.Second)
			d, derr := durationSecondsCached(dctx, it.path)
			cancel()
			if derr == nil && d > 0 {
				durSec = d
				// Intentionally the duration-only writer (not the full meta writer).
				_ = writeVideoMetaDuration(metaPath, vfi, durSec, sourceURL)
				metaOK = true
			}
		}
		if thumbOK && previewOK && metaOK {
			assetsTaskMu.Lock()
			assetsTaskState.Skipped++
			assetsTaskState.Done = i + 1
			assetsTaskMu.Unlock()
			continue
		}
		// ----------------
		// Thumbs
		// ----------------
		if !thumbOK {
			genCtx, cancel := context.WithTimeout(ctx, 45*time.Second)
			err := thumbSem.Acquire(genCtx)
			cancel() // free the timeout context; an acquired semaphore stays held
			if err != nil {
				finishWithErr(err)
				return
			}
			t := 0.0
			if durSec > 0 {
				t = durSec * 0.5
			}
			img, e1 := extractFrameAtTimeJPEG(it.path, t)
			if e1 != nil || len(img) == 0 {
				img, e1 = extractLastFrameJPEG(it.path)
				if e1 != nil || len(img) == 0 {
					img, e1 = extractFirstFrameJPEG(it.path)
				}
			}
			// BUGFIX: the previous code both deferred thumbSem.Release() AND
			// released explicitly per iteration; the loop's accumulated defers
			// then fired again at function exit, over-releasing the semaphore
			// once per processed thumbnail. Release exactly once, right here.
			thumbSem.Release()
			if e1 == nil && len(img) > 0 {
				if err := atomicWriteFile(thumbPath, img); err == nil {
					assetsTaskMu.Lock()
					assetsTaskState.GeneratedThumbs++
					assetsTaskMu.Unlock()
				} else {
					fmt.Println("⚠️ thumb write:", err)
				}
			}
		}
		// ----------------
		// Preview
		// ----------------
		if !previewOK {
			genCtx, cancel := context.WithTimeout(ctx, 3*time.Minute)
			if err := genSem.Acquire(genCtx); err != nil {
				cancel()
				finishWithErr(err)
				return
			}
			err := generateTeaserClipsMP4(genCtx, it.path, previewPath, 1.0, 18)
			genSem.Release()
			cancel()
			if err == nil {
				assetsTaskMu.Lock()
				assetsTaskState.GeneratedPreviews++
				assetsTaskMu.Unlock()
			} else {
				fmt.Println("⚠️ preview clips:", err)
			}
		}
		assetsTaskMu.Lock()
		assetsTaskState.Done = i + 1
		assetsTaskMu.Unlock()
	}
	finishWithErr(nil)
}
// --- Teaser Preview Options + Helpers ---
// Minimum segment duration so ffmpeg does not choke on tiny snippets.
const minSegmentDuration = 0.50 // seconds

// TeaserPreviewOptions configures how a multi-segment teaser preview is
// assembled from a source video.
type TeaserPreviewOptions struct {
	Segments        int
	SegmentDuration float64
	Width           int
	Preset          string
	CRF             int
	// Forced to true by the generators (audio is NOT optional).
	Audio        bool
	AudioBitrate string
	UseVsync2    bool
}

// stepSizeAndOffset spreads the segment start points across a video of
// duration dur (seconds). It returns (stepSize, offset), both in seconds:
// segment i starts at offset + i*stepSize. A single segment lands in the
// middle; multiple segments keep a small margin away from start and end.
func (o TeaserPreviewOptions) stepSizeAndOffset(dur float64) (float64, float64) {
	if dur <= 0 {
		return 0, 0
	}
	segCount := o.Segments
	if segCount < 1 {
		segCount = 1
	}
	segLen := o.SegmentDuration
	if segLen <= 0 {
		segLen = 1
	}
	if segLen < minSegmentDuration {
		segLen = minSegmentDuration
	}
	// Last sensible start position (small safety gap before the end).
	lastStart := dur - 0.05 - segLen
	if lastStart < 0 {
		lastStart = 0
	}
	// One segment -> centered.
	if segCount == 1 {
		return 0, lastStart * 0.5
	}
	// Small edges so segments never hug the very start/end.
	edge := 0.05 * lastStart
	if edge < 0 {
		edge = 0
	}
	usable := lastStart - 2*edge
	if usable < 0 {
		usable = lastStart
		edge = 0
	}
	var step float64
	if segCount > 1 {
		step = usable / float64(segCount-1)
	}
	return step, edge
}
// generateTeaserClipsMP4 builds a multi-clip teaser preview from srcPath
// into outPath using maxClips clips of clipLenSec seconds each, without
// progress reporting. Convenience wrapper around
// generateTeaserClipsMP4WithProgress with a nil callback.
func generateTeaserClipsMP4(ctx context.Context, srcPath, outPath string, clipLenSec float64, maxClips int) error {
	return generateTeaserClipsMP4WithProgress(ctx, srcPath, outPath, clipLenSec, maxClips, nil)
}
// generateTeaserClipsMP4WithProgress renders a teaser preview composed of
// maxClips clips of clipLenSec seconds each, reporting encode progress
// (0..1) through onRatio when non-nil. It maps the legacy signature onto
// TeaserPreviewOptions using stash-like encoder defaults (640px, x264
// veryfast CRF 21, 128k AAC stereo).
func generateTeaserClipsMP4WithProgress(
	ctx context.Context,
	srcPath, outPath string,
	clipLenSec float64,
	maxClips int,
	onRatio func(r float64),
) error {
	options := TeaserPreviewOptions{
		Segments:        maxClips,
		SegmentDuration: clipLenSec,
		Width:           640,
		Preset:          "veryfast",
		CRF:             21,
		Audio:           true,
		AudioBitrate:    "128k",
		UseVsync2:       false,
	}
	return generateTeaserPreviewMP4WithProgress(ctx, srcPath, outPath, options, onRatio)
}
// generateTeaserChunkMP4 encodes a single teaser chunk of dur seconds from
// src (starting at start seconds) into out. Audio is always forced on.
// The chunk is written to a ".part.mp4" temp file and renamed into place
// on success; on failure the temp file is removed and ffmpeg's stderr is
// included in the returned error.
func generateTeaserChunkMP4(ctx context.Context, src, out string, start, dur float64, opts TeaserPreviewOptions) error {
	// Audio is mandatory here (not optional).
	opts.Audio = true
	tmpName := strings.TrimSuffix(out, ".mp4") + ".part.mp4"
	chunkLen := dur
	if chunkLen < minSegmentDuration {
		chunkLen = minSegmentDuration
	}
	args := append([]string{"-y", "-hide_banner", "-loglevel", "error"}, ffmpegInputTol...)
	args = append(args,
		"-ss", fmt.Sprintf("%.3f", start),
		"-t", fmt.Sprintf("%.3f", chunkLen),
		"-i", src,
		"-map", "0:v:0",
		"-c:v", "libx264",
		"-pix_fmt", "yuv420p",
		"-profile:v", "high",
		"-level", "4.2",
		"-preset", opts.Preset,
		"-crf", strconv.Itoa(opts.CRF),
		"-threads", "4",
	)
	if opts.UseVsync2 {
		args = append(args, "-vsync", "2")
	}
	if opts.Audio {
		args = append(args,
			"-map", "0:a:0", // audio required
			"-c:a", "aac",
			"-b:a", opts.AudioBitrate,
			"-ac", "2",
			"-shortest",
		)
	} else {
		args = append(args, "-an")
	}
	args = append(args, "-movflags", "+faststart", tmpName)
	cmd := exec.CommandContext(ctx, ffmpegPath, args...)
	var errBuf bytes.Buffer
	cmd.Stderr = &errBuf
	if runErr := cmd.Run(); runErr != nil {
		_ = os.Remove(tmpName)
		return fmt.Errorf("ffmpeg teaser chunk failed: %v (%s)", runErr, strings.TrimSpace(errBuf.String()))
	}
	_ = os.Remove(out)
	return os.Rename(tmpName, out)
}
// generateTeaserPreviewMP4WithProgress builds a single MP4 teaser by
// concatenating opts.Segments clips of opts.SegmentDuration seconds,
// spread across the source via stepSizeAndOffset, re-encoded to H.264/AAC
// through one concat filter graph. Encoding progress (0..1) is reported
// via onRatio when non-nil, parsed from ffmpeg's "-progress" key=value
// output. The result is written to a ".part.mp4" temp file and renamed
// into place on success.
func generateTeaserPreviewMP4WithProgress(
	ctx context.Context,
	srcPath, outPath string,
	opts TeaserPreviewOptions,
	onRatio func(r float64),
) error {
	// Audio is mandatory (not optional).
	opts.Audio = true
	// Defaults
	if opts.SegmentDuration <= 0 {
		opts.SegmentDuration = 1
	}
	if opts.Segments <= 0 {
		opts.Segments = 18
	}
	if opts.Width <= 0 {
		opts.Width = 640
	}
	if opts.Preset == "" {
		opts.Preset = "veryfast"
	}
	if opts.CRF <= 0 {
		opts.CRF = 21
	}
	if opts.AudioBitrate == "" {
		opts.AudioBitrate = "128k"
	}
	segDur := opts.SegmentDuration
	if segDur < minSegmentDuration {
		segDur = minSegmentDuration
	}
	// Fetch the duration once (result is cached).
	dur, _ := durationSecondsCached(ctx, srcPath)
	// Short-video fallback:
	// if the video is shorter than Segments*SegmentDuration -> render a
	// single preview spanning the whole duration.
	if dur > 0 && dur < segDur*float64(opts.Segments) {
		// treat as one segment with duration = dur
		opts.Segments = 1
		segDur = dur
	}
	// Duration unknown/too small: take one piece from the start.
	if !(dur > 0) {
		if onRatio != nil {
			onRatio(0)
		}
		// min(8, segDur) keeps the blind clip short, similar to before.
		err := generateTeaserChunkMP4(ctx, srcPath, outPath, 0, math.Min(8, segDur), opts)
		if onRatio != nil {
			onRatio(1)
		}
		return err
	}
	// Start points: offset + i*stepSize
	stepSize, offset := opts.stepSizeAndOffset(dur)
	starts := make([]float64, 0, opts.Segments)
	for i := 0; i < opts.Segments; i++ {
		t := offset + float64(i)*stepSize
		// clamp: make sure we never trim past the end
		maxStart := math.Max(0, dur-0.05-segDur)
		if t < 0 {
			t = 0
		}
		if t > maxStart {
			t = maxStart
		}
		if t < 0.05 {
			t = 0.05
		}
		starts = append(starts, t)
	}
	expectedOutSec := float64(len(starts)) * segDur
	tmp := strings.TrimSuffix(outPath, ".mp4") + ".part.mp4"
	args := []string{
		"-y",
		"-nostats",
		"-progress", "pipe:1",
		"-hide_banner",
		"-loglevel", "error",
	}
	// Inputs: a dedicated -ss/-t/-i triple per segment.
	for _, t := range starts {
		args = append(args, ffmpegInputTol...)
		args = append(args,
			"-ss", fmt.Sprintf("%.3f", t),
			"-t", fmt.Sprintf("%.3f", segDur),
			"-i", srcPath,
		)
	}
	// Build the filter_complex graph.
	var fc strings.Builder
	for i := range starts {
		// stash-like: scale to Width, pix_fmt yuv420p; profile high/level
		// 4.2 are applied later in the output args.
		fmt.Fprintf(&fc,
			"[%d:v]scale=%d:-2,setsar=1,setpts=PTS-STARTPTS[v%d];",
			i, opts.Width, i,
		)
		if opts.Audio {
			// normalize audio so concat is safe (same rate/layout/PTS)
			fmt.Fprintf(&fc,
				"[%d:a]aresample=48000,aformat=channel_layouts=stereo,asetpts=PTS-STARTPTS[a%d];",
				i, i,
			)
		}
	}
	// interleaved concat inputs
	for i := range starts {
		if opts.Audio {
			fmt.Fprintf(&fc, "[v%d][a%d]", i, i)
		} else {
			fmt.Fprintf(&fc, "[v%d]", i)
		}
	}
	if opts.Audio {
		fmt.Fprintf(&fc, "concat=n=%d:v=1:a=1[v][a]", len(starts))
	} else {
		fmt.Fprintf(&fc, "concat=n=%d:v=1:a=0[v]", len(starts))
	}
	args = append(args, "-filter_complex", fc.String())
	// map outputs
	args = append(args, "-map", "[v]")
	if opts.Audio {
		args = append(args, "-map", "[a]")
	}
	// Video encode (stash-like)
	args = append(args,
		"-c:v", "libx264",
		"-pix_fmt", "yuv420p",
		"-profile:v", "high",
		"-level", "4.2",
		"-preset", opts.Preset,
		"-crf", strconv.Itoa(opts.CRF),
		"-threads", "4",
	)
	if opts.UseVsync2 {
		args = append(args, "-vsync", "2")
	}
	// Audio encode (stash-like 128k), plus -ac 2
	if opts.Audio {
		args = append(args,
			"-c:a", "aac",
			"-b:a", opts.AudioBitrate,
			"-ac", "2",
			"-shortest",
		)
	}
	args = append(args, "-movflags", "+faststart", tmp)
	cmd := exec.CommandContext(ctx, ffmpegPath, args...)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}
	var stderr bytes.Buffer
	cmd.Stderr = &stderr
	if err := cmd.Start(); err != nil {
		return err
	}
	sc := bufio.NewScanner(stdout)
	sc.Buffer(make([]byte, 0, 64*1024), 1024*1024)
	var lastSent float64
	var lastAt time.Time
	// send rate-limits progress callbacks to >=1% deltas and >=150ms gaps;
	// force pushes a final 1.0 regardless.
	send := func(outSec float64, force bool) {
		if onRatio == nil {
			return
		}
		if expectedOutSec > 0 && outSec > 0 {
			r := outSec / expectedOutSec
			if r < 0 {
				r = 0
			}
			if r > 1 {
				r = 1
			}
			if r-lastSent < 0.01 && !force {
				return
			}
			if !lastAt.IsZero() && time.Since(lastAt) < 150*time.Millisecond && !force {
				return
			}
			lastSent = r
			lastAt = time.Now()
			onRatio(r)
			return
		}
		if force {
			onRatio(1)
		}
	}
	var outSec float64
	for sc.Scan() {
		line := strings.TrimSpace(sc.Text())
		if line == "" {
			continue
		}
		parts := strings.SplitN(line, "=", 2)
		if len(parts) != 2 {
			continue
		}
		k, v := parts[0], parts[1]
		switch k {
		case "out_time_ms":
			if n, perr := strconv.ParseInt(strings.TrimSpace(v), 10, 64); perr == nil && n > 0 {
				outSec = float64(n) / 1_000_000.0
				send(outSec, false)
			}
		case "out_time":
			if s := parseFFmpegOutTime(v); s > 0 {
				outSec = s
				send(outSec, false)
			}
		case "progress":
			if strings.TrimSpace(v) == "end" {
				send(outSec, true)
			}
		}
	}
	if err := cmd.Wait(); err != nil {
		_ = os.Remove(tmp)
		return fmt.Errorf("ffmpeg teaser preview failed: %v (%s)", err, strings.TrimSpace(stderr.String()))
	}
	_ = os.Remove(outPath)
	return os.Rename(tmp, outPath)
}
func prunePreviewCacheDir(previewDir string, maxFrames int, maxAge time.Duration) {
entries, err := os.ReadDir(previewDir)
if err != nil {
return
}
type frame struct {
path string
mt time.Time
}
now := time.Now()
var frames []frame
for _, e := range entries {
name := e.Name()
path := filepath.Join(previewDir, name)
// .part Dateien immer weg
if strings.HasSuffix(name, ".part") {
_ = os.Remove(path)
continue
}
// optional: preview.jpg neu erzeugen lassen, wenn uralt
if name == "preview.jpg" {
if info, err := e.Info(); err == nil {
if maxAge > 0 && now.Sub(info.ModTime()) > maxAge {
_ = os.Remove(path)
}
}
continue
}
// Nur t_*.jpg verwalten
if strings.HasPrefix(name, "t_") && strings.HasSuffix(name, ".jpg") {
info, err := e.Info()
if err != nil {
continue
}
// alte Frames löschen
if maxAge > 0 && now.Sub(info.ModTime()) > maxAge {
_ = os.Remove(path)
continue
}
frames = append(frames, frame{path: path, mt: info.ModTime()})
}
}
// Anzahl begrenzen: älteste zuerst löschen
if maxFrames > 0 && len(frames) > maxFrames {
sort.Slice(frames, func(i, j int) bool { return frames[i].mt.Before(frames[j].mt) })
toDelete := len(frames) - maxFrames
for i := 0; i < toDelete; i++ {
_ = os.Remove(frames[i].path)
}
}
}
func servePreviewJPEGBytes(w http.ResponseWriter, img []byte) {
w.Header().Set("Content-Type", "image/jpeg")
w.Header().Set("Cache-Control", "public, max-age=31536000")
w.Header().Set("X-Content-Type-Options", "nosniff")
w.WriteHeader(http.StatusOK)
_, _ = w.Write(img)
}
func servePreviewJPEGBytesNoStore(w http.ResponseWriter, img []byte) {
w.Header().Set("Content-Type", "image/jpeg")
w.Header().Set("Cache-Control", "no-store, max-age=0")
w.Header().Set("Pragma", "no-cache")
w.Header().Set("Expires", "0")
w.Header().Set("X-Content-Type-Options", "nosniff")
w.WriteHeader(http.StatusOK)
_, _ = w.Write(img)
}
func serveLivePreviewJPEGBytes(w http.ResponseWriter, img []byte) {
w.Header().Set("Content-Type", "image/jpeg")
w.Header().Set("Cache-Control", "no-store, max-age=0, must-revalidate")
w.Header().Set("Pragma", "no-cache")
w.Header().Set("Expires", "0")
w.Header().Set("X-Content-Type-Options", "nosniff")
w.WriteHeader(http.StatusOK)
_, _ = w.Write(img)
}
func servePreviewJPEGFile(w http.ResponseWriter, r *http.Request, path string) {
w.Header().Set("Content-Type", "image/jpeg")
w.Header().Set("Cache-Control", "public, max-age=31536000")
w.Header().Set("X-Content-Type-Options", "nosniff")
http.ServeFile(w, r, path)
}
// recordList returns all visible record jobs as a JSON array, newest
// first. Hidden (probe) jobs and nil entries are filtered out so the UI
// never sees them. GET only.
func recordList(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Nur GET erlaubt", http.StatusMethodNotAllowed)
		return
	}
	jobsMu.Lock()
	visible := make([]*RecordJob, 0, len(jobs))
	for _, job := range jobs {
		// Probe jobs stay invisible (Hidden) — and skip nil defensively.
		if job == nil || job.Hidden {
			continue
		}
		visible = append(visible, job)
	}
	jobsMu.Unlock()
	// Newest first.
	sort.Slice(visible, func(a, b int) bool {
		return visible[a].StartedAt.After(visible[b].StartedAt)
	})
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Cache-Control", "no-store")
	_ = json.NewEncoder(w).Encode(visible)
}
// previewFileRe whitelists the only file names servable from a preview HLS
// directory: index.m3u8 / index_hq.m3u8 playlists and seg_*.ts segments
// (low/hq-prefixed or plain numeric).
var previewFileRe = regexp.MustCompile(`^(index(_hq)?\.m3u8|seg_(low|hq)_\d+\.ts|seg_\d+\.ts)$`)
func serveEmptyLiveM3U8(w http.ResponseWriter, r *http.Request) {
// Für Player: gültige Playlist statt 204 liefern
w.Header().Set("Content-Type", "application/vnd.apple.mpegurl; charset=utf-8")
w.Header().Set("Cache-Control", "no-store")
w.Header().Set("X-Content-Type-Options", "nosniff")
// Optional: Player/Proxy darf schnell retryen
w.Header().Set("Retry-After", "1")
// Bei HEAD nur Header schicken
if r.Method == http.MethodHead {
w.WriteHeader(http.StatusOK)
return
}
// Minimal gültige LIVE-Playlist (keine Segmente, kein ENDLIST)
// Viele Player bleiben damit im "loading", statt hart zu failen.
body := "#EXTM3U\n" +
"#EXT-X-VERSION:3\n" +
"#EXT-X-TARGETDURATION:2\n" +
"#EXT-X-MEDIA-SEQUENCE:0\n"
w.WriteHeader(http.StatusOK)
_, _ = w.Write([]byte(body))
}
// stopPreview tears down a job's live-preview pipeline: the preview
// command, cancel func, and preview state are detached under the jobs
// lock, then the context is cancelled and the ffmpeg process killed
// outside the lock.
func stopPreview(job *RecordJob) {
	jobsMu.Lock()
	previewCmd := job.previewCmd
	previewCancel := job.previewCancel
	job.previewCmd = nil
	job.previewCancel = nil
	job.LiveThumbStarted = false
	job.PreviewDir = ""
	jobsMu.Unlock()
	if previewCancel != nil {
		previewCancel()
	}
	if previewCmd != nil && previewCmd.Process != nil {
		_ = previewCmd.Process.Kill()
	}
}
// servePreviewHLSFile serves one file (playlist or media segment) from a
// job's live-preview HLS directory. Only whitelisted names pass
// (previewFileRe). Behavior:
//   - HEAD: pure existence check; never starts the preview pipeline.
//   - GET requires hover (isHover) or ?play=1; otherwise index requests
//     get an empty live playlist and segment requests get 404.
//   - Active requests start/keepalive the preview and map PreviewState to
//     HTTP codes (private -> 403, offline -> 404, error -> 503).
//   - Playlists are rewritten to route through the API; segments stream
//     with Range support once their size has stabilized.
func servePreviewHLSFile(w http.ResponseWriter, r *http.Request, id, file string) {
	file = strings.TrimSpace(file)
	if file == "" || filepath.Base(file) != file || !previewFileRe.MatchString(file) {
		http.Error(w, "ungültige file", http.StatusBadRequest)
		return
	}
	isIndex := file == "index.m3u8" || file == "index_hq.m3u8"
	jobsMu.Lock()
	job, ok := jobs[id]
	state := ""
	if ok && job != nil {
		state = strings.TrimSpace(job.PreviewState)
	}
	jobsMu.Unlock()
	// =========================
	// HEAD = existence check only (no hover needed, no preview start)
	// =========================
	if r.Method == http.MethodHead {
		if !ok || job == nil {
			w.WriteHeader(http.StatusNotFound)
			return
		}
		if state == "private" {
			w.WriteHeader(http.StatusForbidden)
			return
		}
		if state == "offline" {
			w.WriteHeader(http.StatusNotFound)
			return
		}
		previewDir := strings.TrimSpace(job.PreviewDir)
		if previewDir == "" {
			w.WriteHeader(http.StatusNotFound)
			return
		}
		p := filepath.Join(previewDir, file)
		if st, err := os.Stat(p); err == nil && !st.IsDir() {
			w.Header().Set("Cache-Control", "no-store")
			w.WriteHeader(http.StatusOK)
			return
		}
		w.WriteHeader(http.StatusNotFound)
		return
	}
	// =========================
	// The player may start the preview without hover:
	// - the frontend appends &play=1 (recommended)
	// - we additionally accept: play=1 => treat as active
	// =========================
	active := isHover(r) || strings.TrimSpace(r.URL.Query().Get("play")) == "1"
	if !active {
		// No hover/play => never tap the live HLS
		if isIndex {
			serveEmptyLiveM3U8(w, r)
			return
		}
		http.Error(w, "preview not active", http.StatusNotFound)
		return
	}
	// active => bail out cleanly when the job is unknown
	if !ok || job == nil {
		if isIndex {
			serveEmptyLiveM3U8(w, r)
			return
		}
		http.Error(w, "job nicht gefunden", http.StatusNotFound)
		return
	}
	// active => start/keepalive the preview pipeline
	ensurePreviewStarted(r, job)
	touchPreview(job)
	// re-read state; it may have changed after the start
	jobsMu.Lock()
	state = strings.TrimSpace(job.PreviewState)
	jobsMu.Unlock()
	if state == "private" {
		http.Error(w, "model private", http.StatusForbidden)
		return
	}
	if state == "offline" {
		http.Error(w, "model offline", http.StatusNotFound)
		return
	}
	if state == "error" {
		http.Error(w, "preview error", http.StatusServiceUnavailable)
		return
	}
	previewDir := strings.TrimSpace(job.PreviewDir)
	if previewDir == "" {
		if isIndex {
			serveEmptyLiveM3U8(w, r)
			return
		}
		http.Error(w, "preview nicht verfügbar", http.StatusNotFound)
		return
	}
	p := filepath.Join(previewDir, file)
	st, err := os.Stat(p)
	if err != nil || st.IsDir() {
		if isIndex {
			serveEmptyLiveM3U8(w, r)
			return
		}
		http.Error(w, "datei nicht gefunden", http.StatusNotFound)
		return
	}
	ext := strings.ToLower(filepath.Ext(p))
	// common: always no-store
	w.Header().Set("Cache-Control", "no-store")
	// avoids some proxy buffering surprises (harmless if ignored)
	w.Header().Set("X-Accel-Buffering", "no")
	// =========================
	// .m3u8: rewrite (small file, ReadFile is fine)
	// =========================
	if ext == ".m3u8" {
		raw, err := os.ReadFile(p)
		if err != nil {
			http.Error(w, "m3u8 read failed", http.StatusInternalServerError)
			return
		}
		rewritten := rewriteM3U8(raw, id)
		w.Header().Set("Content-Type", "application/vnd.apple.mpegurl; charset=utf-8")
		w.WriteHeader(http.StatusOK)
		_, _ = w.Write(rewritten)
		return
	}
	// =========================
	// Segments: stream robustly + Range support
	// =========================
	switch ext {
	case ".ts":
		w.Header().Set("Content-Type", "video/mp2t")
	case ".m4s":
		w.Header().Set("Content-Type", "video/iso.segment")
	default:
		w.Header().Set("Content-Type", "application/octet-stream")
	}
	// Optional but very helpful:
	// only serve a segment once it has stopped growing (prevents "hanging" large .ts)
	if ext == ".ts" || ext == ".m4s" {
		if !waitForStableFile(p, 2, 120*time.Millisecond) {
			// Segment is probably still being written -> prefer 404, player retries
			http.Error(w, "segment not ready", http.StatusNotFound)
			return
		}
	}
	f, err := os.Open(p)
	if err != nil {
		http.Error(w, "open failed", http.StatusNotFound)
		return
	}
	defer f.Close()
	// ServeContent handles Range correctly and streams without ReadAll.
	// name is only for logs/cache; modTime for If-Modified-Since etc.
	http.ServeContent(w, r, file, st.ModTime(), f)
}
// waitForStableFile reports whether the file at path keeps the same size
// across up to `checks` stat probes spaced `interval` apart. A stable size
// is taken as a hint that the writer (ffmpeg) has finished the segment.
// Returns false if the path is missing, is a directory, or the size never
// repeats within the allowed number of probes.
func waitForStableFile(path string, checks int, interval time.Duration) bool {
	prev := int64(-1)
	for probe := 0; probe < checks; probe++ {
		fi, err := os.Stat(path)
		if err != nil || fi.IsDir() {
			return false
		}
		cur := fi.Size()
		if prev >= 0 && cur == prev {
			return true
		}
		prev = cur
		time.Sleep(interval)
	}
	// No two consecutive probes agreed: treat the file as still being written.
	return false
}
// rewriteM3U8 rewrites all relative URIs in an HLS playlist so that they are
// fetched through /api/record/preview again. "play=1" is appended so that
// follow-up requests (segments, child playlists) work without an active
// hover. Absolute URLs and URIs already pointing at the preview endpoint are
// left untouched. On a scanner error the original bytes are returned
// unchanged (better a stale playlist than a truncated one).
func rewriteM3U8(raw []byte, id string) []byte {
	prefix := "/api/record/preview?id=" + url.QueryEscape(id) + "&file="
	var buf bytes.Buffer
	scanner := bufio.NewScanner(bytes.NewReader(raw))
	// Scanner's default 64K token limit is plenty for normal-sized playlists;
	// raise the buffer only if huge playlists ever show up.
	for scanner.Scan() {
		line := scanner.Text()
		trimmed := strings.TrimSpace(line)
		switch {
		case trimmed == "":
			// keep blank lines as-is
		case strings.HasPrefix(trimmed, "#"):
			// Tag/comment line: rewrite an embedded URI="..." attribute if
			// present (EXT-X-KEY, EXT-X-MAP, ...).
			buf.WriteString(rewriteAttrURI(line, prefix))
		case strings.HasPrefix(trimmed, "http://"), strings.HasPrefix(trimmed, "https://"):
			// Absolute URL: leave untouched.
			buf.WriteString(line)
		case strings.Contains(trimmed, "/api/record/preview"):
			// Already routed through our endpoint.
			buf.WriteString(line)
		default:
			// Segment or child playlist: keep only the basename (ffmpeg does
			// not usually emit subdirectories) and route it through the
			// preview API with play=1 attached.
			buf.WriteString(prefix + url.QueryEscape(path.Base(trimmed)) + "&play=1")
		}
		buf.WriteByte('\n')
	}
	if scanner.Err() != nil {
		// If the scanner fails for any reason: return raw (better than broken).
		return raw
	}
	return buf.Bytes()
}
// rewriteAttrURI rewrites a URI="..." attribute inside an HLS tag line
// (EXT-X-KEY / EXT-X-MAP / EXT-X-MEDIA / EXT-X-I-FRAME-STREAM-INF, ...)
// so the referenced file is served via the preview endpoint, with play=1
// attached. Only relative URIs are touched; absolute URLs and URIs already
// pointing at /api/record/preview stay as they are.
func rewriteAttrURI(line, base string) string {
	const marker = `URI="`
	open := strings.Index(line, marker)
	if open < 0 {
		return line
	}
	valStart := open + len(marker)
	closeRel := strings.Index(line[valStart:], `"`)
	if closeRel < 0 {
		return line
	}
	valEnd := valStart + closeRel
	uri := strings.TrimSpace(line[valStart:valEnd])
	// Absolute or already ours => nothing to do.
	if strings.HasPrefix(uri, "http://") ||
		strings.HasPrefix(uri, "https://") ||
		strings.Contains(uri, "/api/record/preview") {
		return line
	}
	// Route the basename through the preview API.
	replacement := base + url.QueryEscape(path.Base(uri)) + "&play=1"
	return line[:valStart] + replacement + line[valEnd:]
}
// quotedURIRe matches URI="..." attributes in HLS tag lines. Compiled once
// at package scope so rewriteQuotedURI does not pay a regexp compilation on
// every call (this handler path can be hit for every playlist request).
var quotedURIRe = regexp.MustCompile(`URI="([^"]+)"`)

// rewriteQuotedURI rewrites every relative URI="..." attribute in line so it
// is served via /api/record/preview for the given job id. Empty, absolute
// (http/https) and root-relative ("/...") URIs are left untouched.
func rewriteQuotedURI(line, id string) string {
	return quotedURIRe.ReplaceAllStringFunc(line, func(m string) string {
		sub := quotedURIRe.FindStringSubmatch(m)
		if len(sub) != 2 {
			return m
		}
		uri := strings.TrimSpace(sub[1])
		if uri == "" || strings.HasPrefix(uri, "http://") || strings.HasPrefix(uri, "https://") || strings.HasPrefix(uri, "/") {
			return m
		}
		repl := "/api/record/preview?id=" + url.QueryEscape(id) + "&file=" + url.QueryEscape(uri)
		return `URI="` + repl + `"`
	})
}
// rewriteM3U8ToPreviewEndpoint rewrites every non-tag line of the given
// playlist text into a /api/record/preview URL for the job id. Tag lines
// (starting with "#") and blank lines are kept verbatim.
func rewriteM3U8ToPreviewEndpoint(m3u8 string, id string) string {
	idEsc := url.QueryEscape(id)
	out := strings.Split(m3u8, "\n")
	for i := range out {
		trimmed := strings.TrimSpace(out[i])
		if trimmed == "" || strings.HasPrefix(trimmed, "#") {
			continue
		}
		// Segment/URI line: route through the preview endpoint.
		out[i] = "/api/record/preview?id=" + idEsc + "&file=" + url.QueryEscape(trimmed)
	}
	return strings.Join(out, "\n")
}
// classifyPreviewFFmpegStderr inspects ffmpeg stderr output for well-known
// HTTP failure phrases and maps them to a preview state plus HTTP status:
// 403 => ("private", StatusForbidden), 404 => ("offline", StatusNotFound).
// Returns ("", 0) when nothing matched.
func classifyPreviewFFmpegStderr(stderr string) (state string, httpStatus int) {
	low := strings.ToLower(stderr)
	containsAny := func(needles ...string) bool {
		for _, n := range needles {
			if strings.Contains(low, n) {
				return true
			}
		}
		return false
	}
	// ffmpeg typically writes:
	// "HTTP error 403 Forbidden" or "Server returned 403 Forbidden"
	if containsAny("403 forbidden", "http error 403", "server returned 403") {
		return "private", http.StatusForbidden
	}
	// "HTTP error 404 Not Found" or "Server returned 404 Not Found"
	if containsAny("404 not found", "http error 404", "server returned 404") {
		return "offline", http.StatusNotFound
	}
	return "", 0
}
// servePreviewStatusSVG writes a self-contained 16:9 placeholder SVG (used
// when a preview is unavailable, e.g. private/offline) with the given label
// rendered as the card title. The label is HTML-escaped before interpolation;
// an empty label falls back to "Preview". A status <= 0 is normalized to 200.
func servePreviewStatusSVG(w http.ResponseWriter, label string, status int) {
	w.Header().Set("Content-Type", "image/svg+xml; charset=utf-8")
	// Placeholders reflect live state; never cache them.
	w.Header().Set("Cache-Control", "no-store")
	w.Header().Set("X-Content-Type-Options", "nosniff")
	if status <= 0 {
		status = http.StatusOK
	}
	// Escape before interpolating into the SVG markup below.
	title := html.EscapeString(strings.TrimSpace(label))
	if title == "" {
		title = "Preview"
	}
	// 16:9 aspect ratio (matches the preview cards in the UI)
	svg := `<?xml version="1.0" encoding="UTF-8"?>
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 320 180" preserveAspectRatio="xMidYMid slice">
<defs>
<!-- Subtle gradient background -->
<linearGradient id="bg" x1="0" y1="0" x2="1" y2="1">
<stop offset="0" stop-color="rgba(99,102,241,0.10)"/>
<stop offset="1" stop-color="rgba(14,165,233,0.08)"/>
</linearGradient>
<!-- Soft vignette -->
<radialGradient id="vig" cx="50%" cy="45%" r="75%">
<stop offset="0" stop-color="rgba(0,0,0,0)"/>
<stop offset="1" stop-color="rgba(0,0,0,0.18)"/>
</radialGradient>
<!-- Card shadow -->
<filter id="shadow" x="-20%" y="-20%" width="140%" height="140%">
<feDropShadow dx="0" dy="6" stdDeviation="8" flood-color="rgba(0,0,0,0.18)"/>
</filter>
</defs>
<!-- base -->
<rect x="0" y="0" width="320" height="180" rx="18" fill="rgba(17,24,39,0.06)"/>
<rect x="0" y="0" width="320" height="180" rx="18" fill="url(#bg)"/>
<rect x="0" y="0" width="320" height="180" rx="18" fill="url(#vig)"/>
<!-- inner card -->
<g filter="url(#shadow)">
<rect x="18" y="18" width="284" height="144" rx="16"
fill="rgba(255,255,255,0.72)"
stroke="rgba(0,0,0,0.08)"/>
<rect x="18" y="18" width="284" height="144" rx="16"
fill="rgba(255,255,255,0)"
stroke="rgba(99,102,241,0.18)"
stroke-width="2"
stroke-dasharray="6 6"/>
</g>
<!-- icon -->
<g transform="translate(160 70)">
<circle r="20" fill="rgba(17,24,39,0.08)" stroke="rgba(0,0,0,0.08)"/>
<!-- simple "image-off" icon -->
<path d="M-10 6 L-4 0 L2 6 L10 -2" fill="none" stroke="rgba(17,24,39,0.55)" stroke-width="2.4" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M-10 -6 H10" fill="none" stroke="rgba(17,24,39,0.35)" stroke-width="2.4" stroke-linecap="round"/>
<path d="M-12 12 L12 -12" fill="none" stroke="rgba(239,68,68,0.55)" stroke-width="2.6" stroke-linecap="round"/>
</g>
<!-- text -->
<text x="160" y="118" text-anchor="middle"
font-family="ui-sans-serif, system-ui, -apple-system, Segoe UI, Roboto"
font-size="16" font-weight="750"
fill="rgba(17,24,39,0.88)">` + title + `</text>
<text x="160" y="140" text-anchor="middle"
font-family="ui-sans-serif, system-ui, -apple-system, Segoe UI, Roboto"
font-size="11.5" font-weight="650"
fill="rgba(75,85,99,0.82)">Preview nicht verfügbar</text>
</svg>
`
	w.WriteHeader(status)
	_, _ = w.Write([]byte(svg))
}
// startPreviewHLS launches a 480p low-latency HLS preview transcode of
// m3u8URL into previewDir via ffmpeg. The process runs in a background
// goroutine gated by previewSem; when it fails (and ctx is still alive) its
// stderr is classified (403 => "private", 404 => "offline", otherwise
// "error") and stored on the job's PreviewState fields. Also starts the live
// thumbnail loop for the job. Returns an error only for setup problems
// (missing ffmpeg binary, mkdir failure); transcode failures are reported
// asynchronously through the job state.
func startPreviewHLS(ctx context.Context, job *RecordJob, m3u8URL, previewDir, httpCookie, userAgent string) error {
	if strings.TrimSpace(ffmpegPath) == "" {
		return fmt.Errorf("kein ffmpeg gefunden setze FFMPEG_PATH oder lege ffmpeg(.exe) neben das Backend")
	}
	if err := os.MkdirAll(previewDir, 0755); err != nil {
		return err
	}
	// Reset any previous preview state for this fresh start.
	jobsMu.Lock()
	job.PreviewState = ""
	job.PreviewStateAt = ""
	job.PreviewStateMsg = ""
	jobsMu.Unlock()
	notifyJobsChanged()
	// Input-side args: overwrite output, optional UA and Cookie header.
	commonIn := []string{"-y"}
	if strings.TrimSpace(userAgent) != "" {
		commonIn = append(commonIn, "-user_agent", userAgent)
	}
	if strings.TrimSpace(httpCookie) != "" {
		commonIn = append(commonIn, "-headers", fmt.Sprintf("Cookie: %s\r\n", httpCookie))
	}
	commonIn = append(commonIn, "-i", m3u8URL)
	hqArgs := append(commonIn,
		"-vf", "scale=480:-2",
		"-c:v", "libx264", "-preset", "veryfast", "-tune", "zerolatency",
		"-pix_fmt", "yuv420p",
		"-profile:v", "main",
		"-level", "3.1",
		"-threads", "4",
		// GOP ~ 2s (at 24fps). Optionally add force_key_frames on top.
		"-g", "48", "-keyint_min", "48", "-sc_threshold", "0",
		// optional, if segments still come out large:
		// "-force_key_frames", "expr:gte(t,n_forced*2)",
		"-map", "0:v:0",
		"-map", "0:a:0?",
		"-c:a", "aac", "-b:a", "128k", "-ac", "2",
		"-f", "hls",
		"-hls_time", "2",
		"-hls_list_size", "6",
		"-hls_allow_cache", "0",
		// important: temp_file so half-written segments are never served
		"-hls_flags", "delete_segments+append_list+independent_segments+temp_file",
		"-hls_segment_filename", filepath.Join(previewDir, "seg_hq_%05d.ts"),
		// recommendation: omit base_url (playlists are rewritten server-side anyway)
		// "-hls_base_url", baseURL,
		filepath.Join(previewDir, "index_hq.m3u8"),
	)
	cmd := exec.CommandContext(ctx, ffmpegPath, hqArgs...)
	var stderr bytes.Buffer
	cmd.Stderr = &stderr
	jobsMu.Lock()
	job.previewCmd = cmd
	jobsMu.Unlock()
	go func() {
		// Bound concurrent preview transcodes; bail out silently when the
		// context is cancelled while waiting for a slot.
		if err := previewSem.Acquire(ctx); err != nil {
			jobsMu.Lock()
			if job.previewCmd == cmd {
				job.previewCmd = nil
			}
			jobsMu.Unlock()
			return
		}
		defer previewSem.Release()
		if err := cmd.Run(); err != nil && ctx.Err() == nil {
			st := strings.TrimSpace(stderr.String())
			// Detect 403/404 -> mark the preview as private/offline.
			state, code := classifyPreviewFFmpegStderr(st)
			jobsMu.Lock()
			if state != "" {
				job.PreviewState = state
				job.PreviewStateAt = time.Now().UTC().Format(time.RFC3339Nano)
				job.PreviewStateMsg = fmt.Sprintf("ffmpeg input returned HTTP %d", code)
			} else {
				job.PreviewState = "error"
				job.PreviewStateAt = time.Now().UTC().Format(time.RFC3339Nano)
				// Keep the stored message short for the UI.
				if len(st) > 280 {
					job.PreviewStateMsg = st[:280] + "…"
				} else {
					job.PreviewStateMsg = st
				}
			}
			jobsMu.Unlock()
			notifyJobsChanged()
			fmt.Printf("⚠️ preview hq ffmpeg failed: %v (%s)\n", err, st)
		}
		// Clear the command handle only if it is still ours (a newer preview
		// may have replaced it in the meantime).
		jobsMu.Lock()
		if job.previewCmd == cmd {
			job.previewCmd = nil
		}
		jobsMu.Unlock()
	}()
	// Start the live thumb writer (periodically rewrites generated/<jobId>/thumbs.jpg).
	startLiveThumbLoop(ctx, job)
	return nil
}
// extractFirstFrameJPEG grabs the first video frame of the file at path via
// the configured ffmpeg binary and returns it as JPEG bytes (scaled to 720px
// width, even height). On failure the trimmed ffmpeg stderr is included in
// the returned error.
func extractFirstFrameJPEG(path string) ([]byte, error) {
	args := []string{
		"-hide_banner",
		"-loglevel", "error",
		"-i", path,
		"-frames:v", "1",
		"-vf", "scale=720:-2",
		"-q:v", "10",
		"-f", "image2pipe",
		"-vcodec", "mjpeg",
		"pipe:1",
	}
	var stdout, errBuf bytes.Buffer
	cmd := exec.Command(ffmpegPath, args...)
	cmd.Stdout = &stdout
	cmd.Stderr = &errBuf
	if err := cmd.Run(); err != nil {
		return nil, fmt.Errorf("ffmpeg first-frame: %w (%s)", err, strings.TrimSpace(errBuf.String()))
	}
	return stdout.Bytes(), nil
}
// resolvePathRelativeToApp turns a (possibly relative) path into an absolute
// one. Relative paths are anchored at the executable's directory, except when
// the executable appears to live in a temp/go-build location (go run, tests),
// in which case the working directory is used instead. An empty input yields
// ("", nil); absolute inputs are returned cleaned.
func resolvePathRelativeToApp(p string) (string, error) {
	p = strings.TrimSpace(p)
	if p == "" {
		return "", nil
	}
	p = filepath.Clean(filepath.FromSlash(p))
	if filepath.IsAbs(p) {
		return p, nil
	}
	if exe, err := os.Executable(); err == nil {
		exeDir := filepath.Dir(exe)
		low := strings.ToLower(exeDir)
		// Heuristic: go run / tests place the binary under Temp/go-build.
		tempDir := false
		for _, marker := range []string{
			`\appdata\local\temp`, `\temp\`, `\tmp\`, `\go-build`,
			`/tmp/`, `/go-build`,
		} {
			if strings.Contains(low, marker) {
				tempDir = true
				break
			}
		}
		if !tempDir {
			return filepath.Join(exeDir, p), nil
		}
	}
	// Fallback for development: resolve against the working directory.
	wd, err := os.Getwd()
	if err != nil {
		return "", err
	}
	return filepath.Join(wd, p), nil
}
// registerFrontend serves the built SPA (Vite dist) from mux: real files are
// delivered from the dist directory (content-hashed assets with long-lived
// caching), everything else falls back to index.html so client-side routing
// works. Search order for the dist dir: $FRONTEND_DIST, "web/dist", "dist"
// (each resolved relative to the app). If none contains an index.html the
// function only logs a warning — the API keeps working without the frontend.
func registerFrontend(mux *http.ServeMux) {
	// Candidates: env override first, then the typical build folders.
	candidates := []string{
		strings.TrimSpace(os.Getenv("FRONTEND_DIST")),
		"web/dist",
		"dist",
	}
	var distAbs string
	for _, c := range candidates {
		if c == "" {
			continue
		}
		abs, err := resolvePathRelativeToApp(c)
		if err != nil {
			continue
		}
		if fi, err := os.Stat(filepath.Join(abs, "index.html")); err == nil && !fi.IsDir() {
			distAbs = abs
			break
		}
	}
	if distAbs == "" {
		// BUGFIX: the message previously claimed "frontend/dist" was tried,
		// but the actual candidate list is FRONTEND_DIST, web/dist, dist.
		fmt.Println("⚠️ Frontend dist nicht gefunden (tried: FRONTEND_DIST, web/dist, dist) API läuft trotzdem.")
		return
	}
	fmt.Println("🖼️ Frontend dist:", distAbs)
	fileServer := http.FileServer(http.Dir(distAbs))
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// API routes are registered with longer patterns and win; if anything
		// slips through anyway, answer 404 instead of serving the SPA.
		if strings.HasPrefix(r.URL.Path, "/api/") {
			http.NotFound(w, r)
			return
		}
		// 1) Root -> index.html (never cached).
		reqPath := r.URL.Path
		if reqPath == "" || reqPath == "/" {
			w.Header().Set("Cache-Control", "no-store")
			http.ServeFile(w, r, filepath.Join(distAbs, "index.html"))
			return
		}
		// Map the URL path onto the filesystem without allowing traversal
		// (path.Clean for URL slashes, then convert to OS separators).
		clean := path.Clean("/" + reqPath)
		rel := strings.TrimPrefix(clean, "/")
		onDisk := filepath.Join(distAbs, filepath.FromSlash(rel))
		if fi, err := os.Stat(onDisk); err == nil && !fi.IsDir() {
			// Vite emits content-hashed assets, so non-HTML files can be
			// cached aggressively.
			ext := strings.ToLower(filepath.Ext(onDisk))
			if ext != "" && ext != ".html" {
				w.Header().Set("Cache-Control", "public, max-age=31536000, immutable")
			} else {
				w.Header().Set("Cache-Control", "no-store")
			}
			fileServer.ServeHTTP(w, r)
			return
		}
		// 2) SPA fallback: every "route" without a file gets index.html.
		w.Header().Set("Cache-Control", "no-store")
		http.ServeFile(w, r, filepath.Join(distAbs, "index.html"))
	})
}
// makeFrontendHandler locates the built SPA (Vite dist) and returns an
// http.Handler that serves it with an index.html fallback for client-side
// routes. Search order: $FRONTEND_DIST, then "web/dist", then "dist" (each
// resolved relative to the app). Returns (nil, false) when no dist directory
// with an index.html was found; the API keeps working without the frontend.
func makeFrontendHandler() (http.Handler, bool) {
	distAbs := ""
	for _, cand := range []string{strings.TrimSpace(os.Getenv("FRONTEND_DIST")), "web/dist", "dist"} {
		if cand == "" {
			continue
		}
		abs, err := resolvePathRelativeToApp(cand)
		if err != nil {
			continue
		}
		if fi, err := os.Stat(filepath.Join(abs, "index.html")); err == nil && !fi.IsDir() {
			distAbs = abs
			break
		}
	}
	if distAbs == "" {
		fmt.Println("⚠️ Frontend dist nicht gefunden (tried: FRONTEND_DIST, web/dist, dist) API läuft trotzdem.")
		return nil, false
	}
	fmt.Println("🖼️ Frontend dist:", distAbs)
	static := http.FileServer(http.Dir(distAbs))
	serveIndex := func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Cache-Control", "no-store")
		http.ServeFile(w, r, filepath.Join(distAbs, "index.html"))
	}
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// API routes never fall through to the SPA.
		if strings.HasPrefix(r.URL.Path, "/api/") {
			http.NotFound(w, r)
			return
		}
		reqPath := r.URL.Path
		if reqPath == "" || reqPath == "/" {
			serveIndex(w, r)
			return
		}
		// Map the URL path onto the dist dir without allowing traversal.
		rel := strings.TrimPrefix(path.Clean("/"+reqPath), "/")
		onDisk := filepath.Join(distAbs, filepath.FromSlash(rel))
		if fi, err := os.Stat(onDisk); err == nil && !fi.IsDir() {
			// Vite emits content-hashed assets: cache those aggressively.
			if ext := strings.ToLower(filepath.Ext(onDisk)); ext != "" && ext != ".html" {
				w.Header().Set("Cache-Control", "public, max-age=31536000, immutable")
			} else {
				w.Header().Set("Cache-Control", "no-store")
			}
			static.ServeHTTP(w, r)
			return
		}
		// SPA fallback: unknown routes get index.html.
		serveIndex(w, r)
	})
	return handler, true
}
// routes.go (package main)

// registerRoutes wires all HTTP endpoints onto mux: the public auth
// endpoints, the auth-protected API mux under /api/, the model store API,
// and the auth-protected SPA at /. Returns the loaded ModelStore so the
// background workers (online poller, autostart) can share it.
func registerRoutes(mux *http.ServeMux, auth *AuthManager) *ModelStore {
	// --------------------------
	// 1) Public Auth Endpoints
	// --------------------------
	mux.HandleFunc("/api/auth/login", authLoginHandler(auth))
	mux.HandleFunc("/api/auth/logout", authLogoutHandler(auth))
	mux.HandleFunc("/api/auth/me", authMeHandler(auth))
	// 2FA (Authenticator/TOTP)
	mux.HandleFunc("/api/auth/2fa/setup", auth2FASetupHandler(auth))
	mux.HandleFunc("/api/auth/2fa/enable", auth2FAEnableHandler(auth))
	// mux.HandleFunc("/api/auth/2fa/disable", auth2FADisableHandler(auth))
	// --------------------------
	// 2) Protected API Mux
	// --------------------------
	api := http.NewServeMux()
	api.HandleFunc("/api/cookies", cookiesHandler)
	api.HandleFunc("/api/record/done/stream", handleDoneStream)
	api.HandleFunc("/api/perf/stream", perfStreamHandler)
	api.HandleFunc("/api/status/disk", diskStatusHandler)
	api.HandleFunc("/api/autostart/state", autostartStateHandler)
	api.HandleFunc("/api/autostart/state/stream", autostartStateStreamHandler)
	api.HandleFunc("/api/autostart/pause", autostartPauseQuickHandler)
	api.HandleFunc("/api/autostart/resume", autostartResumeHandler)
	api.HandleFunc("/api/settings", recordSettingsHandler)
	api.HandleFunc("/api/settings/browse", settingsBrowse)
	api.HandleFunc("/api/settings/cleanup", settingsCleanupHandler)
	api.HandleFunc("/api/record", startRecordingFromRequest)
	api.HandleFunc("/api/record/status", recordStatus)
	api.HandleFunc("/api/record/stop", recordStop)
	api.HandleFunc("/api/record/preview", recordPreview)
	api.HandleFunc("/api/record/list", recordList)
	api.HandleFunc("/api/record/stream", recordStream)
	api.HandleFunc("/api/record/video", recordVideo)
	api.HandleFunc("/api/record/done", recordDoneList)
	api.HandleFunc("/api/record/delete", recordDeleteVideo)
	api.HandleFunc("/api/record/toggle-hot", recordToggleHot)
	api.HandleFunc("/api/record/keep", recordKeepVideo)
	api.HandleFunc("/api/record/unkeep", recordUnkeepVideo)
	api.HandleFunc("/api/record/restore", recordRestoreVideo)
	api.HandleFunc("/api/chaturbate/online", chaturbateOnlineHandler)
	api.HandleFunc("/api/chaturbate/biocontext", chaturbateBioContextHandler)
	api.HandleFunc("/api/generated/teaser", generatedTeaser)
	api.HandleFunc("/api/generated/cover", generatedCover)
	api.HandleFunc("/api/generated/coverinfo/list", generatedCoverInfoList)
	// Tasks
	api.HandleFunc("/api/tasks/generate-assets", tasksGenerateAssets)
	// --------------------------
	// 3) ModelStore
	// --------------------------
	modelsPath, _ := resolvePathRelativeToApp("data/models_store.db")
	fmt.Println("📦 Models DB:", modelsPath)
	store := NewModelStore(modelsPath)
	if err := store.Load(); err != nil {
		// Best-effort: a failed load is logged, the empty store stays usable.
		fmt.Println("⚠️ models load:", err)
	}
	setCoverModelStore(store)
	RegisterModelAPI(api, store)
	setChaturbateOnlineModelStore(store)
	// --------------------------
	// 4) Mount Protected API
	// --------------------------
	// /api/auth/* is already public on the root mux and wins as the longer pattern.
	mux.Handle("/api/", requireAuth(auth, api, false))
	// --------------------------
	// 5) Mount Protected SPA (/)
	// --------------------------
	frontend, ok := makeFrontendHandler()
	if ok && frontend != nil {
		// allowPaths: login + assets must be public, otherwise redirect loop
		mux.Handle("/", requireAuth(auth, frontend, true,
			"/login",
			"/assets/",
			"/favicon.ico",
			"/manifest.webmanifest",
			"/robots.txt",
			"/service-worker.js",
		))
	}
	return store
}
// withCORS wraps next and adds permissive CORS headers for the local Vite
// dev origins (localhost/127.0.0.1 on port 5173). OPTIONS preflight requests
// are answered with 204 No Content and never reach next.
func withCORS(next http.Handler) http.Handler {
	devOrigins := map[string]bool{
		"http://localhost:5173": true,
		"http://127.0.0.1:5173": true,
	}
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if origin := strings.TrimSpace(r.Header.Get("Origin")); devOrigins[origin] {
			hdr := w.Header()
			hdr.Set("Access-Control-Allow-Origin", origin)
			hdr.Set("Vary", "Origin")
			hdr.Set("Access-Control-Allow-Methods", "GET,POST,DELETE,HEAD,OPTIONS")
			hdr.Set("Access-Control-Allow-Headers", "Content-Type, Range, Last-Event-ID")
			hdr.Set("Access-Control-Expose-Headers", "Content-Length, Content-Range, Accept-Ranges")
			// Enable only if cookies/Authorization are really needed cross-origin:
			// hdr.Set("Access-Control-Allow-Credentials", "true")
		}
		if r.Method == http.MethodOptions {
			w.WriteHeader(http.StatusNoContent)
			return
		}
		next.ServeHTTP(w, r)
	})
}
// --- main ---

// main wires settings, background workers, the auth layer and all HTTP
// routes, then serves the API (and SPA, when built) on :9999.
func main() {
	loadSettings()
	fixKeepRootFilesIntoModelSubdirs()
	postWorkQ.StartWorkers(1)
	startPostWorkStatusRefresher()
	go startGeneratedGarbageCollector()
	mux := http.NewServeMux()
	// AuthManager: session/cookie signing, credentials, optional 2FA store.
	// BUGFIX: the previous code checked `err != nil` twice in a row; the
	// duplicated block was dead code and has been removed.
	auth, err := NewAuthManager()
	if err != nil {
		fmt.Println("❌ auth init:", err)
		os.Exit(1)
	}
	store := registerRoutes(mux, auth)
	go startChaturbateOnlinePoller(store)
	go startChaturbateAutoStartWorker(store)
	go startMyFreeCamsAutoStartWorker(store)
	go startDiskSpaceGuard()
	if _, err := ensureCoversDir(); err != nil {
		fmt.Println("⚠️ covers dir:", err)
	}
	fmt.Println("🌐 HTTP-API aktiv: http://localhost:9999")
	handler := withCORS(mux)
	if err := http.ListenAndServe(":9999", handler); err != nil {
		fmt.Println("❌ HTTP-Server Fehler:", err)
		os.Exit(1)
	}
}
// RecordRequest is the JSON payload used to start a recording
// (POST /api/record and the autostart workers).
type RecordRequest struct {
	URL       string `json:"url"`                 // stream/room URL to record
	Cookie    string `json:"cookie,omitempty"`    // raw cookie header, forwarded to the provider
	UserAgent string `json:"userAgent,omitempty"` // UA string for outgoing HTTP requests
	Hidden    bool   `json:"hidden,omitempty"`    // hidden auto-check job: not broadcast to the UI at first
}
// getRecordingsDir returns the recordings directory from settings, resolved
// to an absolute path relative to the app where possible, otherwise the raw
// (trimmed) settings value.
func getRecordingsDir() string {
	cfg := getSettings()
	if abs, err := resolvePathRelativeToApp(cfg.RecordDir); err == nil && strings.TrimSpace(abs) != "" {
		return abs
	}
	// Fallback when resolution fails.
	return strings.TrimSpace(cfg.RecordDir)
}
// getKeepDir returns "<doneDir>/keep", resolving the done directory relative
// to the app where possible. Returns "" when no done directory is configured.
func getKeepDir() string {
	cfg := getSettings()
	done, err := resolvePathRelativeToApp(cfg.DoneDir)
	if err != nil || strings.TrimSpace(done) == "" {
		done = strings.TrimSpace(cfg.DoneDir)
	}
	if strings.TrimSpace(done) == "" {
		return ""
	}
	return filepath.Join(done, "keep")
}
// getDoneDir returns the "done" directory from settings, resolved to an
// absolute path relative to the app where possible, otherwise the raw
// (trimmed) settings value.
func getDoneDir() string {
	cfg := getSettings()
	if abs, err := resolvePathRelativeToApp(cfg.DoneDir); err == nil && strings.TrimSpace(abs) != "" {
		return abs
	}
	return strings.TrimSpace(cfg.DoneDir)
}
// findVideoPath locates a video by bare filename in the recordings, done and
// keep directories, then one subdirectory level below each (root/*/file).
// The input is reduced to its basename first to prevent path traversal.
// Returns os.ErrNotExist when no matching regular file is found.
func findVideoPath(file string) (string, error) {
	base := filepath.Base(file) // strips directory components (anti-traversal)
	// TODO: adjust these root dirs to the real paths in use:
	roots := []string{
		getRecordingsDir(), // e.g. downloads/output root
		getDoneDir(),       // finished files typically live here
		getKeepDir(),       // keep root
	}
	isFile := func(p string) bool {
		st, err := os.Stat(p)
		return err == nil && !st.IsDir()
	}
	// Pass 1: directly inside each root.
	for _, root := range roots {
		root = strings.TrimSpace(root)
		if root == "" {
			continue
		}
		if p := filepath.Join(root, base); isFile(p) {
			return p, nil
		}
	}
	// Pass 2: one subdirectory level down: root/*/file.
	for _, root := range roots {
		root = strings.TrimSpace(root)
		if root == "" {
			continue
		}
		matches, _ := filepath.Glob(filepath.Join(root, "*", base))
		for _, p := range matches {
			if isFile(p) {
				return p, nil
			}
		}
	}
	return "", os.ErrNotExist
}
// main.go (oder wo deine Routes sind)
// writeSSE writes data to w as a Server-Sent-Events payload: every line of
// the CRLF-normalized input is prefixed with "data: " (per the SSE spec),
// followed by the blank line that terminates the event.
func writeSSE(w http.ResponseWriter, data []byte) {
	normalized := strings.ReplaceAll(string(data), "\r\n", "\n")
	for _, line := range strings.Split(normalized, "\n") {
		fmt.Fprintf(w, "data: %s\n", line)
	}
	fmt.Fprint(w, "\n")
}
// handleDoneStream is the SSE endpoint for "done list changed"
// notifications. It subscribes the client to doneHub, immediately sends an
// initial "doneChanged" event carrying the current sequence number and
// timestamp, then forwards every hub message as a named event until the
// request context is cancelled.
func handleDoneStream(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/event-stream")
	w.Header().Set("Cache-Control", "no-cache")
	w.Header().Set("Connection", "keep-alive")
	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "streaming unsupported", http.StatusInternalServerError)
		return
	}
	// Buffered so a slow client does not immediately block the broadcaster.
	ch := make(chan []byte, 16)
	doneHub.add(ch)
	defer doneHub.remove(ch)
	// Initial ping/hello so the client knows the stream is live right away.
	fmt.Fprintf(w, "event: doneChanged\ndata: {\"type\":\"doneChanged\",\"seq\":%d,\"ts\":%d}\n\n",
		atomic.LoadUint64(&doneSeq), time.Now().UnixMilli())
	flusher.Flush()
	ctx := r.Context()
	for {
		select {
		case <-ctx.Done():
			return
		case b := <-ch:
			// Named event -> client can use addEventListener("doneChanged", ...)
			fmt.Fprintf(w, "event: doneChanged\ndata: %s\n\n", b)
			flusher.Flush()
		}
	}
}
// handleRecordVideo serves a finished recording for playback. Lookup is by
// the "file" query parameter (resolved via findVideoPath, which reduces the
// name to its basename to prevent traversal); the correct Content-Type is
// set and Range/206 seeking is handled by http.ServeContent.
// NOTE(review): the "id" branch is still a stub — a request with only "id"
// (no "file") currently ends in 404.
func handleRecordVideo(w http.ResponseWriter, r *http.Request) {
	// Priority: id -> (existing job mapping), otherwise file.
	id := strings.TrimSpace(r.URL.Query().Get("id"))
	if id != "" {
		// If a job-store lookup already exists: resolve the path by job ID
		// and reuse the ServeContent path below.
		// path := lookupPathByJobID(id)
		// ...
	}
	file := strings.TrimSpace(r.URL.Query().Get("file"))
	if file == "" && id == "" {
		http.Error(w, "missing id or file", http.StatusBadRequest)
		return
	}
	var path string
	var err error
	if file != "" {
		path, err = findVideoPath(file)
		if err != nil {
			http.NotFound(w, r)
			return
		}
	} else {
		// TODO: set path here once lookup by id is implemented
		http.NotFound(w, r)
		return
	}
	// Open via helper; presumably uses share-delete semantics so the file can
	// be moved/deleted while streaming — confirm in openForReadShareDelete.
	f, err := openForReadShareDelete(path)
	if err != nil {
		http.Error(w, "open failed", http.StatusInternalServerError)
		return
	}
	defer f.Close()
	st, err := f.Stat()
	if err != nil {
		http.Error(w, "stat failed", http.StatusInternalServerError)
		return
	}
	// Correct Content-Type matters for browsers/Video.js.
	ext := strings.ToLower(filepath.Ext(path))
	switch ext {
	case ".ts":
		w.Header().Set("Content-Type", "video/mp2t")
	default:
		w.Header().Set("Content-Type", "video/mp4")
	}
	w.Header().Set("Accept-Ranges", "bytes")
	w.Header().Set("Cache-Control", "no-store")
	// ServeContent handles Range/206 and seeking correctly.
	http.ServeContent(w, r, filepath.Base(path), st.ModTime(), f)
}
// durationFromMetaIfFresh reads the cached video duration from
// assetDir/meta.json, passing fi (the video file's stat) so the helper can
// decide whether the cached value is still fresh. Returns (duration, true)
// on a fresh cache hit. videoPath is currently unused; kept for interface
// stability. NOTE(review): freshness semantics live in readVideoMetaDuration
// — confirm there.
func durationFromMetaIfFresh(videoPath, assetDir string, fi os.FileInfo) (float64, bool) {
	metaPath := filepath.Join(assetDir, "meta.json")
	return readVideoMetaDuration(metaPath, fi)
}
// shared: used by both the HTTP handler AND the autostart worker
//
// startRecordingInternal starts a new recording job for req.URL, or returns
// the already-running job for the same URL (making a hidden auto-check job
// visible when the user starts it manually). The job is pre-populated with
// timestamp and output path so the UI immediately has model/filename
// details; the actual recording runs in a runJob goroutine.
func startRecordingInternal(req RecordRequest) (*RecordJob, error) {
	url := strings.TrimSpace(req.URL)
	if url == "" {
		return nil, errors.New("url fehlt")
	}
	// Duplicate-running guard (identical URL). Note: jobsMu stays locked
	// through job creation below so no duplicate can slip in between.
	jobsMu.Lock()
	for _, j := range jobs {
		if j != nil && j.Status == JobRunning && strings.TrimSpace(j.SourceURL) == url {
			// If a hidden auto-check job is running and the user starts it
			// manually -> make it visible immediately.
			if j.Hidden && !req.Hidden {
				j.Hidden = false
				jobsMu.Unlock()
				notifyJobsChanged()
				return j, nil
			}
			jobsMu.Unlock()
			return j, nil
		}
	}
	// Set timestamp + output here already so the UI instantly shows
	// model/filename/details.
	startedAt := time.Now()
	provider := detectProvider(url)
	// best-effort username from the URL
	username := ""
	switch provider {
	case "chaturbate":
		username = extractUsername(url)
	case "mfc":
		username = extractMFCUsername(url)
	}
	if strings.TrimSpace(username) == "" {
		username = "unknown"
	}
	// Filename (consistent with runJob: same timestamp)
	filename := fmt.Sprintf("%s_%s.ts", username, startedAt.Format("01_02_2006__15-04-05"))
	// best-effort: absolute RecordDir (falls back to the raw settings value)
	s := getSettings()
	recordDirAbs, _ := resolvePathRelativeToApp(s.RecordDir)
	recordDir := strings.TrimSpace(recordDirAbs)
	if recordDir == "" {
		recordDir = strings.TrimSpace(s.RecordDir)
	}
	outPath := filepath.Join(recordDir, filename)
	jobID := uuid.NewString()
	ctx, cancel := context.WithCancel(context.Background())
	job := &RecordJob{
		ID:        jobID,
		SourceURL: url,
		Status:    JobRunning,
		StartedAt: startedAt,
		Output:    outPath, // pre-filled so the UI has it immediately
		Hidden:    req.Hidden,
		cancel:    cancel,
	}
	jobs[jobID] = job
	jobsMu.Unlock()
	// Hidden jobs are not broadcast to the UI right away.
	if !job.Hidden {
		notifyJobsChanged()
	}
	go runJob(ctx, job, req)
	return job, nil
}
// startRecordingFromRequest is the POST /api/record handler: it decodes a
// RecordRequest from the body, starts (or reuses) a recording job via
// startRecordingInternal and responds with the job encoded as JSON.
func startRecordingFromRequest(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Nur POST erlaubt", http.StatusMethodNotAllowed)
		return
	}
	var payload RecordRequest
	if decodeErr := json.NewDecoder(r.Body).Decode(&payload); decodeErr != nil {
		http.Error(w, decodeErr.Error(), http.StatusBadRequest)
		return
	}
	started, startErr := startRecordingInternal(payload)
	if startErr != nil {
		http.Error(w, startErr.Error(), http.StatusBadRequest)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(started)
}
// parseCookieString parses a "k=v; k2=v2" cookie header into a map whose
// keys are the lowercased cookie names. Pairs without '=' or with an empty
// name are skipped; surrounding whitespace is trimmed from names and values.
func parseCookieString(cookieStr string) map[string]string {
	cookies := make(map[string]string)
	for _, chunk := range strings.Split(cookieStr, ";") {
		name, value, found := strings.Cut(strings.TrimSpace(chunk), "=")
		if !found {
			continue
		}
		name = strings.TrimSpace(name)
		if name == "" {
			continue
		}
		cookies[strings.ToLower(name)] = strings.TrimSpace(value)
	}
	return cookies
}
// hasChaturbateCookies reports whether the cookie string carries both the
// cf_clearance cookie and a session cookie ("session_id" or "sessionid");
// names are matched case-insensitively because parseCookieString lowercases
// all keys.
func hasChaturbateCookies(cookieStr string) bool {
	cookies := parseCookieString(cookieStr)
	if _, ok := cookies["cf_clearance"]; !ok {
		return false
	}
	if _, ok := cookies["session_id"]; ok {
		return true
	}
	// Variant without the underscore.
	_, ok := cookies["sessionid"]
	return ok
}
func runJob(ctx context.Context, job *RecordJob, req RecordRequest) {
hc := NewHTTPClient(req.UserAgent)
provider := detectProvider(req.URL)
var err error
// ✅ nutze den Timestamp vom Job (damit Start/Output konsistent sind)
now := job.StartedAt
if now.IsZero() {
now = time.Now()
}
// ---- Aufnahme starten (Output-Pfad sauber relativ zur EXE auflösen) ----
switch provider {
case "chaturbate":
if !hasChaturbateCookies(req.Cookie) {
err = errors.New("cf_clearance und session_id (oder sessionid) Cookies sind für Chaturbate erforderlich")
break
}
s := getSettings()
recordDirAbs, rerr := resolvePathRelativeToApp(s.RecordDir)
if rerr != nil || strings.TrimSpace(recordDirAbs) == "" {
err = fmt.Errorf("recordDir auflösung fehlgeschlagen: %v", rerr)
break
}
_ = os.MkdirAll(recordDirAbs, 0o755)
username := extractUsername(req.URL)
filename := fmt.Sprintf("%s_%s.ts", username, now.Format("01_02_2006__15-04-05"))
// ✅ wenn Output schon beim Start gesetzt wurde, nutze ihn (falls absolut)
jobsMu.Lock()
existingOut := strings.TrimSpace(job.Output)
jobsMu.Unlock()
outPath := existingOut
if outPath == "" || !filepath.IsAbs(outPath) {
outPath = filepath.Join(recordDirAbs, filename)
}
// Output nur aktualisieren, wenn es sich ändert
if strings.TrimSpace(existingOut) != strings.TrimSpace(outPath) {
jobsMu.Lock()
job.Output = outPath
jobsMu.Unlock()
notifyJobsChanged()
}
err = RecordStream(ctx, hc, "https://chaturbate.com/", username, outPath, req.Cookie, job)
case "mfc":
s := getSettings()
recordDirAbs, rerr := resolvePathRelativeToApp(s.RecordDir)
if rerr != nil || strings.TrimSpace(recordDirAbs) == "" {
err = fmt.Errorf("recordDir auflösung fehlgeschlagen: %v", rerr)
break
}
_ = os.MkdirAll(recordDirAbs, 0o755)
username := extractMFCUsername(req.URL)
filename := fmt.Sprintf("%s_%s.ts", username, now.Format("01_02_2006__15-04-05"))
outPath := filepath.Join(recordDirAbs, filename)
jobsMu.Lock()
job.Output = outPath
jobsMu.Unlock()
notifyJobsChanged()
err = RecordStreamMFC(ctx, hc, username, outPath, job)
default:
err = errors.New("unsupported provider")
}
// ---- Recording fertig: EndedAt/Error setzen ----
end := time.Now()
// Zielstatus bestimmen (finaler Status wird erst NACH Postwork gesetzt!)
target := JobFinished
var errText string
if err != nil {
if errors.Is(err, context.Canceled) {
target = JobStopped
} else {
target = JobFailed
errText = err.Error()
}
}
// direkt nach provider record endet (egal ob err != nil oder nil)
stopPreview(job)
// EndedAt + Error speichern (kurz locken)
jobsMu.Lock()
job.EndedAt = &end
if errText != "" {
job.Error = errText
}
out := strings.TrimSpace(job.Output)
jobsMu.Unlock()
notifyJobsChanged()
// Falls Output fehlt (z.B. provider error), direkt final status setzen
if out == "" {
jobsMu.Lock()
job.Status = target
job.Phase = ""
job.Progress = 100
job.PostWorkKey = ""
job.PostWork = nil
jobsMu.Unlock()
notifyJobsChanged()
notifyDoneChanged()
return
}
// ✅ Postwork: remux/move/ffprobe/assets begrenzen -> in Queue
postOut := out
postTarget := target
postKey := "postwork:" + job.ID
// ✅ WICHTIG:
// - Status noch NICHT auf JobStopped/JobFinished setzen, sonst verschwindet er aus der Downloads-Tabelle.
// - Stattdessen Phase "postwork" + Progress hochsetzen (monoton).
// - Zusätzlich: PostWorkKey setzen + initialen Queue-Status ins Job-JSON hängen.
jobsMu.Lock()
job.Phase = "postwork"
if job.Progress < 70 {
job.Progress = 70
}
job.PostWorkKey = postKey
// initialer Status (meist "missing", bis Enqueue done ist wir updaten direkt danach nochmal)
{
s := postWorkQ.StatusForKey(postKey)
job.PostWork = &s
}
jobsMu.Unlock()
notifyJobsChanged()
okQueued := postWorkQ.Enqueue(PostWorkTask{
Key: postKey,
Added: time.Now(),
Run: func(ctx context.Context) error {
// beim Start: Queue-Status refresh (sollte jetzt "running" werden)
{
st := postWorkQ.StatusForKey(postKey)
jobsMu.Lock()
job.PostWork = &st
// optional: wenn du "queued" Progress optisch unterscheiden willst
if job.Phase == "postwork" && job.Progress < 71 {
job.Progress = 71
}
jobsMu.Unlock()
notifyJobsChanged()
}
out := strings.TrimSpace(postOut)
if out == "" {
jobsMu.Lock()
job.Phase = ""
job.Progress = 100
job.Status = postTarget
job.PostWorkKey = ""
job.PostWork = nil
jobsMu.Unlock()
notifyJobsChanged()
notifyDoneChanged()
return nil
}
// Helper: Progress nur nach oben (gegen "rückwärts")
setPhase := func(phase string, pct int) {
jobsMu.Lock()
if pct < job.Progress {
pct = job.Progress
}
job.Phase = phase
job.Progress = pct
// Queue-Status auch bei Phase-Wechsel aktuell halten (nice für UI)
st := postWorkQ.StatusForKey(postKey)
job.PostWork = &st
jobsMu.Unlock()
notifyJobsChanged()
}
// 1) Remux (nur wenn TS)
if strings.EqualFold(filepath.Ext(out), ".ts") {
setPhase("remuxing", 72)
if newOut, err2 := maybeRemuxTSForJob(job, out); err2 == nil && strings.TrimSpace(newOut) != "" {
out = strings.TrimSpace(newOut)
jobsMu.Lock()
job.Output = out
jobsMu.Unlock()
notifyJobsChanged()
}
}
// 2) Move to done (best-effort)
setPhase("moving", 78)
if moved, err2 := moveToDoneDir(out); err2 == nil && strings.TrimSpace(moved) != "" {
out = strings.TrimSpace(moved)
jobsMu.Lock()
job.Output = out
jobsMu.Unlock()
notifyJobsChanged()
// ✅ erst JETZT ist done wirklich betroffen
notifyDoneChanged()
}
// 3) Optional: kleine Downloads automatisch löschen
setPhase("postwork", 82)
if fi, serr := os.Stat(out); serr == nil && fi != nil && !fi.IsDir() {
jobsMu.Lock()
job.SizeBytes = fi.Size()
jobsMu.Unlock()
notifyJobsChanged()
s := getSettings()
minMB := s.AutoDeleteSmallDownloadsBelowMB
if s.AutoDeleteSmallDownloads && minMB > 0 {
threshold := int64(minMB) * 1024 * 1024
if fi.Size() > 0 && fi.Size() < threshold {
base := filepath.Base(out)
id := stripHotPrefix(strings.TrimSuffix(base, filepath.Ext(base)))
if derr := removeWithRetry(out); derr == nil || os.IsNotExist(derr) {
removeGeneratedForID(id)
if doneAbs, rerr := resolvePathRelativeToApp(getSettings().DoneDir); rerr == nil && strings.TrimSpace(doneAbs) != "" {
_ = os.RemoveAll(filepath.Join(doneAbs, "preview", id))
_ = os.RemoveAll(filepath.Join(doneAbs, "thumbs", id))
}
purgeDurationCacheForPath(out)
jobsMu.Lock()
delete(jobs, job.ID)
jobsMu.Unlock()
notifyJobsChanged()
notifyDoneChanged()
fmt.Println("🧹 auto-deleted:", base, "size:", formatBytesSI(fi.Size()))
return nil
} else {
fmt.Println("⚠️ auto-delete failed:", derr)
}
}
}
}
// 4) Dauer (ffprobe)
setPhase("ffprobe", 84)
{
dctx, cancel := context.WithTimeout(ctx, 6*time.Second)
if sec, derr := durationSecondsCached(dctx, out); derr == nil && sec > 0 {
jobsMu.Lock()
job.DurationSeconds = sec
jobsMu.Unlock()
notifyJobsChanged()
}
cancel()
}
// 5) Video-Props
setPhase("probe", 86)
{
pctx, cancel := context.WithTimeout(ctx, 6*time.Second)
w, h, fps, perr := probeVideoProps(pctx, out)
cancel()
if perr == nil {
jobsMu.Lock()
job.VideoWidth = w
job.VideoHeight = h
job.FPS = fps
jobsMu.Unlock()
notifyJobsChanged()
}
}
// 6) Assets (thumbs.jpg + preview.mp4)
const (
assetsStart = 86
assetsEnd = 99
)
setPhase("assets", assetsStart)
lastPct := -1
lastTick := time.Time{}
update := func(r float64) {
if r < 0 {
r = 0
}
if r > 1 {
r = 1
}
pct := assetsStart + int(math.Round(r*float64(assetsEnd-assetsStart)))
if pct < assetsStart {
pct = assetsStart
}
if pct > assetsEnd {
pct = assetsEnd
}
if pct == lastPct {
return
}
if !lastTick.IsZero() && time.Since(lastTick) < 150*time.Millisecond {
return
}
lastPct = pct
lastTick = time.Now()
setPhase("assets", pct)
}
if err := ensureAssetsForVideoWithProgress(out, job.SourceURL, update); err != nil {
fmt.Println("⚠️ ensureAssetsForVideo:", err)
}
setPhase("assets", assetsEnd)
// 7) Finalize: JETZT finalen Status setzen (damit er erst dann aus Downloads verschwindet)
jobsMu.Lock()
job.Status = postTarget
job.Phase = ""
job.Progress = 100
job.PostWorkKey = ""
job.PostWork = nil
jobsMu.Unlock()
notifyJobsChanged()
notifyDoneChanged()
return nil
},
})
if okQueued {
// ✅ direkt nach erfolgreichem Enqueue nochmal Status holen (nun "queued" + Position möglich)
st := postWorkQ.StatusForKey(postKey)
jobsMu.Lock()
job.PostWork = &st
jobsMu.Unlock()
notifyJobsChanged()
} else {
// Queue voll -> Fallback: finalisieren
jobsMu.Lock()
job.Status = postTarget
job.Phase = ""
job.Progress = 100
job.PostWorkKey = ""
job.PostWork = nil
jobsMu.Unlock()
notifyJobsChanged()
notifyDoneChanged()
}
return
}
// formatBytesSI renders a byte count using 1024-based units ("B", "KB", …).
// Despite the "SI" name it uses binary magnitudes (1024), matching the rest
// of the app. Negative inputs are clamped to 0. Values < 10 in their unit get
// one decimal place, larger values are shown as whole numbers.
func formatBytesSI(b int64) string {
	if b < 0 {
		b = 0
	}
	const unit = 1024
	if b < unit {
		return fmt.Sprintf("%d B", b)
	}
	div, exp := int64(unit), 0
	for n := b / unit; n >= unit; n /= unit {
		div *= unit
		exp++
	}
	// "EB" is required: for b >= 1024^6 (~1.15e18, well below MaxInt64) exp
	// reaches 5, and without a 6th suffix the lookup below would panic.
	suffix := []string{"KB", "MB", "GB", "TB", "PB", "EB"}
	v := float64(b) / float64(div)
	// one decimal place, except for large whole-ish values
	if v >= 10 {
		return fmt.Sprintf("%.0f %s", v, suffix[exp])
	}
	return fmt.Sprintf("%.1f %s", v, suffix[exp])
}
// maxInt64 is the largest value representable by int64 (same as math.MaxInt64),
// used to clamp uint64 → int64 conversions.
const maxInt64 = int64(^uint64(0) >> 1)
// u64ToI64 converts a uint64 to int64, clamping values above math.MaxInt64
// to math.MaxInt64 instead of letting the conversion wrap to a negative number.
func u64ToI64(x uint64) int64 {
	if x > uint64(math.MaxInt64) {
		return math.MaxInt64
	}
	return int64(x)
}
// ensureAssetsForVideo ensures the derived assets (meta/thumbs/preview) exist
// for videoPath. Convenience wrapper without a source URL and without a
// progress callback.
func ensureAssetsForVideo(videoPath string) error {
	// Default: no SourceURL (irrelevant for covers)
	return ensureAssetsForVideoWithProgress(videoPath, "", nil)
}
// ensureAssetsForVideoWithSource is like ensureAssetsForVideo but additionally
// records the stream's source URL in the asset meta. For call sites that have
// the URL available (e.g. postwork / jobs).
func ensureAssetsForVideoWithSource(videoPath string, sourceURL string) error {
	return ensureAssetsForVideoWithProgress(videoPath, sourceURL, nil)
}
// ensureAssetsForVideoWithProgress ensures the derived assets for a video
// exist under generated/<id>/: a meta.json duration entry (optionally with
// the source URL), a thumbs.jpg poster frame, and a preview.mp4 teaser clip.
// Everything is best-effort: individual failures are logged or ignored so one
// broken asset does not block the others, and a missing/empty input path is
// treated as success.
//
// onRatio: 0..1 (overall assets progress); nil disables progress reporting.
func ensureAssetsForVideoWithProgress(videoPath string, sourceURL string, onRatio func(r float64)) error {
	videoPath = strings.TrimSpace(videoPath)
	if videoPath == "" {
		return nil
	}
	fi, statErr := os.Stat(videoPath)
	if statErr != nil || fi.IsDir() || fi.Size() <= 0 {
		// Nothing usable on disk -> nothing to generate (not an error).
		return nil
	}
	// ✅ ID = file name without extension (always WITHOUT the "HOT " prefix)
	base := filepath.Base(videoPath)
	id := strings.TrimSuffix(base, filepath.Ext(base))
	id = stripHotPrefix(id)
	if strings.TrimSpace(id) == "" {
		return nil
	}
	assetDir, gerr := ensureGeneratedDir(id)
	if gerr != nil || strings.TrimSpace(assetDir) == "" {
		return fmt.Errorf("generated dir: %v", gerr)
	}
	metaPath := filepath.Join(assetDir, "meta.json")
	// ---- Meta / Duration ----
	durSec := 0.0
	if d, ok := readVideoMetaDuration(metaPath, fi); ok {
		durSec = d
	} else {
		// Cache miss: probe once with a bounded timeout and persist the result.
		dctx, cancel := context.WithTimeout(context.Background(), 6*time.Second)
		d, derr := durationSecondsCached(dctx, videoPath)
		cancel()
		if derr == nil && d > 0 {
			durSec = d
			// ✅ write a duration-only meta (including sourceURL)
			_ = writeVideoMetaDuration(metaPath, fi, durSec, sourceURL)
		}
	}
	// ✅ If the duration came from meta but a SourceURL is newly available now,
	// "enrich" the meta (without re-running ffprobe).
	if durSec > 0 && strings.TrimSpace(sourceURL) != "" {
		if u, ok := readVideoMetaSourceURL(metaPath, fi); !ok || strings.TrimSpace(u) == "" {
			_ = writeVideoMetaDuration(metaPath, fi, durSec, sourceURL)
		}
	}
	// Progress weights: thumbs contribute a small share, the preview the rest.
	const (
		thumbsW  = 0.25
		previewW = 0.75
	)
	// progress clamps to [0,1] and forwards to onRatio (if any).
	progress := func(r float64) {
		if onRatio == nil {
			return
		}
		if r < 0 {
			r = 0
		}
		if r > 1 {
			r = 1
		}
		onRatio(r)
	}
	progress(0)
	// ----------------
	// Thumbs
	// ----------------
	thumbPath := filepath.Join(assetDir, "thumbs.jpg")
	if tfi, err := os.Stat(thumbPath); err == nil && !tfi.IsDir() && tfi.Size() > 0 {
		// Thumbnail already present -> skip straight to its weight.
		progress(thumbsW)
	} else {
		progress(0.05)
		genCtx, cancel := context.WithTimeout(context.Background(), 45*time.Second)
		defer cancel()
		if err := thumbSem.Acquire(genCtx); err != nil {
			// best-effort: could not get a slot in time, skip the thumbnail
			progress(thumbsW)
			goto PREVIEW
		}
		// NOTE(review): deferred Release fires at function return, so the
		// thumb semaphore stays held through preview generation — confirm
		// that is intended.
		defer thumbSem.Release()
		progress(0.10)
		t := 0.0
		if durSec > 0 {
			// Grab the frame from the middle of the video when possible.
			t = durSec * 0.5
		}
		progress(0.15)
		img, e1 := extractFrameAtTimeJPEG(videoPath, t)
		if e1 != nil || len(img) == 0 {
			// Fallback chain: mid frame -> last frame -> first frame.
			img, e1 = extractLastFrameJPEG(videoPath)
			if e1 != nil || len(img) == 0 {
				img, e1 = extractFirstFrameJPEG(videoPath)
			}
		}
		progress(0.20)
		if e1 == nil && len(img) > 0 {
			if err := atomicWriteFile(thumbPath, img); err != nil {
				fmt.Println("⚠️ thumb write:", err)
			}
		}
		progress(thumbsW)
	}
PREVIEW:
	// ----------------
	// Preview
	// ----------------
	previewPath := filepath.Join(assetDir, "preview.mp4")
	if pfi, err := os.Stat(previewPath); err == nil && !pfi.IsDir() && pfi.Size() > 0 {
		// Preview already exists -> done.
		progress(1)
		return nil
	}
	genCtx, cancel := context.WithTimeout(context.Background(), 3*time.Minute)
	defer cancel()
	progress(thumbsW + 0.02)
	if err := genSem.Acquire(genCtx); err != nil {
		// Could not get a generation slot -> give up silently (best-effort).
		progress(1)
		return nil
	}
	defer genSem.Release()
	progress(thumbsW + 0.05)
	if err := generateTeaserClipsMP4WithProgress(genCtx, videoPath, previewPath, 1.0, 18, func(r float64) {
		if r < 0 {
			r = 0
		}
		if r > 1 {
			r = 1
		}
		// Map clip-generation progress into the preview's weighted range.
		progress(thumbsW + r*previewW)
	}); err != nil {
		fmt.Println("⚠️ preview clips:", err)
	}
	progress(1)
	return nil
}
// recordVideo serves a recorded video file over HTTP.
//
// Two lookup modes:
//   - ?file=<basename>: resolved against doneDir (root + one subdir level,
//     skipping "keep"), then doneDir/keep, then recordDir.
//   - ?id=<jobID>: legacy path via the in-memory job table (only works while
//     the job still exists in RAM).
//
// Browsers cannot reliably play raw MPEG-TS, so .ts files (and files named
// .mp4 that sniff as TS) are remuxed to MP4 on demand before serving. CORS
// headers are reflected for any Origin, and OPTIONS preflights get 204.
func recordVideo(w http.ResponseWriter, r *http.Request) {
	origin := r.Header.Get("Origin")
	if origin != "" {
		// ✅ allow the dev origin (or "*" if you don't care)
		w.Header().Set("Access-Control-Allow-Origin", origin)
		w.Header().Set("Vary", "Origin")
		w.Header().Set("Access-Control-Allow-Methods", "GET,HEAD,OPTIONS")
		w.Header().Set("Access-Control-Allow-Headers", "Range")
		w.Header().Set("Access-Control-Expose-Headers", "Content-Length, Content-Range, Accept-Ranges")
	}
	if r.Method == http.MethodOptions {
		w.WriteHeader(http.StatusNoContent)
		return
	}
	// ✅ playback via file name (for doneDir / recordDir)
	if raw := strings.TrimSpace(r.URL.Query().Get("file")); raw != "" {
		// decode explicitly (to be safe)
		file, err := url.QueryUnescape(raw)
		if err != nil {
			http.Error(w, "ungültiger file", http.StatusBadRequest)
			return
		}
		file = strings.TrimSpace(file)
		// no path components, no backslashes, no traversal
		if file == "" ||
			strings.Contains(file, "/") ||
			strings.Contains(file, "\\") ||
			filepath.Base(file) != file {
			http.Error(w, "ungültiger file", http.StatusBadRequest)
			return
		}
		ext := strings.ToLower(filepath.Ext(file))
		if ext != ".mp4" && ext != ".ts" {
			http.Error(w, "nicht erlaubt", http.StatusForbidden)
			return
		}
		s := getSettings()
		recordAbs, err := resolvePathRelativeToApp(s.RecordDir)
		if err != nil {
			http.Error(w, "recordDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
			return
		}
		doneAbs, err := resolvePathRelativeToApp(s.DoneDir)
		if err != nil {
			http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
			return
		}
		// Candidates: done first (incl. 1 level of subdirs, but without "keep"),
		// then keep (incl. 1 level of subdirs), then recordDir
		names := []string{file}
		// If the UI still knows ".ts" but the file already exists as ".mp4":
		if ext == ".ts" {
			mp4File := strings.TrimSuffix(file, ext) + ".mp4"
			names = append(names, mp4File)
		}
		var outPath string
		for _, name := range names {
			// done root + done/<subdir>/ (skip "keep")
			if p, _, ok := findFileInDirOrOneLevelSubdirs(doneAbs, name, "keep"); ok {
				outPath = p
				break
			}
			// keep root + keep/<subdir>/
			if p, _, ok := findFileInDirOrOneLevelSubdirs(filepath.Join(doneAbs, "keep"), name, ""); ok {
				outPath = p
				break
			}
			// record root (+ optionally 1 level of subdirs)
			if p, _, ok := findFileInDirOrOneLevelSubdirs(recordAbs, name, ""); ok {
				outPath = p
				break
			}
		}
		if outPath == "" {
			http.Error(w, "datei nicht gefunden", http.StatusNotFound)
			return
		}
		// Browsers cannot reliably play TS directly -> on-demand remux to MP4
		if strings.ToLower(filepath.Ext(outPath)) == ".ts" {
			newOut, err := maybeRemuxTS(outPath)
			if err != nil {
				http.Error(w, "TS kann im Browser nicht abgespielt werden; Remux fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
				return
			}
			if strings.TrimSpace(newOut) == "" {
				http.Error(w, "TS kann im Browser nicht abgespielt werden; Remux hat keine MP4 erzeugt", http.StatusInternalServerError)
				return
			}
			outPath = newOut
			// make sure an MP4 really exists
			fi, err := os.Stat(outPath)
			if err != nil || fi.IsDir() || fi.Size() == 0 || strings.ToLower(filepath.Ext(outPath)) != ".mp4" {
				http.Error(w, "Remux-Ergebnis ungültig", http.StatusInternalServerError)
				return
			}
		}
		// ✅ If the file is named ".mp4" but is actually TS/HTML -> do not serve it as MP4
		if strings.ToLower(filepath.Ext(outPath)) == ".mp4" {
			kind, _ := sniffVideoKind(outPath)
			switch kind {
			case "ts":
				newOut, err := maybeRemuxTS(outPath)
				if err != nil {
					http.Error(w, "Datei ist TS (nur .mp4 benannt); Remux fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
					return
				}
				outPath = newOut
			case "html":
				http.Error(w, "Server liefert HTML statt Video (Pfad/Lookup prüfen)", http.StatusInternalServerError)
				return
			}
		}
		w.Header().Set("Cache-Control", "no-store")
		serveVideoFile(w, r, outPath)
		return
	}
	// ✅ OLD: playback via job ID (only works while the job exists in RAM)
	id := strings.TrimSpace(r.URL.Query().Get("id"))
	if id == "" {
		http.Error(w, "id fehlt", http.StatusBadRequest)
		return
	}
	jobsMu.Lock()
	job, ok := jobs[id]
	jobsMu.Unlock()
	if !ok {
		http.Error(w, "job nicht gefunden", http.StatusNotFound)
		return
	}
	// NOTE(review): filepath.Clean("") returns ".", so the emptiness check
	// below can never fire for an empty job.Output — consider checking before
	// Clean if that case matters.
	outPath := filepath.Clean(strings.TrimSpace(job.Output))
	if outPath == "" {
		http.Error(w, "output fehlt", http.StatusNotFound)
		return
	}
	if !filepath.IsAbs(outPath) {
		abs, err := resolvePathRelativeToApp(outPath)
		if err != nil {
			http.Error(w, "pfad auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
			return
		}
		outPath = abs
	}
	fi, err := os.Stat(outPath)
	if err != nil || fi.IsDir() || fi.Size() == 0 {
		http.Error(w, "datei nicht gefunden", http.StatusNotFound)
		return
	}
	// Browsers cannot reliably play TS directly -> on-demand remux to MP4
	if strings.ToLower(filepath.Ext(outPath)) == ".ts" {
		newOut, err := maybeRemuxTS(outPath)
		if err != nil {
			http.Error(w, "TS Remux fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
			return
		}
		if strings.TrimSpace(newOut) == "" {
			http.Error(w, "TS kann im Browser nicht abgespielt werden; Remux hat keine MP4 erzeugt", http.StatusInternalServerError)
			return
		}
		outPath = newOut
		// re-stat the remux result (shadows the outer fi/err on purpose)
		fi, err := os.Stat(outPath)
		if err != nil || fi.IsDir() || fi.Size() == 0 || strings.ToLower(filepath.Ext(outPath)) != ".mp4" {
			http.Error(w, "Remux-Ergebnis ungültig", http.StatusInternalServerError)
			return
		}
	}
	serveVideoFile(w, r, outPath)
}
func setNoStoreHeaders(w http.ResponseWriter) {
// verhindert Browser/Proxy Caching (wichtig für Logs/Status)
w.Header().Set("Cache-Control", "no-store, max-age=0")
w.Header().Set("Pragma", "no-cache")
w.Header().Set("Expires", "0")
}
// durationSecondsCacheOnly returns the cached duration for path without ever
// invoking ffprobe. It yields 0 when there is no cache entry or when the
// entry is stale (size/mtime no longer match the file on disk).
func durationSecondsCacheOnly(path string, fi os.FileInfo) float64 {
	durCache.mu.Lock()
	entry, hit := durCache.m[path]
	durCache.mu.Unlock()
	if !hit {
		return 0
	}
	// Entry is only trustworthy while size and mtime still match.
	if entry.size != fi.Size() || !entry.mod.Equal(fi.ModTime()) || entry.sec <= 0 {
		return 0
	}
	return entry.sec
}
func findFileInDirOrOneLevelSubdirs(root string, file string, skipDirName string) (string, os.FileInfo, bool) {
// direct
p := filepath.Join(root, file)
if fi, err := os.Stat(p); err == nil && !fi.IsDir() && fi.Size() > 0 {
return p, fi, true
}
entries, err := os.ReadDir(root)
if err != nil {
return "", nil, false
}
for _, e := range entries {
if !e.IsDir() {
continue
}
if skipDirName != "" && e.Name() == skipDirName {
continue
}
pp := filepath.Join(root, e.Name(), file)
if fi, err := os.Stat(pp); err == nil && !fi.IsDir() && fi.Size() > 0 {
return pp, fi, true
}
}
return "", nil, false
}
// resolveDoneFileByName locates file below the done directory.
// Search order:
//  1. done root plus one subdir level (the "keep" subdir is skipped there),
//  2. done/keep root plus one subdir level.
// Returns the full path, where it was found ("done" or "keep"), and the
// FileInfo; err is non-nil when nothing matched.
func resolveDoneFileByName(doneAbs string, file string) (full string, from string, fi os.FileInfo, err error) {
	// 1) done (root + /done/<subdir>/) — "keep" is skipped here
	if p, info, ok := findFileInDirOrOneLevelSubdirs(doneAbs, file, "keep"); ok {
		return p, "done", info, nil
	}
	// 2) keep (root + /done/keep/<subdir>/)
	if p, info, ok := findFileInDirOrOneLevelSubdirs(filepath.Join(doneAbs, "keep"), file, ""); ok {
		return p, "keep", info, nil
	}
	return "", "", nil, fmt.Errorf("not found")
}
// doneListResponse is the JSON payload of the done-list endpoint: the
// (optionally paginated) items plus the total count before pagination.
// Page/PageSize are echoed back only when pagination was requested.
type doneListResponse struct {
	Items      []*RecordJob `json:"items"`
	TotalCount int          `json:"totalCount"`
	Page       int          `json:"page,omitempty"`
	PageSize   int          `json:"pageSize,omitempty"`
}
// isTrashPath reports whether p points into (or at) a ".trash" directory.
// Windows separators are normalized to "/" before matching; the check is
// case-sensitive and requires a preceding separator, so a bare ".trash"
// without any parent does not match.
func isTrashPath(p string) bool {
	norm := strings.ReplaceAll(p, "\\", "/")
	if strings.HasSuffix(norm, "/.trash") {
		return true
	}
	return strings.Contains(norm, "/.trash/")
}
// recordDoneList is the GET handler that lists finished recordings from the
// done directory (optionally including done/keep via includeKeep=1).
//
// Query parameters:
//   - includeKeep: also scan /done/keep/
//   - model:       filter by model key (derived from filename or parent dir)
//   - page/pageSize: 1-based pagination; omitted -> full list
//   - all=1:       force the full list (disables pagination)
//   - sort:        completed_/file_/duration_/size_ + _asc/_desc
//                  (legacy model_* is mapped to file_*)
//   - withCount=1: respond as {count, items} instead of doneListResponse
//
// ".trash" content is always excluded. Durations come from meta.json or the
// in-RAM cache only — deliberately no ffprobe here, to keep the API fast.
func recordDoneList(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Nur GET erlaubt", http.StatusMethodNotAllowed)
		return
	}
	// ✅ optional: also include /done/keep/ (default: false)
	qKeep := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("includeKeep")))
	includeKeep := qKeep == "1" || qKeep == "true" || qKeep == "yes"
	// ✅ NEW: optional model filter (pagination is then sensible "per model")
	normalizeQueryModel := func(raw string) string {
		s := strings.TrimSpace(raw)
		if s == "" {
			return ""
		}
		s = strings.TrimPrefix(s, "http://")
		s = strings.TrimPrefix(s, "https://")
		// last URL segment, in case someone passes ".../modelname"
		if strings.Contains(s, "/") {
			parts := strings.Split(s, "/")
			for i := len(parts) - 1; i >= 0; i-- {
				p := strings.TrimSpace(parts[i])
				if p != "" {
					s = p
					break
				}
			}
		}
		// in case "host:model" is passed
		if strings.Contains(s, ":") {
			s = strings.TrimSpace(strings.Split(s, ":")[len(strings.Split(s, ":"))-1])
		}
		s = strings.TrimPrefix(s, "@")
		return strings.ToLower(strings.TrimSpace(s))
	}
	qModel := normalizeQueryModel(r.URL.Query().Get("model"))
	// optional: pagination (1-based). When page/pageSize are missing -> full list as before
	page := 0
	pageSize := 0
	if v := strings.TrimSpace(r.URL.Query().Get("page")); v != "" {
		if n, err := strconv.Atoi(v); err == nil && n > 0 {
			page = n
		}
	}
	if v := strings.TrimSpace(r.URL.Query().Get("pageSize")); v != "" {
		if n, err := strconv.Atoi(v); err == nil && n > 0 {
			pageSize = n
		}
	}
	// optional: sort
	// supported: completed_(asc|desc), model_(asc|desc), file_(asc|desc), duration_(asc|desc), size_(asc|desc)
	sortMode := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("sort")))
	if sortMode == "" {
		sortMode = "completed_desc"
	}
	// ⚠️ Backwards compat: map old model_* sorts to file_*
	if sortMode == "model_asc" {
		sortMode = "file_asc"
	}
	if sortMode == "model_desc" {
		sortMode = "file_desc"
	}
	// ✅ all=1 -> always return the complete list (disable pagination)
	qAll := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("all")))
	fetchAll := qAll == "1" || qAll == "true" || qAll == "yes"
	if fetchAll {
		page = 0
		pageSize = 0
	}
	// ✅ never count/list .trash as a "done item" (case-insensitive variant)
	isTrashOutput := func(p string) bool {
		pp := strings.ToLower(filepath.ToSlash(strings.TrimSpace(p)))
		return strings.Contains(pp, "/.trash/") || strings.HasSuffix(pp, "/.trash")
	}
	// --- helpers (derive the model key from filename/dir) ---
	modelFromStem := func(stem string) string {
		// stem: lower-cased, without extension, without HOT prefix
		if stem == "" {
			return ""
		}
		if m := startedAtFromFilenameRe.FindStringSubmatch(stem); m != nil {
			return strings.ToLower(strings.TrimSpace(m[1]))
		}
		// fallback: everything before the last "_" (or the whole stem)
		if i := strings.LastIndex(stem, "_"); i > 0 {
			return strings.ToLower(strings.TrimSpace(stem[:i]))
		}
		return strings.ToLower(strings.TrimSpace(stem))
	}
	modelFromFullPath := func(full string) string {
		name := strings.ToLower(filepath.Base(full))
		stem := strings.TrimSuffix(name, filepath.Ext(name))
		stem = strings.TrimPrefix(stem, "hot ")
		mk := modelFromStem(stem)
		// fallback: if the filename is useless, use the folder (/done/<model>/file)
		if mk == "" {
			parent := strings.ToLower(filepath.Base(filepath.Dir(full)))
			parent = strings.TrimSpace(parent)
			if parent != "" && parent != "keep" {
				mk = parent
			}
		}
		return mk
	}
	// NOTE: intentionally shadows the package-level isTrashPath (same logic).
	isTrashPath := func(full string) bool {
		p := strings.ReplaceAll(full, "\\", "/")
		// match: ".../.trash/file.ext" or ".../.trash"
		return strings.Contains(p, "/.trash/") || strings.HasSuffix(p, "/.trash")
	}
	// --- resolve done path ---
	s := getSettings()
	doneAbs, err := resolvePathRelativeToApp(s.DoneDir)
	if err != nil {
		http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	// If no DoneDir is configured → simply return an empty list
	if strings.TrimSpace(doneAbs) == "" {
		w.Header().Set("Content-Type", "application/json")
		w.Header().Set("Cache-Control", "no-store")
		_ = json.NewEncoder(w).Encode(doneListResponse{
			Items:      []*RecordJob{},
			TotalCount: 0,
			Page:       page,
			PageSize:   pageSize,
		})
		return
	}
	type scanDir struct {
		dir      string
		skipKeep bool // only for doneAbs: do not scan "keep" twice
	}
	dirs := []scanDir{{dir: doneAbs, skipKeep: true}}
	if includeKeep {
		dirs = append(dirs, scanDir{dir: filepath.Join(doneAbs, "keep"), skipKeep: false})
	}
	list := make([]*RecordJob, 0, 256)
	// addFile converts one on-disk video into a synthetic RecordJob entry.
	addFile := func(full string, fi os.FileInfo) {
		// ✅ never count / return .trash
		if isTrashPath(full) {
			return
		}
		name := filepath.Base(full)
		ext := strings.ToLower(filepath.Ext(name))
		if ext != ".mp4" && ext != ".ts" {
			return
		}
		// ✅ exclude .trash from the done list (also for totalCount/tab counter)
		if isTrashOutput(full) {
			return
		}
		// ✅ NEW: apply the model filter before the more expensive meta work
		if qModel != "" {
			if mk := modelFromFullPath(full); mk != qModel {
				return
			}
		}
		base := strings.TrimSuffix(name, filepath.Ext(name))
		t := fi.ModTime()
		// StartedAt from the filename (fallback: ModTime)
		start := t
		stem := base
		if strings.HasPrefix(stem, "HOT ") {
			stem = strings.TrimPrefix(stem, "HOT ")
		}
		if m := startedAtFromFilenameRe.FindStringSubmatch(stem); m != nil {
			mm, _ := strconv.Atoi(m[2])
			dd, _ := strconv.Atoi(m[3])
			yy, _ := strconv.Atoi(m[4])
			hh, _ := strconv.Atoi(m[5])
			mi, _ := strconv.Atoi(m[6])
			ss, _ := strconv.Atoi(m[7])
			start = time.Date(yy, time.Month(mm), dd, hh, mi, ss, 0, time.Local)
		}
		dur := 0.0
		// 1) read meta.json from generated/<id>/meta.json (fast)
		id := stripHotPrefix(strings.TrimSuffix(filepath.Base(full), filepath.Ext(full)))
		srcURL := ""
		if strings.TrimSpace(id) != "" {
			if mp, err := generatedMetaFile(id); err == nil {
				if d, ok := readVideoMetaDuration(mp, fi); ok {
					dur = d
				}
				if u, ok := readVideoMetaSourceURL(mp, fi); ok {
					srcURL = u
				}
			}
		}
		// 2) fallback: RAM cache only (still fast, no ffprobe)
		if dur <= 0 {
			dur = durationSecondsCacheOnly(full, fi)
		}
		// 3) NO ffprobe here! (otherwise the API becomes slow again)
		list = append(list, &RecordJob{
			ID:              base,
			Output:          full,
			SourceURL:       srcURL,
			Status:          JobFinished,
			StartedAt:       start,
			EndedAt:         &t,
			DurationSeconds: dur,
			SizeBytes:       fi.Size(),
		})
	}
	for _, sd := range dirs {
		entries, err := os.ReadDir(sd.dir)
		if err != nil {
			if os.IsNotExist(err) {
				// A missing done root is fine -> empty list; a missing keep dir is skipped.
				if sd.dir == doneAbs {
					w.Header().Set("Content-Type", "application/json")
					w.Header().Set("Cache-Control", "no-store")
					_ = json.NewEncoder(w).Encode(doneListResponse{
						Items:      []*RecordJob{},
						TotalCount: 0,
						Page:       page,
						PageSize:   pageSize,
					})
					return
				}
				continue
			}
			if sd.dir == doneAbs {
				http.Error(w, "doneDir lesen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
				return
			}
			continue
		}
		for _, e := range entries {
			// Subdir: descend 1 level (e.g. /done/<model>/ or /done/keep/<model>/)
			if e.IsDir() {
				// ✅ never scan the .trash folder
				if e.Name() == ".trash" {
					continue
				}
				if sd.skipKeep && e.Name() == "keep" {
					continue
				}
				// ✅ never scan .trash (case-insensitive; overlaps with the check above)
				if strings.EqualFold(e.Name(), ".trash") {
					continue
				}
				sub := filepath.Join(sd.dir, e.Name())
				subEntries, err := os.ReadDir(sub)
				if err != nil {
					continue
				}
				for _, se := range subEntries {
					if se.IsDir() {
						continue
					}
					full := filepath.Join(sub, se.Name())
					fi, err := os.Stat(full)
					if err != nil || fi.IsDir() || fi.Size() == 0 {
						continue
					}
					addFile(full, fi)
				}
				continue
			}
			full := filepath.Join(sd.dir, e.Name())
			fi, err := os.Stat(full)
			if err != nil || fi.IsDir() || fi.Size() == 0 {
				continue
			}
			addFile(full, fi)
		}
	}
	// helpers (sort)
	fileForSort := func(j *RecordJob) string {
		f := strings.ToLower(filepath.Base(j.Output))
		// exclude the HOT prefix from sorting
		f = strings.TrimPrefix(f, "hot ")
		return f
	}
	durationForSort := func(j *RecordJob) (sec float64, ok bool) {
		if j.DurationSeconds > 0 {
			return j.DurationSeconds, true
		}
		return 0, false
	}
	// sorting (ties always fall back to EndedAt and then the file name)
	sort.Slice(list, func(i, j int) bool {
		a, b := list[i], list[j]
		ta, tb := time.Time{}, time.Time{}
		if a.EndedAt != nil {
			ta = *a.EndedAt
		}
		if b.EndedAt != nil {
			tb = *b.EndedAt
		}
		switch sortMode {
		case "completed_asc":
			if !ta.Equal(tb) {
				return ta.Before(tb)
			}
			return fileForSort(a) < fileForSort(b)
		case "completed_desc":
			if !ta.Equal(tb) {
				return ta.After(tb)
			}
			return fileForSort(a) < fileForSort(b)
		case "file_asc":
			fa, fb := fileForSort(a), fileForSort(b)
			if fa != fb {
				return fa < fb
			}
			if !ta.Equal(tb) {
				return ta.After(tb)
			}
			return fileForSort(a) < fileForSort(b)
		case "file_desc":
			fa, fb := fileForSort(a), fileForSort(b)
			if fa != fb {
				return fa > fb
			}
			if !ta.Equal(tb) {
				return ta.After(tb)
			}
			return fileForSort(a) < fileForSort(b)
		case "duration_asc":
			da, okA := durationForSort(a)
			db, okB := durationForSort(b)
			if okA != okB {
				return okA // unknown durations go last
			}
			if okA && okB && da != db {
				return da < db
			}
			if !ta.Equal(tb) {
				return ta.After(tb)
			}
			return fileForSort(a) < fileForSort(b)
		case "duration_desc":
			da, okA := durationForSort(a)
			db, okB := durationForSort(b)
			if okA != okB {
				return okA
			}
			if okA && okB && da != db {
				return da > db
			}
			if !ta.Equal(tb) {
				return ta.After(tb)
			}
			return fileForSort(a) < fileForSort(b)
		case "size_asc":
			if a.SizeBytes != b.SizeBytes {
				return a.SizeBytes < b.SizeBytes
			}
			if !ta.Equal(tb) {
				return ta.After(tb)
			}
			return fileForSort(a) < fileForSort(b)
		case "size_desc":
			if a.SizeBytes != b.SizeBytes {
				return a.SizeBytes > b.SizeBytes
			}
			if !ta.Equal(tb) {
				return ta.After(tb)
			}
			return fileForSort(a) < fileForSort(b)
		default:
			if !ta.Equal(tb) {
				return ta.After(tb)
			}
			return fileForSort(a) < fileForSort(b)
		}
	})
	// ✅ optional: include count envelope
	qWithCount := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("withCount")))
	withCount := qWithCount == "1" || qWithCount == "true" || qWithCount == "yes"
	// ✅ ALWAYS capture the total count before pagination
	totalCount := len(list)
	// ✅ apply pagination only to "items" (list stays intact for totalCount)
	items := list
	if pageSize > 0 && !fetchAll {
		if page <= 0 {
			page = 1
		}
		start := (page - 1) * pageSize
		if start < 0 {
			start = 0
		}
		if start >= totalCount {
			items = []*RecordJob{}
		} else {
			end := start + pageSize
			if end > totalCount {
				end = totalCount
			}
			items = list[start:end]
		}
	}
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Cache-Control", "no-store")
	// ✅ If the frontend uses "withCount=1": {count, items}
	if withCount {
		_ = json.NewEncoder(w).Encode(map[string]any{
			"count": totalCount,
			"items": items,
		})
		return
	}
	// ✅ default response: always include totalCount as well
	_ = json.NewEncoder(w).Encode(doneListResponse{
		Items:      items,
		TotalCount: totalCount,
		Page:       page,
		PageSize:   pageSize,
	})
	return
}
// doneMetaResp is a minimal JSON envelope carrying only a count.
type doneMetaResp struct {
	Count int `json:"count"`
}

// durationReq is the JSON request body for a batch duration lookup:
// a list of file basenames.
type durationReq struct {
	Files []string `json:"files"`
}

// durationItem is one result row of a batch duration lookup; either
// DurationSeconds or Error is populated per file.
type durationItem struct {
	File            string  `json:"file"`
	DurationSeconds float64 `json:"durationSeconds,omitempty"`
	Error           string  `json:"error,omitempty"`
}
// removeJobsByOutputBasename drops every in-memory job whose output file has
// the given basename, and notifies listeners when anything was removed.
func removeJobsByOutputBasename(file string) {
	file = strings.TrimSpace(file)
	if file == "" {
		return
	}
	var hit bool
	jobsMu.Lock()
	for id, j := range jobs {
		if j == nil {
			continue
		}
		out := strings.TrimSpace(j.Output)
		if out == "" || filepath.Base(out) != file {
			continue
		}
		delete(jobs, id)
		hit = true
	}
	jobsMu.Unlock()
	if hit {
		notifyJobsChanged()
	}
}
// renameJobsOutputBasename rewrites the output path of every in-memory job
// whose basename equals oldFile to use newFile (same directory), and notifies
// listeners when anything changed.
func renameJobsOutputBasename(oldFile, newFile string) {
	oldFile, newFile = strings.TrimSpace(oldFile), strings.TrimSpace(newFile)
	if oldFile == "" || newFile == "" {
		return
	}
	var touched bool
	jobsMu.Lock()
	for _, j := range jobs {
		if j == nil {
			continue
		}
		out := strings.TrimSpace(j.Output)
		if out == "" || filepath.Base(out) != oldFile {
			continue
		}
		j.Output = filepath.Join(filepath.Dir(out), newFile)
		touched = true
	}
	jobsMu.Unlock()
	if touched {
		notifyJobsChanged()
	}
}
// --- Undo token (trash restore) ---

// undoDeleteToken records where a trashed file originally lived so the
// single-slot ".trash" entry can be restored to its old location later.
type undoDeleteToken struct {
	Trash  string `json:"trash"`  // basename inside .trash
	RelDir string `json:"relDir"` // dir relative to doneAbs, e.g. ".", "keep/model", "model"
	File   string `json:"file"`   // original basename, e.g. "HOT xyz.mp4"
}

// encodeUndoDeleteToken serializes the token as JSON wrapped in unpadded
// URL-safe base64, so it can travel inside file names and query strings.
func encodeUndoDeleteToken(t undoDeleteToken) (string, error) {
	raw, err := json.Marshal(t)
	if err != nil {
		return "", err
	}
	return base64.RawURLEncoding.EncodeToString(raw), nil
}

// decodeUndoDeleteToken reverses encodeUndoDeleteToken.
func decodeUndoDeleteToken(raw string) (undoDeleteToken, error) {
	var tok undoDeleteToken
	data, err := base64.RawURLEncoding.DecodeString(raw)
	if err != nil {
		return tok, err
	}
	if err := json.Unmarshal(data, &tok); err != nil {
		return tok, err
	}
	return tok, nil
}
// isSafeRelDir reports whether rel is acceptable as a directory path relative
// to the done root: non-empty, not absolute, and not escaping upward after
// cleaning. "." (the root itself) is explicitly allowed.
func isSafeRelDir(rel string) bool {
	rel = strings.TrimSpace(rel)
	if rel == "" {
		return false
	}
	// normalize to forward slashes for validation
	rel = filepath.ToSlash(rel)
	if strings.HasPrefix(rel, "/") {
		return false
	}
	clean := path.Clean(rel) // path.Clean keeps forward slashes
	switch {
	case clean == ".":
		return true
	case clean == ".." || strings.HasPrefix(clean, "../"):
		return false
	case strings.Contains(clean, `\`):
		// reject leftover backslashes (possible traversal trickery)
		return false
	}
	return true
}
// isSafeBasename reports whether name is a plain file name that can be safely
// joined onto a directory: non-empty after trimming, free of path separators,
// not a traversal component ("." or ".."), and equal to its own filepath.Base.
// Rejecting "."/".." closes a traversal hole: filepath.Join(dir, "..") would
// otherwise escape the intended directory.
func isSafeBasename(name string) bool {
	name = strings.TrimSpace(name)
	if name == "" || name == "." || name == ".." {
		return false
	}
	if strings.Contains(name, "/") || strings.Contains(name, "\\") {
		return false
	}
	return filepath.Base(name) == name
}
func recordDeleteVideo(w http.ResponseWriter, r *http.Request) {
// Frontend nutzt aktuell POST (siehe FinishedDownloads), daher erlauben wir POST + DELETE
if r.Method != http.MethodPost && r.Method != http.MethodDelete {
http.Error(w, "Nur POST oder DELETE erlaubt", http.StatusMethodNotAllowed)
return
}
raw := strings.TrimSpace(r.URL.Query().Get("file"))
if raw == "" {
http.Error(w, "file fehlt", http.StatusBadRequest)
return
}
// sicher decoden
file, err := url.QueryUnescape(raw)
if err != nil {
http.Error(w, "ungültiger file", http.StatusBadRequest)
return
}
file = strings.TrimSpace(file)
// ✅ nur Basename erlauben (keine Unterordner, kein Traversal)
if file == "" ||
strings.Contains(file, "/") ||
strings.Contains(file, "\\") ||
filepath.Base(file) != file {
http.Error(w, "ungültiger file", http.StatusBadRequest)
return
}
ext := strings.ToLower(filepath.Ext(file))
if ext != ".mp4" && ext != ".ts" {
http.Error(w, "nicht erlaubt", http.StatusForbidden)
return
}
s := getSettings()
doneAbs, err := resolvePathRelativeToApp(s.DoneDir)
if err != nil {
http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
return
}
if strings.TrimSpace(doneAbs) == "" {
http.Error(w, "doneDir ist leer", http.StatusBadRequest)
return
}
// ✅ done + done/<subdir> sowie keep + keep/<subdir>
target, from, fi, err := resolveDoneFileByName(doneAbs, file)
if err != nil {
http.Error(w, "datei nicht gefunden", http.StatusNotFound)
return
}
if fi != nil && fi.IsDir() {
http.Error(w, "ist ein verzeichnis", http.StatusBadRequest)
return
}
// ✅ Single-slot Trash: immer nur die *zuletzt* gelöschte Datei erlauben
trashDir := filepath.Join(doneAbs, ".trash")
// ✅ Wenn im Single-slot Trash schon was liegt: ID merken,
// aber generated erst löschen, NACHDEM .trash wirklich erfolgreich geleert wurde.
prevBase := ""
prevCanonical := ""
if b, err := os.ReadFile(filepath.Join(trashDir, "last.json")); err == nil && len(b) > 0 {
var prev struct {
File string `json:"file"`
}
if err := json.Unmarshal(b, &prev); err == nil {
prevFile := strings.TrimSpace(prev.File)
if prevFile != "" {
prevBase = strings.TrimSuffix(prevFile, filepath.Ext(prevFile))
prevCanonical = stripHotPrefix(prevBase)
}
}
}
// Trash komplett leeren => ältere Undos sind automatisch ungültig
// ⚠️ Fehler NICHT schlucken: wenn .trash nicht leerbar ist, darf der neue Delete nicht weiterlaufen.
if err := os.RemoveAll(trashDir); err != nil {
if runtime.GOOS == "windows" && isSharingViolation(err) {
http.Error(w, "konnte .trash nicht leeren (Datei wird gerade verwendet). Bitte Player schließen und erneut versuchen.", http.StatusConflict)
return
}
http.Error(w, "trash leeren fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
return
}
// ✅ Jetzt ist das alte Trash-Video wirklich endgültig weg → generated/meta/<id>/ entfernen.
if prevCanonical != "" {
removeGeneratedForID(prevCanonical)
// Best-effort: falls irgendwo mal Assets mit HOT-ID entstanden sind
if prevBase != "" && prevBase != prevCanonical {
removeGeneratedForID(prevBase)
}
}
if err := os.MkdirAll(trashDir, 0o755); err != nil {
http.Error(w, "trash dir erstellen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
return
}
// Original-Dir relativ zu doneAbs merken (inkl. keep/<subdir> oder <subdir>)
origDir := filepath.Dir(target)
relDir, err := filepath.Rel(doneAbs, origDir)
if err != nil {
http.Error(w, "rel dir berechnen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
return
}
relDir = filepath.ToSlash(relDir)
if strings.TrimSpace(relDir) == "" {
relDir = "."
}
// ✅ Undo-Token jetzt schon erzeugen, damit wir es als "Single-slot key" speichern können
tok, err := encodeUndoDeleteToken(undoDeleteToken{
Trash: "", // setzen wir gleich (trashName)
RelDir: relDir, // hast du oben schon berechnet
File: file,
})
if err != nil {
http.Error(w, "undo token encode fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
return
}
trashName := tok + "__" + file // eindeutig + Token sichtbar in filename
trashName = strings.ReplaceAll(trashName, string(os.PathSeparator), "_")
dst := filepath.Join(trashDir, trashName)
// ✅ Token muss auch wissen, wie der Trashname heißt
// (wir encoden den Token nicht neu — wir speichern Trashname separat in last.json)
// move mit retry (Windows file-lock robust)
if err := renameWithRetry(target, dst); err != nil {
if runtime.GOOS == "windows" && isSharingViolation(err) {
http.Error(w, "datei wird gerade verwendet (Player offen). Bitte kurz stoppen und erneut versuchen.", http.StatusConflict)
return
}
http.Error(w, "trash move fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
return
}
// ✅ last.json schreiben: nur dieser Token ist gültig
type trashMeta struct {
Token string `json:"token"` // exakt der Query-Token (encoded)
TrashName string `json:"trashName"` // Dateiname in .trash
RelDir string `json:"relDir"` // ursprünglicher Ordner relativ zu doneAbs
File string `json:"file"` // originaler Name (basename)
DeletedAt int64 `json:"deletedAt"`
}
meta := trashMeta{
Token: tok,
TrashName: trashName,
RelDir: relDir,
File: file,
DeletedAt: time.Now().Unix(),
}
b, _ := json.Marshal(meta)
_ = os.WriteFile(filepath.Join(trashDir, "last.json"), b, 0o644)
// Cache/Jobs aufräumen (Assets NICHT hart löschen => Undo bleibt “schnell” möglich)
purgeDurationCacheForPath(target)
removeJobsByOutputBasename(file)
notifyDoneChanged()
notifyJobsChanged()
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Cache-Control", "no-store")
_ = json.NewEncoder(w).Encode(map[string]any{
"ok": true,
"file": file,
"from": from, // "done" | "keep"
"undoToken": tok, // ✅ für Undo
"trashed": true,
})
}
// recordRestoreVideo restores the single most recently trashed recording.
//
// POST with ?token=<undo token>. The done dir's ".trash" folder is a
// single-slot trash: only the token recorded in .trash/last.json is
// accepted; any older token yields 404. On success the file is moved back
// to its original folder (relative to the done dir, de-duplicated on name
// collision) and the trash is emptied, so the token cannot be replayed.
func recordRestoreVideo(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Nur POST erlaubt", http.StatusMethodNotAllowed)
		return
	}
	raw := strings.TrimSpace(r.URL.Query().Get("token"))
	if raw == "" {
		http.Error(w, "token fehlt", http.StatusBadRequest)
		return
	}
	// Resolve the configured done directory to an absolute path.
	s := getSettings()
	doneAbs, err := resolvePathRelativeToApp(s.DoneDir)
	if err != nil {
		http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	if strings.TrimSpace(doneAbs) == "" {
		http.Error(w, "doneDir ist leer", http.StatusBadRequest)
		return
	}
	// Single-slot trash: read last.json and validate the token strictly.
	trashDir := filepath.Join(doneAbs, ".trash")
	metaPath := filepath.Join(trashDir, "last.json")
	b, err := os.ReadFile(metaPath)
	if err != nil {
		http.Error(w, "nichts zum Wiederherstellen", http.StatusNotFound)
		return
	}
	var meta struct {
		Token     string `json:"token"`
		TrashName string `json:"trashName"`
		RelDir    string `json:"relDir"`
		File      string `json:"file"`
		DeletedAt int64  `json:"deletedAt"`
	}
	if err := json.Unmarshal(b, &meta); err != nil {
		http.Error(w, "trash meta ungültig", http.StatusInternalServerError)
		return
	}
	if strings.TrimSpace(meta.Token) == "" || strings.TrimSpace(meta.TrashName) == "" || strings.TrimSpace(meta.File) == "" {
		http.Error(w, "trash meta unvollständig", http.StatusInternalServerError)
		return
	}
	// Only the token of the most recent deletion is valid.
	if raw != meta.Token {
		http.Error(w, "token ungültig (nicht der letzte)", http.StatusNotFound)
		return
	}
	// Additionally decode the token (format/signature check); the actual
	// restore data still comes from last.json.
	tok, err := decodeUndoDeleteToken(raw)
	if err != nil {
		http.Error(w, "token ungültig", http.StatusBadRequest)
		return
	}
	// Safety: only use path components from meta that pass the safety checks.
	if !isSafeBasename(meta.TrashName) || !isSafeBasename(meta.File) || !isSafeRelDir(meta.RelDir) {
		http.Error(w, "token inhalt ungültig", http.StatusBadRequest)
		return
	}
	// Extra consistency check: token payload must match the recorded deletion.
	if tok.File != meta.File || tok.RelDir != meta.RelDir {
		http.Error(w, "token passt nicht zu letzter Löschung", http.StatusNotFound)
		return
	}
	ext := strings.ToLower(filepath.Ext(meta.File))
	if ext != ".mp4" && ext != ".ts" {
		http.Error(w, "nicht erlaubt", http.StatusForbidden)
		return
	}
	// Source: exactly the most recently trashed file.
	src := filepath.Join(trashDir, meta.TrashName)
	// Reconstruct the destination folder (relative to doneAbs).
	rel := meta.RelDir
	if rel == "." {
		rel = ""
	}
	dstDir := filepath.Join(doneAbs, filepath.FromSlash(rel))
	dstDirClean := filepath.Clean(dstDir)
	doneClean := filepath.Clean(doneAbs)
	// Safety: dstDir must stay inside doneAbs (case-insensitive prefix check).
	if !strings.HasPrefix(strings.ToLower(dstDirClean)+string(os.PathSeparator), strings.ToLower(doneClean)+string(os.PathSeparator)) &&
		!strings.EqualFold(dstDirClean, doneClean) {
		http.Error(w, "zielpfad ungültig", http.StatusBadRequest)
		return
	}
	if err := os.MkdirAll(dstDirClean, 0o755); err != nil {
		http.Error(w, "zielordner erstellen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	// Pick a collision-free destination name (may append a dup suffix).
	dst, err := uniqueDestPath(dstDirClean, meta.File)
	if err != nil {
		http.Error(w, "zielname nicht verfügbar: "+err.Error(), http.StatusConflict)
		return
	}
	if err := renameWithRetry(src, dst); err != nil {
		if runtime.GOOS == "windows" && isSharingViolation(err) {
			http.Error(w, "restore fehlgeschlagen (Datei wird gerade verwendet).", http.StatusConflict)
			return
		}
		http.Error(w, "restore fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	// Empty the trash so the token is definitively dead afterwards (best effort).
	_ = os.RemoveAll(trashDir)
	_ = os.MkdirAll(trashDir, 0o755)
	notifyDoneChanged()
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Cache-Control", "no-store")
	_ = json.NewEncoder(w).Encode(map[string]any{
		"ok":           true,
		"file":         meta.File,
		"restoredFile": filepath.Base(dst), // may contain a __dup suffix
	})
}
// recordUnkeepVideo moves a kept recording back from keep/ (or keep/<subdir>)
// into the flat done/ directory.
//
// POST with ?file=<basename>; only .mp4/.ts basenames are accepted and the
// file must currently be resolved as coming from "keep". The response
// reports the (possibly de-duplicated) new filename.
func recordUnkeepVideo(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Nur POST erlaubt", http.StatusMethodNotAllowed)
		return
	}
	raw := strings.TrimSpace(r.URL.Query().Get("file"))
	if raw == "" {
		http.Error(w, "file fehlt", http.StatusBadRequest)
		return
	}
	file, err := url.QueryUnescape(raw)
	if err != nil {
		http.Error(w, "ungültiger file", http.StatusBadRequest)
		return
	}
	file = strings.TrimSpace(file)
	// Basename only: no subfolders, no path traversal.
	if !isSafeBasename(file) {
		http.Error(w, "ungültiger file", http.StatusBadRequest)
		return
	}
	ext := strings.ToLower(filepath.Ext(file))
	if ext != ".mp4" && ext != ".ts" {
		http.Error(w, "nicht erlaubt", http.StatusForbidden)
		return
	}
	s := getSettings()
	doneAbs, err := resolvePathRelativeToApp(s.DoneDir)
	if err != nil {
		http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	if strings.TrimSpace(doneAbs) == "" {
		http.Error(w, "doneDir ist leer", http.StatusBadRequest)
		return
	}
	// Source must live in keep (root or keep/<subdir>).
	src, from, fi, err := resolveDoneFileByName(doneAbs, file)
	if err != nil {
		http.Error(w, "datei nicht gefunden", http.StatusNotFound)
		return
	}
	if from != "keep" {
		http.Error(w, "datei ist nicht in keep", http.StatusConflict)
		return
	}
	if fi != nil && fi.IsDir() {
		http.Error(w, "ist ein verzeichnis", http.StatusBadRequest)
		return
	}
	// Destination: back to done/ (flat, without model subdirs).
	dstDir := doneAbs
	if err := os.MkdirAll(dstDir, 0o755); err != nil {
		http.Error(w, "done subdir erstellen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	// Avoid name collisions in the destination.
	dst, err := uniqueDestPath(dstDir, file)
	if err != nil {
		http.Error(w, "zielname nicht verfügbar: "+err.Error(), http.StatusConflict)
		return
	}
	if err := renameWithRetry(src, dst); err != nil {
		if runtime.GOOS == "windows" && isSharingViolation(err) {
			http.Error(w, "unkeep fehlgeschlagen (Datei wird gerade verwendet).", http.StatusConflict)
			return
		}
		http.Error(w, "unkeep fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	notifyDoneChanged()
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Cache-Control", "no-store")
	_ = json.NewEncoder(w).Encode(map[string]any{
		"ok":      true,
		"oldFile": file,
		"newFile": filepath.Base(dst),
	})
}
// serveVideoFile streams a local video file with HTTP Range support
// (required for seeking in video players).
//
// The file is opened via openForReadShareDelete — presumably with
// FILE_SHARE_DELETE semantics on Windows so it can still be moved or
// trashed while being played; confirm in that helper. Content-Type is
// chosen from the extension: .ts → video/mp2t, everything else → video/mp4.
func serveVideoFile(w http.ResponseWriter, r *http.Request, path string) {
	f, err := openForReadShareDelete(path)
	if err != nil {
		http.Error(w, "datei öffnen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	defer f.Close()
	// Empty or missing files are reported as 404 (e.g. deleted mid-request).
	fi, err := f.Stat()
	if err != nil || fi.IsDir() || fi.Size() == 0 {
		http.Error(w, "datei nicht gefunden", http.StatusNotFound)
		return
	}
	w.Header().Set("Cache-Control", "no-store")
	w.Header().Set("Accept-Ranges", "bytes")
	w.Header().Set("X-Content-Type-Options", "nosniff")
	ext := strings.ToLower(filepath.Ext(path))
	switch ext {
	case ".ts":
		w.Header().Set("Content-Type", "video/mp2t")
	default:
		w.Header().Set("Content-Type", "video/mp4")
	}
	// ServeContent handles Range requests (important for video seeking).
	http.ServeContent(w, r, filepath.Base(path), fi.ModTime(), f)
}
// sniffVideoKind inspects the first bytes of the file at path and
// classifies it as "html", "ts", "mp4" or "unknown". An open error is
// returned unchanged; a short or empty read yields "unknown".
func sniffVideoKind(path string) (string, error) {
	f, err := openForReadShareDelete(path)
	if err != nil {
		return "", err
	}
	defer f.Close()

	head := make([]byte, 64)
	n, _ := f.Read(head)
	head = head[:n]

	lead := bytes.TrimSpace(head)
	switch {
	case len(lead) >= 1 && lead[0] == '<':
		// Looks like an HTML (error) page rather than media data.
		return "html", nil
	case len(head) >= 1 && head[0] == 0x47:
		// MPEG-TS packets start with the 0x47 sync byte.
		return "ts", nil
	case len(head) >= 8 && string(head[4:8]) == "ftyp":
		// MP4/ISO-BMFF files typically carry "ftyp" at offset 4.
		return "mp4", nil
	}
	return "unknown", nil
}
// recordKeepVideo moves a recording from done/ (or done/<subdir>) into the
// keep/ tree, sorted into keep/<model>/ when a model key can be derived.
//
// POST with ?file=<basename> (.mp4/.ts only). If the file already lives
// somewhere under keep/, it is reported as alreadyKept; a file sitting in
// the keep root is additionally re-sorted into keep/<model>/ (best effort).
func recordKeepVideo(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Nur POST erlaubt", http.StatusMethodNotAllowed)
		return
	}
	raw := strings.TrimSpace(r.URL.Query().Get("file"))
	if raw == "" {
		http.Error(w, "file fehlt", http.StatusBadRequest)
		return
	}
	file, err := url.QueryUnescape(raw)
	if err != nil {
		http.Error(w, "ungültiger file", http.StatusBadRequest)
		return
	}
	file = strings.TrimSpace(file)
	// Basename only: no subfolders, no path traversal.
	if file == "" ||
		strings.Contains(file, "/") ||
		strings.Contains(file, "\\") ||
		filepath.Base(file) != file {
		http.Error(w, "ungültiger file", http.StatusBadRequest)
		return
	}
	ext := strings.ToLower(filepath.Ext(file))
	if ext != ".mp4" && ext != ".ts" {
		http.Error(w, "nicht erlaubt", http.StatusForbidden)
		return
	}
	s := getSettings()
	doneAbs, err := resolvePathRelativeToApp(s.DoneDir)
	if err != nil {
		http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	if strings.TrimSpace(doneAbs) == "" {
		http.Error(w, "doneDir ist leer", http.StatusBadRequest)
		return
	}
	keepRoot := filepath.Join(doneAbs, "keep")
	if err := os.MkdirAll(keepRoot, 0o755); err != nil {
		http.Error(w, "keep dir erstellen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	// 0) Already somewhere in keep (root or keep/<subdir>)?
	//    If it sits in the keep root, try re-sorting it into keep/<model>/ now.
	if p, _, ok := findFileInDirOrOneLevelSubdirs(keepRoot, file, ""); ok {
		// p lies either in keepRoot or in keepRoot/<subdir>.
		if strings.EqualFold(filepath.Clean(filepath.Dir(p)), filepath.Clean(keepRoot)) {
			// In the root => try to sort it into a model folder.
			modelKey := modelKeyFromFilenameOrPath(file, p /* srcPath */, keepRoot /* doneAbs dummy, not used here */)
			modelKey = sanitizeModelKey(modelKey)
			// Optional fallback: derive the key from the filename alone when
			// the keep-root path yields nothing.
			if modelKey == "" {
				stem := strings.TrimSuffix(file, filepath.Ext(file))
				modelKey = sanitizeModelKey(modelNameFromFilename(stem))
			}
			if modelKey != "" {
				dstDir := filepath.Join(keepRoot, modelKey)
				if err := os.MkdirAll(dstDir, 0o755); err == nil {
					dst, derr := uniqueDestPath(dstDir, file)
					if derr == nil {
						// Best-effort move; on failure the file stays in the root.
						_ = renameWithRetry(p, dst)
					}
				}
			}
		}
		w.Header().Set("Content-Type", "application/json")
		w.Header().Set("Cache-Control", "no-store")
		_ = json.NewEncoder(w).Encode(map[string]any{
			"ok":          true,
			"file":        file,
			"alreadyKept": true,
		})
		return
	}
	// 1) Source must be in done (root or done/<subdir>), but NOT under keep.
	src, fi, ok := findFileInDirOrOneLevelSubdirs(doneAbs, file, "keep")
	if !ok {
		http.Error(w, "datei nicht gefunden", http.StatusNotFound)
		return
	}
	if fi == nil || fi.IsDir() {
		http.Error(w, "ist ein verzeichnis", http.StatusBadRequest)
		return
	}
	// 2) Destination: keep/<model>/file (keep root when no model key found).
	modelKey := modelKeyFromFilenameOrPath(file, src, doneAbs)
	dstDir := keepRoot
	if modelKey != "" {
		dstDir = filepath.Join(keepRoot, modelKey)
	}
	if err := os.MkdirAll(dstDir, 0o755); err != nil {
		http.Error(w, "keep subdir erstellen fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	dst, err := uniqueDestPath(dstDir, file)
	if err != nil {
		http.Error(w, "zielname nicht verfügbar: "+err.Error(), http.StatusConflict)
		return
	}
	// Rename with retry (robust against Windows file locks).
	if err := renameWithRetry(src, dst); err != nil {
		if runtime.GOOS == "windows" && isSharingViolation(err) {
			http.Error(w, "keep fehlgeschlagen (Datei wird gerade verwendet).", http.StatusConflict)
			return
		}
		http.Error(w, "keep fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	notifyDoneChanged()
	// ... the existing cleanup block (delete generated assets, legacy cleanup,
	// removeJobsByOutputBasename) stays unchanged ...
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Cache-Control", "no-store")
	_ = json.NewEncoder(w).Encode(map[string]any{
		"ok":          true,
		"file":        file,
		"alreadyKept": false,
		"newFile":     filepath.Base(dst), // new (possibly de-duplicated) name
	})
}
// recordToggleHot toggles the "HOT " filename prefix on a recording by
// renaming it in place (within done/, done/<subdir>, keep/ or keep/<subdir>).
//
// POST with ?file=<basename> (.mp4/.ts only). Generated assets are NOT
// renamed — they stay keyed by the canonical id (name without the HOT
// prefix); only the job list is updated to the new basename.
func recordToggleHot(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Nur POST", http.StatusMethodNotAllowed)
		return
	}
	raw := strings.TrimSpace(r.URL.Query().Get("file"))
	if raw == "" {
		http.Error(w, "file fehlt", http.StatusBadRequest)
		return
	}
	file, err := url.QueryUnescape(raw)
	if err != nil {
		http.Error(w, "ungültiger file", http.StatusBadRequest)
		return
	}
	file = strings.TrimSpace(file)
	// Basename only: no subfolders, no path traversal.
	if file == "" ||
		strings.Contains(file, "/") ||
		strings.Contains(file, "\\") ||
		filepath.Base(file) != file {
		http.Error(w, "ungültiger file", http.StatusBadRequest)
		return
	}
	ext := strings.ToLower(filepath.Ext(file))
	if ext != ".mp4" && ext != ".ts" {
		http.Error(w, "nicht erlaubt", http.StatusForbidden)
		return
	}
	s := getSettings()
	doneAbs, err := resolvePathRelativeToApp(s.DoneDir)
	if err != nil {
		http.Error(w, "doneDir auflösung fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	if strings.TrimSpace(doneAbs) == "" {
		http.Error(w, "doneDir ist leer", http.StatusBadRequest)
		return
	}
	// The source may live in done/, done/<subdir>, keep/ or keep/<subdir>.
	src, from, fi, err := resolveDoneFileByName(doneAbs, file)
	if err != nil {
		http.Error(w, "datei nicht gefunden", http.StatusNotFound)
		return
	}
	if fi != nil && fi.IsDir() {
		http.Error(w, "ist ein verzeichnis", http.StatusBadRequest)
		return
	}
	srcDir := filepath.Dir(src) // important: toggle within the file's actual folder
	// Toggle the "HOT " prefix.
	newFile := file
	if strings.HasPrefix(file, "HOT ") {
		newFile = strings.TrimPrefix(file, "HOT ")
	} else {
		newFile = "HOT " + file
	}
	dst := filepath.Join(srcDir, newFile) // toggle in the same folder (done or keep)
	// Refuse to clobber an existing file with the toggled name.
	if _, err := os.Stat(dst); err == nil {
		http.Error(w, "ziel existiert bereits", http.StatusConflict)
		return
	} else if !os.IsNotExist(err) {
		http.Error(w, "stat ziel fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	if err := renameWithRetry(src, dst); err != nil {
		if runtime.GOOS == "windows" && isSharingViolation(err) {
			http.Error(w, "rename fehlgeschlagen (Datei wird gerade abgespielt). Bitte erneut versuchen.", http.StatusConflict)
			return
		}
		http.Error(w, "rename fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	// NO rename of generated assets!
	// Assets stay canonical (without HOT prefix).
	canonicalID := stripHotPrefix(strings.TrimSuffix(file, filepath.Ext(file)))
	renameJobsOutputBasename(file, newFile)
	notifyDoneChanged()
	notifyJobsChanged()
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Cache-Control", "no-store")
	_ = json.NewEncoder(w).Encode(map[string]any{
		"ok":          true,
		"oldFile":     file,
		"newFile":     newFile,
		"canonicalID": canonicalID,
		"from":        from, // "done" | "keep"
	})
}
// maybeRemuxTS remuxes a .ts recording into an .mp4 container without
// re-encoding. Empty or non-.ts paths are ignored and return ("", nil).
// On success the original .ts file is removed (best effort) and the new
// .mp4 path is returned.
func maybeRemuxTS(path string) (string, error) {
	trimmed := strings.TrimSpace(path)
	if trimmed == "" {
		return "", nil
	}
	if !strings.EqualFold(filepath.Ext(trimmed), ".ts") {
		return "", nil
	}
	mp4Path := strings.TrimSuffix(trimmed, filepath.Ext(trimmed)) + ".mp4"
	// Container remux only — no re-encoding.
	if err := remuxTSToMP4(trimmed, mp4Path); err != nil {
		return "", err
	}
	// Drop the source .ts once the MP4 is in place.
	_ = os.Remove(trimmed)
	return mp4Path, nil
}
// maybeRemuxTSForJob remuxes a finished .ts recording into .mp4 while
// reporting progress on the job ("remuxing" phase, progress 10..69; 70
// belongs to the following "moving" phase).
//
// Empty or non-.ts paths are ignored and return ("", nil). On success the
// .ts source is removed and the new .mp4 path is returned.
func maybeRemuxTSForJob(job *RecordJob, path string) (string, error) {
	path = strings.TrimSpace(path)
	if path == "" {
		return "", nil
	}
	if !strings.EqualFold(filepath.Ext(path), ".ts") {
		return "", nil
	}
	mp4 := strings.TrimSuffix(path, filepath.Ext(path)) + ".mp4"
	// Input size for the byte-based progress fallback.
	var inSize int64
	if fi, err := os.Stat(path); err == nil && !fi.IsDir() {
		inSize = fi.Size()
	}
	// Duration (for clean time-based progress).
	var durSec float64
	{
		durCtx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
		durSec, _ = durationSecondsCached(durCtx, path)
		cancel()
	}
	const base = 10
	const span = 60 // 10..69 (70 starts the "moving" phase)
	lastProgress := base
	lastTick := time.Now().Add(-time.Second)
	// onRatio maps a 0..1 remux ratio into the 10..69 window,
	// monotonically increasing and lightly throttled.
	onRatio := func(r float64) {
		if r < 0 {
			r = 0
		}
		if r > 1 {
			r = 1
		}
		p := base + int(r*float64(span))
		if p >= 70 {
			p = 69
		}
		// Never report backwards progress.
		if p <= lastProgress {
			return
		}
		// Light throttle: at most ~every 150ms until near the end.
		if time.Since(lastTick) < 150*time.Millisecond && p < 79 {
			return
		}
		lastProgress = p
		lastTick = time.Now()
		setJobPhase(job, "remuxing", p)
	}
	remuxCtx, cancel := context.WithTimeout(context.Background(), 30*time.Minute)
	defer cancel()
	if err := remuxTSToMP4WithProgress(remuxCtx, path, mp4, durSec, inSize, onRatio); err != nil {
		return "", err
	}
	_ = os.Remove(path) // drop the .ts once the MP4 is complete
	setJobPhase(job, "remuxing", 69) // remux finished (never backwards)
	return mp4, nil
}
func moveFile(src, dst string) error {
// zuerst Rename (schnell)
if err := os.Rename(src, dst); err == nil {
return nil
} else {
// Fallback: Copy+Remove (z.B. bei EXDEV)
in, err2 := os.Open(src)
if err2 != nil {
return err
}
defer in.Close()
out, err2 := os.Create(dst)
if err2 != nil {
return err
}
if _, err2 := io.Copy(out, in); err2 != nil {
out.Close()
return err2
}
if err2 := out.Close(); err2 != nil {
return err2
}
return os.Remove(src)
}
}
const windowsSharingViolation syscall.Errno = 32 // ERROR_SHARING_VIOLATION
func isSharingViolation(err error) bool {
if runtime.GOOS != "windows" {
return false
}
// Windows: ERROR_SHARING_VIOLATION = 32, ERROR_LOCK_VIOLATION = 33
var pe *os.PathError
if errors.As(err, &pe) {
if errno, ok := pe.Err.(syscall.Errno); ok {
return errno == syscall.Errno(32) || errno == syscall.Errno(33)
}
}
// Fallback über Text
s := strings.ToLower(err.Error())
return strings.Contains(s, "sharing violation") ||
strings.Contains(s, "used by another process") ||
strings.Contains(s, "wird von einem anderen prozess verwendet")
}
// retryOnSharingViolation runs op, retrying for roughly four seconds
// (40 × 100ms) while it keeps failing with a Windows sharing/lock
// violation. Any other error aborts immediately; the last error is
// returned when all attempts are exhausted.
func retryOnSharingViolation(op func() error) error {
	var err error
	for i := 0; i < 40; i++ { // ~4s at 100ms intervals
		err = op()
		if err == nil {
			return nil
		}
		if !isSharingViolation(err) {
			return err
		}
		time.Sleep(100 * time.Millisecond)
	}
	return err
}

// removeWithRetry deletes path, retrying while the file is locked by
// another process (Windows).
func removeWithRetry(path string) error {
	return retryOnSharingViolation(func() error { return os.Remove(path) })
}

// renameWithRetry renames oldPath to newPath, retrying while either file
// is locked by another process (Windows).
func renameWithRetry(oldPath, newPath string) error {
	return retryOnSharingViolation(func() error { return os.Rename(oldPath, newPath) })
}
// moveToDoneDir moves a finished recording into the configured done
// directory (flat, no model subdirs) and returns the destination path.
// Name collisions are resolved via uniqueDestPath; the duration cache
// entry for the old path is invalidated afterwards.
func moveToDoneDir(src string) (string, error) {
	src = strings.TrimSpace(src)
	if src == "" {
		return "", fmt.Errorf("src empty")
	}
	s := getSettings()
	doneAbs, err := resolvePathRelativeToApp(s.DoneDir)
	if err != nil || strings.TrimSpace(doneAbs) == "" {
		// Fallback: use the raw configured value.
		doneAbs = strings.TrimSpace(s.DoneDir)
	}
	if strings.TrimSpace(doneAbs) == "" {
		return "", fmt.Errorf("doneDir empty")
	}
	// Normalize the source and make it absolute (best effort).
	srcAbs := filepath.Clean(src)
	if !filepath.IsAbs(srcAbs) {
		if abs, rerr := resolvePathRelativeToApp(srcAbs); rerr == nil && strings.TrimSpace(abs) != "" {
			srcAbs = abs
		}
	}
	fi, err := os.Stat(srcAbs)
	if err != nil || fi.IsDir() {
		return "", fmt.Errorf("src not found: %v", err)
	}
	file := filepath.Base(srcAbs)
	// Destination folder: always done/ (no model subdirs).
	dstDir := doneAbs
	if err := os.MkdirAll(dstDir, 0o755); err != nil {
		return "", err
	}
	// Pick a unique name on collisions.
	dst, err := uniqueDestPath(dstDir, file)
	if err != nil {
		return "", err
	}
	// Robust move against Windows file locks.
	// NOTE(review): renameWithRetry only retries lock errors; a cross-device
	// move (EXDEV) will fail here rather than fall back to copy — confirm
	// whether moveFile should be used instead.
	if err := renameWithRetry(srcAbs, dst); err != nil {
		return "", err
	}
	// Invalidate the cached duration for the old path.
	purgeDurationCacheForPath(srcAbs)
	return dst, nil
}
// recordStatus returns the JSON-encoded job for ?id=... (404 if unknown,
// 400 if the id is missing).
func recordStatus(w http.ResponseWriter, r *http.Request) {
	id := r.URL.Query().Get("id")
	if id == "" {
		http.Error(w, "id fehlt", http.StatusBadRequest)
		return
	}
	jobsMu.Lock()
	job, ok := jobs[id]
	jobsMu.Unlock()
	if !ok {
		http.Error(w, "job nicht gefunden", http.StatusNotFound)
		return
	}
	// Headers for consistency with the other record* handlers.
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Cache-Control", "no-store")
	// NOTE(review): job is encoded outside jobsMu; fields updated by the
	// recorder (e.g. SizeBytes) may race — same pattern as recordStop.
	_ = json.NewEncoder(w).Encode(job)
}
// recordStop stops the job identified by ?id=... (POST only) and returns
// the job as JSON. Unknown ids yield 404.
func recordStop(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Nur POST", http.StatusMethodNotAllowed)
		return
	}
	jobID := r.URL.Query().Get("id")

	jobsMu.Lock()
	target, found := jobs[jobID]
	jobsMu.Unlock()

	if !found {
		http.Error(w, "job nicht gefunden", http.StatusNotFound)
		return
	}

	stopJobsInternal([]*RecordJob{target})

	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(target)
}
// --- DVR-ähnlicher Recorder-Ablauf ---
// Entspricht grob dem RecordStream aus dem Channel-Snippet:
// RecordStream records a Chaturbate live stream into outputPath.
//
// Flow: fetch the room page → extract the HLS master URL from the room
// dossier → pick the best variant playlist → append every new segment to
// the output file until ctx is cancelled or the stream goes offline.
// job (optional) is published once the stream is confirmed available and
// its SizeBytes is updated (throttled) while recording.
func RecordStream(
	ctx context.Context,
	hc *HTTPClient,
	domain string,
	username string,
	outputPath string,
	httpCookie string,
	job *RecordJob,
) error {
	// 1) Load the room page.
	// Build the URL cleanly whether domain has a trailing slash or not.
	base := strings.TrimRight(domain, "/")
	pageURL := base + "/" + username
	body, err := hc.FetchPage(ctx, pageURL, httpCookie)
	if err != nil {
		return fmt.Errorf("seite laden: %w", err)
	}
	// 2) Extract the HLS URL from roomDossier (like DVR.ParseStream).
	hlsURL, err := ParseStream(body)
	if err != nil {
		return fmt.Errorf("stream-parsing: %w", err)
	}
	// 3) Fetch the master playlist and choose the best variant
	//    (like stream.GetPlaylist in the DVR).
	playlist, err := FetchPlaylist(ctx, hc, hlsURL, httpCookie)
	if err != nil {
		return fmt.Errorf("playlist abrufen: %w", err)
	}
	// Publish the job only now (the stream is really available).
	if job != nil {
		_ = publishJob(job.ID)
	}
	// Start the HLS preview once per job (best effort).
	if job != nil && strings.TrimSpace(job.PreviewDir) == "" {
		assetID := assetIDForJob(job)
		if strings.TrimSpace(assetID) == "" {
			assetID = job.ID
		}
		previewDir := filepath.Join(os.TempDir(), "rec_preview", assetID)
		jobsMu.Lock()
		job.PreviewDir = previewDir
		jobsMu.Unlock()
		if err := startPreviewHLS(ctx, job, playlist.PlaylistURL, previewDir, httpCookie, hc.userAgent); err != nil {
			fmt.Println("⚠️ preview start fehlgeschlagen:", err)
		}
	}
	// 4) Open the output file.
	file, err := os.Create(outputPath)
	if err != nil {
		return fmt.Errorf("datei erstellen: %w", err)
	}
	if job != nil {
		_ = publishJob(job.ID)
	}
	defer func() {
		_ = file.Close()
	}()
	// Live size tracking (for the UI).
	var written int64
	var lastPush time.Time
	var lastBytes int64
	// 5) Watch segments, analogous to WatchSegments + HandleSegment in the DVR.
	err = playlist.WatchSegments(ctx, hc, httpCookie, func(b []byte, duration float64) error {
		// Where the DVR calls ch.HandleSegment, we simply append to the file.
		if _, err := file.Write(b); err != nil {
			return fmt.Errorf("schreibe segment: %w", err)
		}
		// Throttled live-size updates for the UI:
		// at most every 750ms, or every 2 MiB of new data.
		written += int64(len(b))
		if job != nil {
			now := time.Now()
			if lastPush.IsZero() || now.Sub(lastPush) >= 750*time.Millisecond || (written-lastBytes) >= 2*1024*1024 {
				jobsMu.Lock()
				job.SizeBytes = written
				jobsMu.Unlock()
				notifyJobsChanged()
				lastPush = now
				lastBytes = written
			}
		}
		// Duration/size could be logged here if desired.
		_ = duration // currently unused
		return nil
	})
	if err != nil {
		return fmt.Errorf("watch segments: %w", err)
	}
	return nil
}
// RecordStreamMFC nimmt vorerst die URL 1:1 und ruft ffmpeg direkt darauf auf.
// In der Praxis musst du hier meist erst eine HLS-URL aus dem HTML extrahieren.
// RecordStreamMFC ist jetzt nur noch ein Wrapper um den bewährten MFC-Flow (runMFC).
// RecordStreamMFC records a MyFreeCams live stream into outputPath.
//
// It waits up to two minutes for the model's room to become PUBLIC,
// resolves the m3u8 URL, publishes the job, starts the preview (best
// effort) and then delegates the actual download to handleM3U8Mode.
func RecordStreamMFC(
	ctx context.Context,
	hc *HTTPClient,
	username string,
	outputPath string,
	job *RecordJob,
) error {
	mfc := NewMyFreeCams(username)
	// Instead of failing immediately: wait briefly for PUBLIC status.
	const waitPublicMax = 2 * time.Minute
	deadline := time.Now().Add(waitPublicMax)
	var lastSt *Status
	for {
		// Honor context cancel / stop.
		if err := ctx.Err(); err != nil {
			return err
		}
		st, err := mfc.GetStatus()
		if err == nil {
			tmp := st
			lastSt = &tmp
			if st == StatusPublic {
				break
			}
		}
		if time.Now().After(deadline) {
			if lastSt == nil {
				return fmt.Errorf("mfc: stream wurde nicht public innerhalb %s", waitPublicMax)
			}
			return fmt.Errorf("mfc: stream ist nicht public nach %s (letzter Status: %s)", waitPublicMax, *lastSt)
		}
		time.Sleep(5 * time.Second)
	}
	// Only now fetch the video URL (room is public).
	m3u8URL, err := mfc.GetVideoURL(false)
	if err != nil {
		return fmt.Errorf("mfc get video url: %w", err)
	}
	if strings.TrimSpace(m3u8URL) == "" {
		return fmt.Errorf("mfc: keine m3u8 URL gefunden")
	}
	// Publish the job only now (the stream is really available).
	if job != nil {
		_ = publishJob(job.ID)
	}
	// Start the preview (best effort; rolled back on failure).
	// NOTE(review): job.PreviewDir is written without holding jobsMu here,
	// unlike RecordStream — confirm whether that is intentional/safe.
	if job != nil && job.PreviewDir == "" {
		assetID := assetIDForJob(job)
		if strings.TrimSpace(assetID) == "" {
			assetID = job.ID
		}
		previewDir := filepath.Join(os.TempDir(), "rec_preview", assetID)
		job.PreviewDir = previewDir
		if err := startPreviewHLS(ctx, job, m3u8URL, previewDir, "", hc.userAgent); err != nil {
			fmt.Println("⚠️ preview start fehlgeschlagen:", err)
			job.PreviewDir = "" // rollback
		}
	}
	// Start the actual recording.
	return handleM3U8Mode(ctx, m3u8URL, outputPath, job)
}
// detectProvider classifies a stream URL by its hostname substring and
// returns "chaturbate", "mfc" or "unknown". Matching is case-insensitive.
func detectProvider(raw string) string {
	lowered := strings.ToLower(raw)
	switch {
	case strings.Contains(lowered, "chaturbate.com"):
		return "chaturbate"
	case strings.Contains(lowered, "myfreecams.com"):
		return "mfc"
	default:
		return "unknown"
	}
}
// --- helper ---
// extractUsername reduces a Chaturbate URL (or a bare name) to just the
// model's username: scheme, "www." and "chaturbate.com/" prefixes are
// stripped, everything from the first "/", "?" or "#" on is dropped, and
// stray slashes/backslashes are trimmed.
func extractUsername(input string) string {
	name := strings.TrimSpace(input)
	// Strip well-known prefixes in order (scheme → www → host).
	for _, prefix := range []string{"https://", "http://", "www.", "chaturbate.com/"} {
		name = strings.TrimPrefix(name, prefix)
	}
	// Cut off path remainders, query strings and fragments.
	if idx := strings.IndexAny(name, "/?#"); idx >= 0 {
		name = name[:idx]
	}
	// Defensive: remove any leftover slashes/backslashes.
	return strings.Trim(name, "/\\")
}
// Cookie-Hilfsfunktion (wie ParseCookies + AddCookie im DVR)
func addCookiesFromString(req *http.Request, cookieStr string) {
if cookieStr == "" {
return
}
pairs := strings.Split(cookieStr, ";")
for _, pair := range pairs {
parts := strings.SplitN(strings.TrimSpace(pair), "=", 2)
if len(parts) != 2 {
continue
}
name := strings.TrimSpace(parts[0])
value := strings.TrimSpace(parts[1])
if name == "" {
continue
}
req.AddCookie(&http.Cookie{
Name: name,
Value: value,
})
}
}
// ParseStream entspricht der DVR-Variante (roomDossier → hls_source)
// ParseStream extracts the HLS source URL from a Chaturbate room page:
// the window.initialRoomDossier JS string literal is located via regexp,
// its \uXXXX escapes decoded (Quote/Unquote round-trip, DVR-style), and
// the "hls_source" field read from the resulting JSON.
//
// The parameter was renamed from "html" to avoid shadowing the imported
// html package; the callers' behavior is unchanged.
func ParseStream(pageHTML string) (string, error) {
	matches := roomDossierRegexp.FindStringSubmatch(pageHTML)
	if len(matches) == 0 {
		return "", errors.New("room dossier nicht gefunden")
	}
	// DVR-style unicode decode: re-quote, collapse double-escaped \u,
	// then unquote to resolve the escapes.
	decoded, err := strconv.Unquote(
		strings.ReplaceAll(strconv.Quote(matches[1]), `\\u`, `\u`),
	)
	if err != nil {
		return "", fmt.Errorf("Unicode-decode failed: %w", err)
	}
	var rd struct {
		HLSSource string `json:"hls_source"`
	}
	if err := json.Unmarshal([]byte(decoded), &rd); err != nil {
		return "", fmt.Errorf("JSON-parse failed: %w", err)
	}
	if rd.HLSSource == "" {
		return "", errors.New("kein HLS-Quell-URL im JSON")
	}
	return rd.HLSSource, nil
}
// --- Playlist/WatchSegments wie gehabt ---
// Playlist describes the variant media playlist selected from an HLS
// master playlist (see FetchPlaylist).
type Playlist struct {
	PlaylistURL string // absolute URL of the chosen media playlist
	RootURL     string // master playlist directory; used to resolve segment URIs
	Resolution  int    // width in pixels of the chosen variant
	Framerate   int    // frames per second of the chosen variant (30 or 60)
}
// Resolution maps framerates to variant URIs for one video width.
// NOTE(review): appears unreferenced in this part of the file — possibly
// a legacy leftover from the DVR code; confirm before removing.
type Resolution struct {
	Framerate map[int]string
	Width     int
}
// nimmt jetzt *HTTPClient entgegen
// FetchPlaylist downloads the HLS master playlist at hlsSource (via the
// given *HTTPClient, passing httpCookie) and picks the best variant:
// largest width, and at equal width the higher framerate, where
// "FPS:60.0" in the variant name marks 60fps (otherwise 30fps assumed).
//
// Returns the resolved media-playlist URL plus the root URL later used
// to resolve segment URIs.
func FetchPlaylist(ctx context.Context, hc *HTTPClient, hlsSource, httpCookie string) (*Playlist, error) {
	if hlsSource == "" {
		return nil, errors.New("HLS-URL leer")
	}
	req, err := hc.NewRequest(ctx, http.MethodGet, hlsSource, httpCookie)
	if err != nil {
		return nil, fmt.Errorf("Fehler beim Erstellen der Playlist-Request: %w", err)
	}
	resp, err := hc.client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("Fehler beim Laden der Playlist: %w", err)
	}
	defer resp.Body.Close()
	// Without this check a non-200 response surfaces as an obscure
	// "keine gültige Master-Playlist" decode error.
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("playlist: unerwarteter HTTP-Status %d", resp.StatusCode)
	}
	playlist, listType, err := m3u8.DecodeFrom(resp.Body, true)
	if err != nil || listType != m3u8.MASTER {
		return nil, errors.New("keine gültige Master-Playlist")
	}
	master := playlist.(*m3u8.MasterPlaylist)
	var bestURI string
	var bestWidth int
	var bestFramerate int
	for _, variant := range master.Variants {
		if variant == nil || variant.Resolution == "" {
			continue
		}
		// Resolution has the form "WIDTHxHEIGHT"; only the width matters.
		parts := strings.Split(variant.Resolution, "x")
		if len(parts) != 2 {
			continue
		}
		width, err := strconv.Atoi(parts[0])
		if err != nil {
			continue
		}
		fr := 30
		if strings.Contains(variant.Name, "FPS:60.0") {
			fr = 60
		}
		if width > bestWidth || (width == bestWidth && fr > bestFramerate) {
			bestWidth = width
			bestFramerate = fr
			bestURI = variant.URI
		}
	}
	if bestURI == "" {
		return nil, errors.New("keine gültige Auflösung gefunden")
	}
	// Variant URIs are relative to the master playlist's directory.
	root := hlsSource[:strings.LastIndex(hlsSource, "/")+1]
	return &Playlist{
		PlaylistURL: root + bestURI,
		RootURL:     root,
		Resolution:  bestWidth,
		Framerate:   bestFramerate,
	}, nil
}
// nutzt ebenfalls *HTTPClient
// WatchSegments polls the media playlist and invokes handler with the raw
// bytes and duration of every new segment (tracked by ascending SeqId).
//
// It runs until ctx is cancelled, handler returns an error, or roughly
// maxEmptyRounds consecutive rounds pass without a reachable playlist or
// new segments — which is treated as "stream ended / offline". Individual
// failed segment downloads are skipped, not fatal.
func (p *Playlist) WatchSegments(
	ctx context.Context,
	hc *HTTPClient,
	httpCookie string,
	handler func([]byte, float64) error,
) error {
	var lastSeq int64 = -1
	emptyRounds := 0
	const maxEmptyRounds = 60 // was 5; tolerate longer stalls before giving up
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		// Fetch the media playlist.
		req, err := hc.NewRequest(ctx, http.MethodGet, p.PlaylistURL, httpCookie)
		if err != nil {
			return fmt.Errorf("Fehler beim Erstellen der Playlist-Request: %w", err)
		}
		resp, err := hc.client.Do(req)
		if err != nil {
			// Network failure counts as an empty round.
			emptyRounds++
			if emptyRounds >= maxEmptyRounds {
				return errors.New("❌ Playlist nicht mehr erreichbar Stream vermutlich offline")
			}
			time.Sleep(2 * time.Second)
			continue
		}
		playlist, listType, err := m3u8.DecodeFrom(resp.Body, true)
		resp.Body.Close()
		if err != nil || listType != m3u8.MEDIA {
			emptyRounds++
			if emptyRounds >= maxEmptyRounds {
				return errors.New("❌ Fehlerhafte Playlist möglicherweise offline")
			}
			time.Sleep(2 * time.Second)
			continue
		}
		media := playlist.(*m3u8.MediaPlaylist)
		newSegment := false
		for _, segment := range media.Segments {
			if segment == nil {
				continue
			}
			// Skip segments that were already handled.
			if int64(segment.SeqId) <= lastSeq {
				continue
			}
			lastSeq = int64(segment.SeqId)
			newSegment = true
			// Segment URIs are relative to the playlist root.
			segmentURL := p.RootURL + segment.URI
			segReq, err := hc.NewRequest(ctx, http.MethodGet, segmentURL, httpCookie)
			if err != nil {
				continue
			}
			segResp, err := hc.client.Do(segReq)
			if err != nil {
				continue
			}
			data, err := io.ReadAll(segResp.Body)
			segResp.Body.Close()
			if err != nil || len(data) == 0 {
				// Best effort: a single broken segment is skipped.
				continue
			}
			if err := handler(data, segment.Duration); err != nil {
				return err
			}
		}
		if newSegment {
			emptyRounds = 0
		} else {
			emptyRounds++
			if emptyRounds >= maxEmptyRounds {
				return errors.New("🛑 Keine neuen HLS-Segmente empfangen Stream vermutlich beendet oder offline.")
			}
		}
		time.Sleep(1 * time.Second)
	}
}
/* ───────────────────────────────
   MyFreeCams (adopted flow)
   ─────────────────────────────── */

// MyFreeCams represents one MyFreeCams model and the preview
// attributes scraped from its share page.
type MyFreeCams struct {
	Username string            // model name as it appears in URLs
	Attrs    map[string]string // data-cam-preview-* attributes from share.myfreecams.com
	VideoURL string            // cached preview HLS playlist URL ("" until resolved)
}

// NewMyFreeCams creates a handle for the given username with an empty
// (but non-nil) attribute map.
func NewMyFreeCams(username string) *MyFreeCams {
	mfc := &MyFreeCams{Username: username}
	mfc.Attrs = make(map[string]string)
	return mfc
}

// GetWebsiteURL returns the public room URL of the model.
func (m *MyFreeCams) GetWebsiteURL() string {
	const base = "https://www.myfreecams.com/#"
	return base + m.Username
}
// GetVideoURL returns the preview HLS playlist URL for the model,
// reusing the cached value unless refresh is true. It returns ("",
// nil) when the preview attributes have not been populated yet (see
// GetStatus). The constructed preview URL is validated and, if it
// points at a master playlist, resolved to a concrete variant via
// getWantedResolutionPlaylist.
func (m *MyFreeCams) GetVideoURL(refresh bool) (string, error) {
	if m.VideoURL != "" && !refresh {
		return m.VideoURL, nil
	}
	// Without the model-id attribute there is nothing to build.
	modelIDRaw, ok := m.Attrs["data-cam-preview-model-id-value"]
	if !ok {
		return "", nil
	}
	modelID, err := strconv.Atoi(modelIDRaw)
	if err != nil {
		return "", fmt.Errorf("model-id parse error: %w", err)
	}
	serverID := m.Attrs["data-cam-preview-server-id-value"]
	prefix := ""
	if strings.ToLower(m.Attrs["data-cam-preview-is-wzobs-value"]) == "true" {
		prefix = "a_"
	}
	// Stream IDs are the model ID offset by 100 million.
	streamID := 100000000 + modelID
	playlistURL := fmt.Sprintf(
		"https://previews.myfreecams.com/hls/NxServer/%s/ngrp:mfc_%s%d.f4v_mobile_mhp1080_previewurl/playlist.m3u8",
		serverID, prefix, streamID,
	)
	// Validate (HTTP 200) and pick the wanted resolution if needed.
	resolved, err := getWantedResolutionPlaylist(playlistURL)
	if err != nil {
		return "", err
	}
	m.VideoURL = resolved
	return m.VideoURL, nil
}
// GetStatus scrapes https://share.myfreecams.com/<username> and maps
// the page contents onto a Status:
//   - HTTP 404 or no tracking.php model_id  -> StatusNotExist
//   - page without a ".campreview" element  -> StatusOffline
//   - campreview with a playable preview    -> StatusPublic
//   - campreview without a playable preview -> StatusPrivate
//
// Side effects: stores the scraped data-cam-preview-* attributes in
// m.Attrs and may cache the preview URL via GetVideoURL(true).
func (m *MyFreeCams) GetStatus() (Status, error) {
	// 1) Probe the share page (exists / does not exist).
	shareURL := "https://share.myfreecams.com/" + m.Username
	resp, err := http.Get(shareURL)
	if err != nil {
		return StatusUnknown, err
	}
	defer resp.Body.Close()
	if resp.StatusCode == 404 {
		return StatusNotExist, nil
	}
	if resp.StatusCode != 200 {
		return StatusUnknown, fmt.Errorf("HTTP %d", resp.StatusCode)
	}
	// Read the whole body once: it is needed both for the byte search
	// below and for HTML parsing via goquery.
	bodyBytes, err := io.ReadAll(resp.Body)
	if err != nil {
		return StatusUnknown, err
	}
	// 2) Locate the "tracking.php?" URL and check it carries a model_id.
	start := bytes.Index(bodyBytes, []byte("https://www.myfreecams.com/php/tracking.php?"))
	if start == -1 {
		// No tracking parameters -> treat like a non-existent model.
		return StatusNotExist, nil
	}
	end := bytes.IndexByte(bodyBytes[start:], '"')
	if end == -1 {
		return StatusUnknown, errors.New("tracking url parse failed")
	}
	raw := string(bodyBytes[start : start+end])
	u, err := url.Parse(raw)
	if err != nil {
		return StatusUnknown, fmt.Errorf("tracking url invalid: %w", err)
	}
	qs := u.Query()
	if qs.Get("model_id") == "" {
		return StatusNotExist, nil
	}
	// 3) Parse the HTML and read the <div class="campreview" ...> attributes.
	doc, err := goquery.NewDocumentFromReader(bytes.NewReader(bodyBytes))
	if err != nil {
		return StatusUnknown, err
	}
	params := doc.Find(".campreview").First()
	if params.Length() == 0 {
		// No campreview element -> model is offline.
		return StatusOffline, nil
	}
	attrs := map[string]string{}
	// NOTE(review): params is .First(), so this Each visits at most one
	// node; the loop form is harmless but could be a plain Attr() call.
	params.Each(func(_ int, s *goquery.Selection) {
		for _, a := range []string{
			"data-cam-preview-server-id-value",
			"data-cam-preview-model-id-value",
			"data-cam-preview-is-wzobs-value",
		} {
			if v, ok := s.Attr(a); ok {
				attrs[a] = v
			}
		}
	})
	m.Attrs = attrs
	// 4) Try to derive the preview HLS URL; a playable URL means public.
	uStr, err := m.GetVideoURL(true)
	if err != nil {
		return StatusUnknown, err
	}
	if uStr != "" {
		return StatusPublic, nil
	}
	// campreview present but no playable URL -> treat as private.
	return StatusPrivate, nil
}
// runMFC records a public MyFreeCams stream of the given username into
// outArg. It fails if the model's status is anything other than
// public, or if no preview m3u8 URL can be resolved; the actual
// recording is delegated to handleM3U8Mode (without a job handle).
func runMFC(ctx context.Context, username string, outArg string) error {
	model := NewMyFreeCams(username)
	status, err := model.GetStatus()
	if err != nil {
		return err
	}
	if status != StatusPublic {
		return fmt.Errorf("Stream ist nicht öffentlich (Status: %s)", status)
	}
	playlistURL, err := model.GetVideoURL(false)
	if err != nil {
		return err
	}
	if playlistURL == "" {
		return errors.New("keine m3u8 URL gefunden")
	}
	return handleM3U8Mode(ctx, playlistURL, outArg, nil)
}
/* ───────────────────────────────
   Shared HLS/M3U8 helpers (MFC)
   ─────────────────────────────── */

// getWantedResolutionPlaylist fetches playlistURL and resolves it to a
// media-playlist URL:
//   - MEDIA playlist: the input URL itself is returned;
//   - MASTER playlist: the best variant is chosen (greatest height,
//     ties broken by higher frame rate) and its URI is absolutized.
//
// Fix: variant URIs are now resolved with url.ResolveReference, so
// root-relative URIs ("/path/x.m3u8") and "../" paths are handled
// correctly instead of being naively appended to the master
// playlist's directory (plain relative and absolute URIs resolve
// exactly as before).
func getWantedResolutionPlaylist(playlistURL string) (string, error) {
	resp, err := http.Get(playlistURL)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return "", fmt.Errorf("HTTP %d beim Abruf der m3u8", resp.StatusCode)
	}
	playlist, listType, err := m3u8.DecodeFrom(resp.Body, true)
	if err != nil {
		return "", fmt.Errorf("m3u8 parse: %w", err)
	}
	if listType == m3u8.MEDIA {
		return playlistURL, nil
	}
	master := playlist.(*m3u8.MasterPlaylist)

	// Resolution comes as "WxH" — compare by height (second component),
	// tie-break on frame rate.
	var (
		bestURI    string
		bestHeight int
		bestFPS    float64
	)
	for _, v := range master.Variants {
		if v == nil {
			continue
		}
		height := 0
		if v.Resolution != "" {
			if parts := strings.Split(v.Resolution, "x"); len(parts) == 2 {
				if h, err := strconv.Atoi(parts[1]); err == nil {
					height = h
				}
			}
		}
		fps := 30.0
		if v.FrameRate > 0 {
			fps = v.FrameRate
		} else if strings.Contains(v.Name, "FPS:60") {
			fps = 60
		}
		if height > bestHeight || (height == bestHeight && fps > bestFPS) {
			bestHeight = height
			bestFPS = fps
			bestURI = v.URI
		}
	}
	if bestURI == "" {
		return "", errors.New("Master-Playlist ohne gültige Varianten")
	}
	// Absolutize the variant URI against the master playlist URL.
	base, baseErr := url.Parse(playlistURL)
	ref, refErr := url.Parse(bestURI)
	if baseErr == nil && refErr == nil {
		return base.ResolveReference(ref).String(), nil
	}
	// Fallback (unparseable URL): previous naive behavior.
	if strings.HasPrefix(bestURI, "http://") || strings.HasPrefix(bestURI, "https://") {
		return bestURI, nil
	}
	return playlistURL[:strings.LastIndex(playlistURL, "/")+1] + bestURI, nil
}
// handleM3U8Mode records an m3u8 stream into outFile via ffmpeg
// ("-c copy", no re-encoding).
//
// Steps:
//  1. validate the URL (http/https only),
//  2. probe it once with a context-aware GET (must answer 200),
//  3. run ffmpeg through exec.CommandContext so cancelling ctx kills
//     the process (this is what makes "stop" work here),
//  4. while ffmpeg runs and job is non-nil, poll the output file size
//     once per second and publish it via jobsMu/notifyJobsChanged.
//
// On ffmpeg failure the captured stderr is appended to the error;
// returns nil on a clean exit.
func handleM3U8Mode(ctx context.Context, m3u8URL, outFile string, job *RecordJob) error {
	// Validate the URL scheme.
	u, err := url.Parse(m3u8URL)
	if err != nil || (u.Scheme != "http" && u.Scheme != "https") {
		return fmt.Errorf("ungültige URL: %q", m3u8URL)
	}
	// HTTP reachability check WITH context.
	req, err := http.NewRequestWithContext(ctx, "GET", m3u8URL, nil)
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	// Drain and close so the connection can be reused.
	io.Copy(io.Discard, resp.Body)
	resp.Body.Close()
	if resp.StatusCode != 200 {
		return fmt.Errorf("HTTP %d beim Abruf der m3u8", resp.StatusCode)
	}
	if strings.TrimSpace(outFile) == "" {
		return errors.New("output file path leer")
	}
	// ffmpeg with context (STOP WORKS HERE: cancelling ctx kills it).
	cmd := exec.CommandContext(
		ctx,
		ffmpegPath,
		"-y",
		"-hide_banner",
		"-nostats",
		"-loglevel", "warning",
		"-i", m3u8URL,
		"-c", "copy",
		outFile,
	)
	var stderr bytes.Buffer
	cmd.Stdout = io.Discard
	cmd.Stderr = &stderr
	// Live size polling while ffmpeg runs; the goroutine exits on ctx
	// cancellation or when stopStat is closed after cmd.Run returns.
	stopStat := make(chan struct{})
	if job != nil {
		go func() {
			t := time.NewTicker(1 * time.Second)
			defer t.Stop()
			var last int64
			for {
				select {
				case <-ctx.Done():
					return
				case <-stopStat:
					return
				case <-t.C:
					fi, err := os.Stat(outFile)
					if err != nil {
						// File may not exist yet; try again next tick.
						continue
					}
					sz := fi.Size()
					// Only publish actual changes to avoid needless notifications.
					if sz > 0 && sz != last {
						jobsMu.Lock()
						job.SizeBytes = sz
						jobsMu.Unlock()
						notifyJobsChanged()
						last = sz
					}
				}
			}
		}()
	}
	// IMPORTANT: actually run ffmpeg to completion (blocks).
	err = cmd.Run()
	close(stopStat)
	if err != nil {
		msg := strings.TrimSpace(stderr.String())
		if msg != "" {
			return fmt.Errorf("ffmpeg m3u8 failed: %w: %s", err, msg)
		}
		return fmt.Errorf("ffmpeg m3u8 failed: %w", err)
	}
	return nil
}
/* ───────────────────────────────
   Small helpers for MFC
   ─────────────────────────────── */

// extractMFCUsername derives a MyFreeCams username from free-form
// input: a URL fragment ("...#name") wins, otherwise the last path
// segment of a host-qualified URL, otherwise the trimmed raw input.
func extractMFCUsername(input string) string {
	trimmed := strings.TrimSpace(input)
	if trimmed == "" {
		return ""
	}
	parsed, err := url.Parse(trimmed)
	if err != nil {
		// Unparseable: fall back to the raw (trimmed) input.
		return trimmed
	}
	// 1) URL with a fragment (#username).
	if parsed.Fragment != "" {
		return strings.Trim(strings.TrimSpace(parsed.Fragment), "/")
	}
	// 2) URL with a host: take the last path segment.
	if parsed.Host != "" {
		cleaned := strings.Trim(parsed.Path, "/")
		if cleaned == "" {
			return ""
		}
		segments := strings.Split(cleaned, "/")
		return strings.TrimSpace(segments[len(segments)-1])
	}
	// 3) Fallback: raw input.
	return trimmed
}
// readLine reads one line from stdin and strips the trailing newline
// (including a Windows "\r"). On read error (e.g. EOF) the partial
// line read so far is returned.
func readLine() string {
	line, _ := bufio.NewReader(os.Stdin).ReadString('\n') // error deliberately ignored: partial line is still useful
	return strings.TrimRight(line, "\r\n")
}
// fileExists reports whether path can be stat'ed, i.e. it names an
// existing (and accessible) file or directory.
func fileExists(path string) bool {
	if _, err := os.Stat(path); err != nil {
		return false
	}
	return true
}