nsfwapp/backend/main.go
2026-02-09 12:29:19 +01:00

4434 lines
104 KiB
Go
Raw Blame History

This file contains invisible Unicode characters

This file contains invisible Unicode characters that are indistinguishable to humans but may be processed differently by a computer. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

// backend\main.go
package main
import (
"bufio"
"bytes"
"context"
"crypto/sha1"
"encoding/binary"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"image"
"image/color"
"image/draw"
"image/jpeg"
"image/png"
"io"
"log"
"math"
"math/rand"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/grafov/m3u8"
gocpu "github.com/shirou/gopsutil/v3/cpu"
godisk "github.com/shirou/gopsutil/v3/disk"
"github.com/sqweek/dialog"
"golang.org/x/image/font"
"golang.org/x/image/font/basicfont"
"golang.org/x/image/math/fixed"
)
// roomDossierRegexp extracts the payload assigned to
// window.initialRoomDossier in scraped room HTML (non-greedy match
// between the quotes).
var roomDossierRegexp = regexp.MustCompile(`window\.initialRoomDossier = "(.*?)"`)

// JobStatus is the lifecycle state of a RecordJob.
type JobStatus string

const (
	JobRunning  JobStatus = "running"
	JobPostwork JobStatus = "postwork" // recording is over, post-processing still running
	JobFinished JobStatus = "finished"
	JobStopped  JobStatus = "stopped"
	JobFailed   JobStatus = "failed"
)
// RecordJob describes one recording task: the live source, the output
// file, lifecycle state, probed media metadata, and all transient
// preview/thumbnail machinery. Fields tagged `json:"-"` (and the
// unexported ones) never reach the frontend.
type RecordJob struct {
	ID              string     `json:"id"`
	SourceURL       string     `json:"sourceUrl"`
	Output          string     `json:"output"`
	Status          JobStatus  `json:"status"`
	StartedAt       time.Time  `json:"startedAt"`
	EndedAt         *time.Time `json:"endedAt,omitempty"`
	DurationSeconds float64    `json:"durationSeconds,omitempty"`
	SizeBytes       int64      `json:"sizeBytes,omitempty"`
	VideoWidth      int        `json:"videoWidth,omitempty"`
	VideoHeight     int        `json:"videoHeight,omitempty"`
	FPS             float64    `json:"fps,omitempty"`
	Hidden          bool       `json:"-"` // kept out of UI snapshots (see jobsSnapshotJSON)
	Error           string     `json:"error,omitempty"`
	PreviewDir      string     `json:"-"`
	PreviewImage    string     `json:"-"`
	previewCmd      *exec.Cmd  `json:"-"` // running preview ffmpeg process, if any
	LiveThumbStarted bool      `json:"-"`
	// Preview state (e.g. private/offline, derived from ffmpeg HTTP errors)
	PreviewState    string `json:"previewState,omitempty"`    // "", "private", "offline", "error"
	PreviewStateAt  string `json:"previewStateAt,omitempty"`  // RFC3339Nano
	PreviewStateMsg string `json:"previewStateMsg,omitempty"` // short info text
	// Thumbnail cache (prevents spawning one ffmpeg per HTTP request)
	previewMu     sync.Mutex `json:"-"`
	previewJpeg   []byte     `json:"-"`
	previewJpegAt time.Time  `json:"-"`
	previewGen    bool       `json:"-"`
	PreviewM3U8   string     `json:"-"` // HLS url that ffmpeg uses as input
	PreviewCookie string     `json:"-"` // Cookie header (if required)
	PreviewUA     string     `json:"-"` // user-agent
	previewCancel  context.CancelFunc `json:"-"`
	previewLastHit time.Time          `json:"-"` // last preview request; idle killer uses this
	previewStartMu sync.Mutex         `json:"-"`
	// Frontend progress during stop/finalize
	Phase       string             `json:"phase,omitempty"`    // stopping | remuxing | moving
	Progress    int                `json:"progress,omitempty"` // 0..100
	PostWorkKey string             `json:"postWorkKey,omitempty"`
	PostWork    *PostWorkKeyStatus `json:"postWork,omitempty"`
	cancel      context.CancelFunc `json:"-"` // cancels the recording itself
}
// dummyResponseWriter is a no-op http.ResponseWriter for invoking
// handlers internally without a real client connection.
type dummyResponseWriter struct {
	h http.Header
}

// ffprobeStreamInfo mirrors the per-stream fields requested from
// `ffprobe -show_entries stream=width,height,avg_frame_rate,r_frame_rate`.
type ffprobeStreamInfo struct {
	Width        int    `json:"width"`
	Height       int    `json:"height"`
	AvgFrameRate string `json:"avg_frame_rate"`
	RFrameRate   string `json:"r_frame_rate"`
}

// ffprobeInfo is the top-level JSON shape of ffprobe's output.
type ffprobeInfo struct {
	Streams []ffprobeStreamInfo `json:"streams"`
}
// parseFFRate converts an ffprobe rate expression (e.g. "30000/1001",
// "25", "0/0") into frames per second. Unparseable or degenerate
// inputs yield 0.
func parseFFRate(s string) float64 {
	s = strings.TrimSpace(s)
	if s == "" || s == "0/0" {
		return 0
	}
	// fractional form, e.g. "30000/1001"
	if numStr, denStr, isFrac := strings.Cut(s, "/"); isFrac {
		num, errN := strconv.ParseFloat(strings.TrimSpace(numStr), 64)
		den, errD := strconv.ParseFloat(strings.TrimSpace(denStr), 64)
		if errN != nil || errD != nil || den == 0 {
			return 0
		}
		return num / den
	}
	// plain number, e.g. "25"
	v, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return 0
	}
	return v
}
// probeVideoProps runs ffprobe on filePath and returns the first video
// stream's width, height and frame rate. avg_frame_rate is preferred;
// r_frame_rate serves as fallback when the average is unusable.
func probeVideoProps(ctx context.Context, filePath string) (w int, h int, fps float64, err error) {
	filePath = strings.TrimSpace(filePath)
	if filePath == "" {
		return 0, 0, 0, fmt.Errorf("empty path")
	}
	cmd := exec.CommandContext(ctx, ffprobePath,
		"-v", "error",
		"-select_streams", "v:0",
		"-show_entries", "stream=width,height,avg_frame_rate,r_frame_rate",
		"-of", "json",
		filePath,
	)
	out, err := cmd.Output()
	if err != nil {
		return 0, 0, 0, err
	}
	var info ffprobeInfo
	if err := json.Unmarshal(out, &info); err != nil {
		return 0, 0, 0, err
	}
	if len(info.Streams) == 0 {
		return 0, 0, 0, fmt.Errorf("no video stream")
	}
	s := info.Streams[0]
	w, h = s.Width, s.Height
	// prefer avg_frame_rate, fall back to r_frame_rate
	fps = parseFFRate(s.AvgFrameRate)
	if fps <= 0 {
		fps = parseFFRate(s.RFrameRate)
	}
	return w, h, fps, nil
}
// Header lazily allocates and returns the header map so the dummy can
// be passed wherever an http.ResponseWriter is required.
func (d *dummyResponseWriter) Header() http.Header {
	if d.h == nil {
		d.h = make(http.Header)
	}
	return d.h
}

// Write discards the payload but reports it as fully written.
func (d *dummyResponseWriter) Write(b []byte) (int, error) { return len(b), nil }

// WriteHeader ignores the status code.
func (d *dummyResponseWriter) WriteHeader(statusCode int) {}
var (
	jobs   = map[string]*RecordJob{} // all known jobs, keyed by job ID
	jobsMu = sync.Mutex{}            // guards jobs and mutable RecordJob fields
)

// serverStartedAt anchors the uptime reported in perf snapshots.
var serverStartedAt = time.Now()

// lastCPUUsageBits stores the most recent CPU percentage as raw
// float64 bits so it can be read and written atomically.
var lastCPUUsageBits uint64 // atomic float64 bits

// setLastCPUUsage atomically publishes the latest CPU usage sample.
func setLastCPUUsage(v float64) { atomic.StoreUint64(&lastCPUUsageBits, math.Float64bits(v)) }

// getLastCPUUsage atomically reads the latest CPU usage sample.
func getLastCPUUsage() float64 { return math.Float64frombits(atomic.LoadUint64(&lastCPUUsageBits)) }
// -------------------- SSE: /api/record/stream --------------------

// sseHub fans out byte payloads to a set of subscriber channels.
type sseHub struct {
	mu      sync.Mutex
	clients map[chan []byte]struct{}
}

// newSSEHub returns an empty, ready-to-use hub.
func newSSEHub() *sseHub {
	return &sseHub{clients: make(map[chan []byte]struct{})}
}

// add registers ch as a subscriber.
func (h *sseHub) add(ch chan []byte) {
	h.mu.Lock()
	defer h.mu.Unlock()
	h.clients[ch] = struct{}{}
}

// remove unregisters ch and closes it so any reader loop terminates.
func (h *sseHub) remove(ch chan []byte) {
	h.mu.Lock()
	delete(h.clients, ch)
	h.mu.Unlock()
	close(ch)
}

// broadcast delivers b to every subscriber without blocking: a client
// whose buffer is full simply drops this update and catches up on the
// next one.
func (h *sseHub) broadcast(b []byte) {
	h.mu.Lock()
	defer h.mu.Unlock()
	for ch := range h.clients {
		select {
		case ch <- b:
		default:
			// slow client: skip
		}
	}
}
// doneHub broadcasts "done list changed" events to SSE clients.
var doneHub = newSSEHub()

// doneNotify coalesces change notifications: capacity 1 means a
// pending notification absorbs all further ones.
var doneNotify = make(chan struct{}, 1)
// optional: monotonic counter so clients can trigger even on otherwise
// identical payloads
var doneSeq uint64

// notifyDoneChanged queues a "done changed" broadcast without ever
// blocking the caller.
func notifyDoneChanged() {
	select {
	case doneNotify <- struct{}{}:
	default:
	}
}

// recordJobsHub / recordJobsNotify: same hub + debounce pattern for
// jobs-list updates.
var recordJobsHub = newSSEHub()
var recordJobsNotify = make(chan struct{}, 1)
// startPreviewIdleKiller launches a 5s-ticker goroutine that stops
// preview ffmpeg processes whose job is no longer running or that have
// not been requested (hovered) for more than 10 minutes.
func startPreviewIdleKiller() {
	t := time.NewTicker(5 * time.Second)
	go func() {
		defer t.Stop()
		for range t.C {
			// Snapshot job pointers under lock, inspect afterwards to
			// keep the critical section short.
			jobsMu.Lock()
			list := make([]*RecordJob, 0, len(jobs))
			for _, j := range jobs {
				if j != nil {
					list = append(list, j)
				}
			}
			jobsMu.Unlock()
			for _, j := range list {
				jobsMu.Lock()
				cmdRunning := j.previewCmd != nil
				last := j.previewLastHit
				st := j.Status
				jobsMu.Unlock()
				if !cmdRunning {
					continue
				}
				// job no longer running, or hover gone for >10 minutes
				if st != JobRunning || (!last.IsZero() && time.Since(last) > 10*time.Minute) {
					stopPreview(j)
				}
			}
		}
	}()
}
// init wires up the background machinery: the ffmpeg semaphores, the
// adaptive CPU-based limit controller, the preview idle killer, and
// two debounced SSE broadcasters.
func init() {
	initFFmpegSemaphores()
	startAdaptiveSemController(context.Background())
	startPreviewIdleKiller()
	// Debounced broadcaster (jobs): sleep briefly after the first
	// notification, then drain any queued ones so a burst collapses
	// into a single broadcast.
	go func() {
		for range recordJobsNotify {
			time.Sleep(40 * time.Millisecond)
			for {
				select {
				case <-recordJobsNotify:
				default:
					goto SEND
				}
			}
		SEND:
			recordJobsHub.broadcast(jobsSnapshotJSON())
		}
	}()
	// Debounced broadcaster (done changed): same drain pattern, but
	// emits a tiny "doneChanged" event.
	go func() {
		for range doneNotify {
			time.Sleep(40 * time.Millisecond)
			for {
				select {
				case <-doneNotify:
				default:
					goto SEND
				}
			}
		SEND:
			seq := atomic.AddUint64(&doneSeq, 1)
			// payload deliberately small: the client should just "refetch done"
			b := []byte(fmt.Sprintf(`{"type":"doneChanged","seq":%d,"ts":%d}`, seq, time.Now().UnixMilli()))
			doneHub.broadcast(b)
		}
	}()
}
// publishJob flips a hidden job to visible and pushes a jobs update to
// SSE clients. It reports whether anything actually changed (the job
// must exist and currently be hidden).
func publishJob(jobID string) bool {
	jobsMu.Lock()
	job := jobs[jobID]
	if job == nil || !job.Hidden {
		jobsMu.Unlock()
		return false
	}
	job.Hidden = false
	jobsMu.Unlock()
	notifyJobsChanged()
	return true
}
// notifyJobsChanged queues a jobs broadcast without blocking; the
// debounced broadcaster started in init() picks it up.
func notifyJobsChanged() {
	select {
	case recordJobsNotify <- struct{}{}:
	default:
	}
}
// jobsSnapshotJSON serializes all visible jobs as a JSON array, newest
// first. Hidden jobs are excluded entirely.
// NOTE(review): `c := *j` shallow-copies the whole RecordJob including
// its sync.Mutex fields (previewMu, previewStartMu). The copies are
// never locked so this works in practice, but `go vet` flags mutex
// copies — confirm this is intentional.
func jobsSnapshotJSON() []byte {
	jobsMu.Lock()
	list := make([]*RecordJob, 0, len(jobs))
	for _, j := range jobs {
		// never send hidden jobs to the UI (prevents "UI jumping")
		if j == nil || j.Hidden {
			continue
		}
		c := *j
		c.cancel = nil // do not serialize
		list = append(list, &c)
	}
	jobsMu.Unlock()
	// optional: newest first
	sort.Slice(list, func(i, j int) bool {
		return list[i].StartedAt.After(list[j].StartedAt)
	})
	b, _ := json.Marshal(list)
	return b
}
// recordStream is the SSE endpoint that pushes the current jobs
// snapshot whenever jobs change, plus periodic keepalive comments.
// GET only; requires a flushable ResponseWriter.
func recordStream(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Nur GET erlaubt", http.StatusMethodNotAllowed)
		return
	}
	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "Streaming nicht unterstützt", http.StatusInternalServerError)
		return
	}
	// SSE headers
	h := w.Header()
	h.Set("Content-Type", "text/event-stream; charset=utf-8")
	h.Set("Cache-Control", "no-cache, no-transform")
	h.Set("Connection", "keep-alive")
	h.Set("X-Accel-Buffering", "no") // helpful behind reverse proxies
	// start immediately
	w.WriteHeader(http.StatusOK)
	// writeEvent emits one SSE event; false => client gone/write error.
	writeEvent := func(event string, data []byte) bool {
		// returns false => client gone / write error
		if event != "" {
			if _, err := fmt.Fprintf(w, "event: %s\n", event); err != nil {
				return false
			}
		}
		if len(data) > 0 {
			if _, err := fmt.Fprintf(w, "data: %s\n\n", data); err != nil {
				return false
			}
		} else {
			// empty payload is fine (terminator only)
			if _, err := io.WriteString(w, "\n"); err != nil {
				return false
			}
		}
		flusher.Flush()
		return true
	}
	// writeComment emits an SSE comment line (ignored by clients).
	writeComment := func(msg string) bool {
		if _, err := fmt.Fprintf(w, ": %s\n\n", msg); err != nil {
			return false
		}
		flusher.Flush()
		return true
	}
	// reconnect hint for the browser's EventSource
	if _, err := fmt.Fprintf(w, "retry: 3000\n\n"); err != nil {
		return
	}
	flusher.Flush()
	// channel + hub registration
	ch := make(chan []byte, 32)
	recordJobsHub.add(ch)
	defer recordJobsHub.remove(ch)
	// initial snapshot right away
	if b := jobsSnapshotJSON(); len(b) > 0 {
		if !writeEvent("jobs", b) {
			return
		}
	}
	ctx := r.Context()
	// ping/keepalive
	ping := time.NewTicker(15 * time.Second)
	defer ping.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case b, ok := <-ch:
			if !ok {
				return
			}
			if len(b) == 0 {
				continue
			}
			// Burst coalescing: when many updates arrive quickly, send
			// only the newest one (bounded drain of up to 64 pending).
			last := b
		drain:
			for i := 0; i < 64; i++ {
				select {
				case nb, ok := <-ch:
					if !ok {
						return
					}
					if len(nb) > 0 {
						last = nb
					}
				default:
					break drain
				}
			}
			if !writeEvent("jobs", last) {
				return
			}
		case <-ping.C:
			// keepalive as SSE comment (harmless, keeps connections open)
			if !writeComment(fmt.Sprintf("ping %d", time.Now().Unix())) {
				return
			}
		}
	}
}
// Locate the ffmpeg/ffprobe binaries (env override, next to the EXE,
// or PATH). Resolved once at startup.
var ffmpegPath = detectFFmpegPath()
var ffprobePath = detectFFprobePath()
// detectFFprobePath locates the ffprobe binary, in priority order:
// FFPROBE_PATH env var, same directory as the resolved ffmpeg binary,
// next to the executable, PATH lookup, and finally the bare name.
func detectFFprobePath() string {
	// 1) explicit env override
	if env := strings.TrimSpace(os.Getenv("FFPROBE_PATH")); env != "" {
		if abs, err := filepath.Abs(env); err == nil {
			return abs
		}
		return env
	}
	isFile := func(p string) bool {
		fi, err := os.Stat(p)
		return err == nil && !fi.IsDir()
	}
	// 2) same folder as ffmpeg(.exe)
	if fp := strings.TrimSpace(ffmpegPath); fp != "" && fp != "ffmpeg" {
		ext := ""
		if strings.HasSuffix(strings.ToLower(fp), ".exe") {
			ext = ".exe"
		}
		cand := filepath.Join(filepath.Dir(fp), "ffprobe"+ext)
		if isFile(cand) {
			return cand
		}
	}
	// 3) next to the executable
	if exe, err := os.Executable(); err == nil {
		dir := filepath.Dir(exe)
		for _, cand := range []string{
			filepath.Join(dir, "ffprobe"),
			filepath.Join(dir, "ffprobe.exe"),
		} {
			if isFile(cand) {
				return cand
			}
		}
	}
	// 4) PATH lookup, made absolute when possible
	if found, err := exec.LookPath("ffprobe"); err == nil {
		if abs, absErr := filepath.Abs(found); absErr == nil {
			return abs
		}
		return found
	}
	// last resort: rely on PATH resolution at exec time
	return "ffprobe"
}
// ---------- Dynamic Semaphore (resizeable by load controller) ----------
type DynSem struct {
mu sync.Mutex
in int
max int
cap int
}
func NewDynSem(initial, cap int) *DynSem {
if cap < 1 {
cap = 1
}
if initial < 1 {
initial = 1
}
if initial > cap {
initial = cap
}
return &DynSem{max: initial, cap: cap}
}
func (s *DynSem) Acquire(ctx context.Context) error {
for {
if ctx != nil && ctx.Err() != nil {
return ctx.Err()
}
s.mu.Lock()
if s.in < s.max {
s.in++
s.mu.Unlock()
return nil
}
s.mu.Unlock()
time.Sleep(25 * time.Millisecond)
}
}
func (s *DynSem) Release() {
s.mu.Lock()
if s.in > 0 {
s.in--
}
s.mu.Unlock()
}
func (s *DynSem) SetMax(n int) {
if n < 1 {
n = 1
}
if n > s.cap {
n = s.cap
}
s.mu.Lock()
s.max = n
s.mu.Unlock()
}
func (s *DynSem) Max() int {
s.mu.Lock()
defer s.mu.Unlock()
return s.max
}
func (s *DynSem) Cap() int {
s.mu.Lock()
defer s.mu.Unlock()
return s.cap
}
func (s *DynSem) InUse() int {
s.mu.Lock()
defer s.mu.Unlock()
return s.in
}
// Global ffmpeg/ffprobe work limiters; initialized by
// initFFmpegSemaphores and resized at runtime by
// startAdaptiveSemController.
var (
	genSem     *DynSem // preview.mp4 clip generation
	previewSem *DynSem // live preview encodes (x264, expensive)
	thumbSem   *DynSem // thumbnail frame extraction
	durSem     *DynSem // ffprobe duration probes
)
// clamp restricts n to the inclusive range [lo, hi].
func clamp(n, lo, hi int) int {
	switch {
	case n < lo:
		return lo
	case n > hi:
		return hi
	default:
		return n
	}
}
func envInt(name string) (int, bool) {
v := strings.TrimSpace(os.Getenv(name))
if v == "" {
return 0, false
}
n, err := strconv.Atoi(v)
if err != nil {
return 0, false
}
return n, true
}
// initFFmpegSemaphores sizes the four ffmpeg/ffprobe work limiters
// from the CPU count, with optional ENV overrides for both the start
// values (*_WORKERS) and the hard caps (*_CAP).
func initFFmpegSemaphores() {
	cpu := runtime.NumCPU()
	if cpu <= 0 {
		cpu = 2
	}
	// Heuristic defaults
	previewN := clamp((cpu+1)/2, 1, 6) // x264 live -> conservative
	thumbN := clamp(cpu, 2, 12)        // frame grabs -> may go higher
	genN := clamp((cpu+3)/4, 1, 4)     // preview.mp4 clips -> keep small
	durN := clamp(cpu, 2, 16)          // ffprobe: higher, but bounded
	// ENV overrides (optional)
	if n, ok := envInt("PREVIEW_WORKERS"); ok {
		previewN = clamp(n, 1, 32)
	}
	if n, ok := envInt("THUMB_WORKERS"); ok {
		thumbN = clamp(n, 1, 64)
	}
	if n, ok := envInt("GEN_WORKERS"); ok {
		genN = clamp(n, 1, 16)
	}
	if n, ok := envInt("DUR_WORKERS"); ok {
		durN = clamp(n, 1, 64)
	}
	// Hard caps (upper bounds), also overridable via ENV
	previewCap := clamp(cpu, 2, 12)
	thumbCap := clamp(cpu*2, 4, 32)
	genCap := clamp((cpu+1)/2, 2, 12)
	durCap := clamp(cpu*2, 4, 32)
	if n, ok := envInt("PREVIEW_CAP"); ok {
		previewCap = clamp(n, 1, 64)
	}
	if n, ok := envInt("THUMB_CAP"); ok {
		thumbCap = clamp(n, 1, 128)
	}
	if n, ok := envInt("GEN_CAP"); ok {
		genCap = clamp(n, 1, 64)
	}
	if n, ok := envInt("DUR_CAP"); ok {
		durCap = clamp(n, 1, 128)
	}
	// Initial max (start values)
	previewSem = NewDynSem(previewN, previewCap)
	thumbSem = NewDynSem(thumbN, thumbCap)
	genSem = NewDynSem(genN, genCap)
	durSem = NewDynSem(durN, durCap)
	// Single startup log line. (A second, redundant summary Printf was
	// removed: it repeated the same numbers in less detail.)
	fmt.Printf(
		"🔧 semaphores(init): preview=%d/%d thumb=%d/%d gen=%d/%d dur=%d/%d (cpu=%d)\n",
		previewSem.Max(), previewSem.Cap(),
		thumbSem.Max(), thumbSem.Cap(),
		genSem.Max(), genSem.Cap(),
		durSem.Max(), durSem.Cap(),
		cpu,
	)
}
// startAdaptiveSemController samples total CPU usage every 2 seconds
// and nudges the preview/gen/thumb semaphore limits down when usage
// exceeds CPU_TARGET_HI (default 85) and up when it falls below
// CPU_TARGET_LO (default 65). durSem is not adjusted here. The sampler
// goroutine runs until ctx is cancelled.
func startAdaptiveSemController(ctx context.Context) {
	targetHi := 85.0
	targetLo := 65.0
	if v := strings.TrimSpace(os.Getenv("CPU_TARGET_HI")); v != "" {
		if f, err := strconv.ParseFloat(v, 64); err == nil {
			targetHi = f
		}
	}
	if v := strings.TrimSpace(os.Getenv("CPU_TARGET_LO")); v != "" {
		if f, err := strconv.ParseFloat(v, 64); err == nil {
			targetLo = f
		}
	}
	// warm-up (the first measurement can be 0)
	_, _ = gocpu.Percent(200*time.Millisecond, false)
	t := time.NewTicker(2 * time.Second)
	go func() {
		defer t.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-t.C:
				p, err := gocpu.Percent(0, false)
				if err != nil || len(p) == 0 {
					continue
				}
				usage := p[0]
				setLastCPUUsage(usage)
				// previews are the most expensive -> adjust conservatively
				if usage > targetHi {
					previewSem.SetMax(previewSem.Max() - 1)
					genSem.SetMax(genSem.Max() - 1)
					thumbSem.SetMax(thumbSem.Max() - 1)
				} else if usage < targetLo {
					previewSem.SetMax(previewSem.Max() + 1)
					genSem.SetMax(genSem.Max() + 1)
					thumbSem.SetMax(thumbSem.Max() + 1)
				}
				// optional debug:
				// fmt.Printf("CPU %.1f%% -> preview=%d thumb=%d gen=%d\n", usage, previewSem.Max(), thumbSem.Max(), genSem.Max())
			}
		}
	}()
}
// durEntry caches a probed media duration together with the file
// size/mtime it was computed from (cache invalidation key).
type durEntry struct {
	size int64
	mod  time.Time
	sec  float64
}

// durCache memoizes durationSecondsCached results keyed by path.
var durCache = struct {
	mu sync.Mutex
	m  map[string]durEntry
}{m: map[string]durEntry{}}

// startedAtFromFilenameRe matches basenames like
// "<name>_D_M_YYYY__H-MM-SS" (two 1-2 digit groups, a 4-digit year,
// then H-MM-SS). NOTE(review): presumably day_month_year order —
// confirm against the code that writes these filenames.
var startedAtFromFilenameRe = regexp.MustCompile(
	`^(.+)_([0-9]{1,2})_([0-9]{1,2})_([0-9]{4})__([0-9]{1,2})-([0-9]{2})-([0-9]{2})$`,
)
// buildPerfSnapshot assembles the telemetry payload served by the perf
// endpoints: CPU, disk usage on the record dir, dynamic low-disk
// thresholds, memory stats, goroutine count and semaphore utilization.
func buildPerfSnapshot() map[string]any {
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	s := getSettings()
	recordDir, _ := resolvePathRelativeToApp(s.RecordDir)
	var diskFreeBytes uint64
	var diskTotalBytes uint64
	var diskUsedPercent float64
	diskPath := recordDir
	if recordDir != "" {
		if u, err := godisk.Usage(recordDir); err == nil && u != nil {
			diskFreeBytes = u.Free
			diskTotalBytes = u.Total
			diskUsedPercent = u.UsedPercent
		}
	}
	// dynamic disk thresholds (2x inFlight, resume = +3GB)
	pauseGB, resumeGB, inFlight, pauseNeed, resumeNeed := computeDiskThresholds()
	resp := map[string]any{
		"ts":        time.Now().UTC().Format(time.RFC3339Nano),
		"serverMs":  time.Now().UTC().UnixMilli(), // lets the frontend approximate "ping"
		"uptimeSec": time.Since(serverStartedAt).Seconds(),
		"cpuPercent": func() float64 {
			// sanitize: never emit NaN/Inf/negative to JSON clients
			v := getLastCPUUsage()
			if math.IsNaN(v) || math.IsInf(v, 0) || v < 0 {
				return 0
			}
			return v
		}(),
		"diskPath":        diskPath,
		"diskFreeBytes":   diskFreeBytes,
		"diskTotalBytes":  diskTotalBytes,
		"diskUsedPercent": diskUsedPercent,
		"diskEmergency":   atomic.LoadInt32(&diskEmergency) == 1,
		// dynamic thresholds instead of a LowDiskPauseBelowGB setting
		"diskPauseBelowGB":  pauseGB,
		"diskResumeAboveGB": resumeGB,
		// optional, but very useful for debugging/UI
		"diskInFlightBytes":   inFlight,
		"diskInFlightHuman":   formatBytesSI(u64ToI64(inFlight)),
		"diskPauseNeedBytes":  pauseNeed,
		"diskPauseNeedHuman":  formatBytesSI(u64ToI64(pauseNeed)),
		"diskResumeNeedBytes": resumeNeed,
		"diskResumeNeedHuman": formatBytesSI(u64ToI64(resumeNeed)),
		"goroutines":          runtime.NumGoroutine(),
		"mem": map[string]any{
			"alloc":     ms.Alloc,
			"heapAlloc": ms.HeapAlloc,
			"heapInuse": ms.HeapInuse,
			"sys":       ms.Sys,
			"numGC":     ms.NumGC,
		},
	}
	// semaphore utilization (only those already initialized)
	sem := map[string]any{}
	if genSem != nil {
		sem["gen"] = map[string]any{"inUse": genSem.InUse(), "cap": genSem.Cap(), "max": genSem.Max()}
	}
	if previewSem != nil {
		sem["preview"] = map[string]any{"inUse": previewSem.InUse(), "cap": previewSem.Cap(), "max": previewSem.Max()}
	}
	if thumbSem != nil {
		sem["thumb"] = map[string]any{"inUse": thumbSem.InUse(), "cap": thumbSem.Cap(), "max": thumbSem.Max()}
	}
	if durSem != nil {
		sem["dur"] = map[string]any{"inUse": durSem.InUse(), "cap": durSem.Cap(), "max": durSem.Max()}
	}
	if len(sem) > 0 {
		resp["sem"] = sem
	}
	return resp
}
func pingHandler(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet && r.Method != http.MethodHead {
http.Error(w, "Nur GET/HEAD erlaubt", http.StatusMethodNotAllowed)
return
}
w.Header().Set("Cache-Control", "no-store")
w.WriteHeader(http.StatusNoContent)
}
// perfHandler serves a single performance snapshot as JSON. GET only.
func perfHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Nur GET erlaubt", http.StatusMethodNotAllowed)
		return
	}
	snapshot := buildPerfSnapshot()
	hdr := w.Header()
	hdr.Set("Content-Type", "application/json")
	hdr.Set("Cache-Control", "no-store")
	_ = json.NewEncoder(w).Encode(snapshot)
}
// perfStreamHandler streams perf snapshots via SSE. The client may
// pass ?ms=<interval> (clamped to 1000..30000, default 5000); a
// keepalive comment goes out every 15s for proxies. GET only.
func perfStreamHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Nur GET erlaubt", http.StatusMethodNotAllowed)
		return
	}
	fl, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "Streaming nicht unterstützt", http.StatusInternalServerError)
		return
	}
	// Optional: the client can supply an interval: /api/perf/stream?ms=5000
	ms := 5000
	if q := r.URL.Query().Get("ms"); q != "" {
		if v, err := strconv.Atoi(q); err == nil {
			// clamp: 1000..30000
			if v < 1000 {
				v = 1000
			}
			if v > 30000 {
				v = 30000
			}
			ms = v
		}
	}
	w.Header().Set("Content-Type", "text/event-stream; charset=utf-8")
	w.Header().Set("Cache-Control", "no-store")
	w.Header().Set("Connection", "keep-alive")
	// helpful behind nginx/proxies:
	w.Header().Set("X-Accel-Buffering", "no")
	ctx := r.Context()
	// send writes one "perf" event. Encoder.Encode appends a trailing
	// newline; the extra "\n" terminates the SSE frame.
	send := func() error {
		payload := buildPerfSnapshot()
		var buf bytes.Buffer
		if err := json.NewEncoder(&buf).Encode(payload); err != nil {
			return err
		}
		// event: perf
		_, _ = io.WriteString(w, "event: perf\n")
		_, _ = io.WriteString(w, "data: ")
		_, _ = w.Write(buf.Bytes())
		_, _ = io.WriteString(w, "\n")
		fl.Flush()
		return nil
	}
	// initial event immediately
	_ = send()
	t := time.NewTicker(time.Duration(ms) * time.Millisecond)
	hb := time.NewTicker(15 * time.Second) // heartbeat against proxy timeouts
	defer t.Stop()
	defer hb.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-t.C:
			_ = send()
		case <-hb.C:
			// SSE comment as heartbeat
			_, _ = io.WriteString(w, ": keep-alive\n\n")
			fl.Flush()
		}
	}
}
// -------------------------
// Low disk space guard
// - pauses autostart
// - stops running downloads
// -------------------------
const (
	diskGuardInterval = 5 * time.Second // poll cadence of startDiskSpaceGuard
)

// diskEmergency is the global low-disk flag, read/written atomically.
var diskEmergency int32 // 0=false, 1=true

// diskStatusResp is the JSON payload of diskStatusHandler.
type diskStatusResp struct {
	Emergency      bool   `json:"emergency"`
	PauseGB        int    `json:"pauseGB"`
	ResumeGB       int    `json:"resumeGB"`
	FreeBytes      uint64 `json:"freeBytes"`
	FreeBytesHuman string `json:"freeBytesHuman"`
	RecordPath     string `json:"recordPath"`
}
// diskStatusHandler reports the current low-disk emergency state, the
// dynamic pause/resume thresholds, and the free bytes on the record
// path. GET/HEAD only.
func diskStatusHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet && r.Method != http.MethodHead {
		http.Error(w, "Nur GET/HEAD erlaubt", http.StatusMethodNotAllowed)
		return
	}
	s := getSettings()
	pauseGB, resumeGB, _, _, _ := computeDiskThresholds()
	// Resolve the record dir; fall back to the raw setting when
	// app-relative resolution comes back empty.
	recordDirAbs, _ := resolvePathRelativeToApp(s.RecordDir)
	dir := strings.TrimSpace(recordDirAbs)
	if dir == "" {
		dir = strings.TrimSpace(s.RecordDir)
	}
	var free uint64
	if dir != "" {
		if usage, err := godisk.Usage(dir); err == nil && usage != nil {
			free = usage.Free
		}
	}
	w.Header().Set("Cache-Control", "no-store")
	writeJSON(w, http.StatusOK, diskStatusResp{
		Emergency:      atomic.LoadInt32(&diskEmergency) == 1,
		PauseGB:        pauseGB,
		ResumeGB:       resumeGB,
		FreeBytes:      free,
		FreeBytesHuman: formatBytesSI(int64(free)),
		RecordPath:     dir,
	})
}
// stopJobsInternal marks the given jobs as "stopping" and cancels them
// (including killing any preview ffmpeg process). It pushes two
// notifications so the UI sees phase/progress immediately.
func stopJobsInternal(list []*RecordJob) {
	if len(list) == 0 {
		return
	}
	type payload struct {
		cmd    *exec.Cmd
		cancel context.CancelFunc
	}
	// Collect process handles / cancel funcs under lock; do the actual
	// killing afterwards, outside the lock.
	pl := make([]payload, 0, len(list))
	jobsMu.Lock()
	for _, job := range list {
		if job == nil {
			continue
		}
		job.Phase = "stopping"
		job.Progress = 10
		pl = append(pl, payload{cmd: job.previewCmd, cancel: job.cancel})
		job.previewCmd = nil
	}
	jobsMu.Unlock()
	notifyJobsChanged() // 1) update the UI right away (phase/progress)
	for _, p := range pl {
		if p.cmd != nil && p.cmd.Process != nil {
			_ = p.cmd.Process.Kill()
		}
		if p.cancel != nil {
			p.cancel()
		}
	}
	notifyJobsChanged() // 2) optional: push again after cancel/kill
}
// stopAllStoppableJobs requests a stop for every running job that is
// not already in the "stopping" phase and returns how many were hit.
// In a disk emergency everything still writing must be halted.
func stopAllStoppableJobs() int {
	var targets []*RecordJob
	jobsMu.Lock()
	for _, job := range jobs {
		if job == nil || job.Status != JobRunning {
			continue
		}
		// skip only jobs that are already stopping anyway
		if strings.EqualFold(strings.TrimSpace(job.Phase), "stopping") {
			continue
		}
		targets = append(targets, job)
	}
	jobsMu.Unlock()
	stopJobsInternal(targets)
	return len(targets)
}
// shouldAutoDeleteSmallDownload decides whether the finished download
// at filePath is below the configured auto-delete threshold.
// Returns (shouldDelete, sizeBytes, thresholdBytes); the threshold is
// 0 when the feature is disabled or the input is unusable.
func shouldAutoDeleteSmallDownload(filePath string) (bool, int64, int64) {
	cfg := getSettings()
	if !cfg.AutoDeleteSmallDownloads || cfg.AutoDeleteSmallDownloadsBelowMB <= 0 {
		return false, 0, 0
	}
	p := strings.TrimSpace(filePath)
	if p == "" {
		return false, 0, 0
	}
	thr := int64(cfg.AutoDeleteSmallDownloadsBelowMB) * 1024 * 1024
	// best effort: make relative paths absolute
	if !filepath.IsAbs(p) {
		if abs, err := resolvePathRelativeToApp(p); err == nil && strings.TrimSpace(abs) != "" {
			p = abs
		}
	}
	fi, err := os.Stat(p)
	if err != nil || fi.IsDir() {
		return false, 0, thr
	}
	size := fi.Size()
	return size > 0 && size < thr, size, thr
}
// sizeOfPathBestEffort returns the size of the file at p in bytes, or
// 0 when the path is blank, missing, a directory, or zero-sized.
// Relative paths are resolved against the app directory when possible.
func sizeOfPathBestEffort(p string) uint64 {
	p = strings.TrimSpace(p)
	if p == "" {
		return 0
	}
	if !filepath.IsAbs(p) {
		// best effort: relative -> absolute
		if abs, err := resolvePathRelativeToApp(p); err == nil && strings.TrimSpace(abs) != "" {
			p = abs
		}
	}
	fi, err := os.Stat(p)
	if err != nil {
		return 0
	}
	if fi.IsDir() || fi.Size() <= 0 {
		return 0
	}
	return uint64(fi.Size())
}
// inFlightBytesForJob estimates how many bytes job j currently
// occupies on disk. The live-tracked SizeBytes wins (accurate and
// cheap); otherwise the output file is stat'ed as a fallback.
func inFlightBytesForJob(j *RecordJob) uint64 {
	switch {
	case j == nil:
		return 0
	case j.SizeBytes > 0:
		return uint64(j.SizeBytes)
	default:
		return sizeOfPathBestEffort(j.Output)
	}
}
const giB = uint64(1024 * 1024 * 1024)

// computeDiskThresholds derives the dynamic low-disk thresholds:
//
//	pause  = ceil((2 * inFlightBytes) / GiB)
//	resume = pause + 3 GB (hysteresis)
//
// With nothing in flight everything is 0 (no reservation needed).
// pauseNeed/resumeNeed are the corresponding byte counts.
func computeDiskThresholds() (pauseGB int, resumeGB int, inFlight uint64, pauseNeed uint64, resumeNeed uint64) {
	inFlight = sumInFlightBytes()
	if inFlight == 0 {
		return 0, 0, 0, 0, 0
	}
	const hardCap = 10_000 // safety cap in case something escalates
	required := inFlight * 2
	pauseGB = int((required + giB - 1) / giB) // round up to whole GiB
	if pauseGB > hardCap {
		pauseGB = hardCap
	}
	resumeGB = pauseGB + 3
	if resumeGB > hardCap {
		resumeGB = hardCap
	}
	pauseNeed = uint64(pauseGB) * giB
	resumeNeed = uint64(resumeGB) * giB
	return
}
// sumInFlightBytes totals the bytes of all currently "growing" data
// (jobs with status running). Rationale: a TS->MP4 remux transiently
// needs roughly the current file size again as headroom.
func sumInFlightBytes() uint64 {
	var sum uint64
	jobsMu.Lock()
	defer jobsMu.Unlock()
	for _, j := range jobs {
		if j == nil {
			continue
		}
		if j.Status != JobRunning {
			continue
		}
		// Take the file that is currently growing — typically j.Output
		// (TS or temporary target). If a separate "TempTS" field is
		// ever introduced, include it here as well.
		sum += inFlightBytesForJob(j)
	}
	return sum
}
// startDiskSpaceGuard runs in the backend (no browser needed) and
// checks free space on the record dir every diskGuardInterval.
// On low space it sets diskEmergency (which blocks autostart) and
// stops all running jobs; the flag is cleared again once free space
// recovers past the resume threshold (hysteresis) or nothing is in
// flight anymore.
func startDiskSpaceGuard() {
	t := time.NewTicker(diskGuardInterval)
	defer t.Stop()
	for range t.C {
		s := getSettings()
		// determine the path whose free space we check
		recordDirAbs, _ := resolvePathRelativeToApp(s.RecordDir)
		dir := strings.TrimSpace(recordDirAbs)
		if dir == "" {
			dir = strings.TrimSpace(s.RecordDir)
		}
		if dir == "" {
			continue
		}
		u, err := godisk.Usage(dir)
		if err != nil || u == nil {
			continue
		}
		free := u.Free
		// Dynamic thresholds:
		//   pause  = ceil((2 * inFlight) / GiB)
		//   resume = pause + 3 GB
		// pauseNeed/resumeNeed are the required free byte counts.
		pauseGB, resumeGB, inFlight, pauseNeed, resumeNeed := computeDiskThresholds()
		// Nothing running -> nothing to reserve.
		// (Also reset the emergency so autostart is unblocked again.)
		if inFlight == 0 {
			if atomic.LoadInt32(&diskEmergency) == 1 {
				atomic.StoreInt32(&diskEmergency, 0)
				fmt.Printf(
					"✅ [disk] No active jobs: emergency cleared (free=%s, path=%s)\n",
					formatBytesSI(u64ToI64(free)),
					dir,
				)
			}
			continue
		}
		// Hysteresis: only report "ready" again from resumeNeed upward
		if atomic.LoadInt32(&diskEmergency) == 1 {
			if free >= resumeNeed {
				atomic.StoreInt32(&diskEmergency, 0)
				fmt.Printf(
					"✅ [disk] Recovered: free=%s (%dB) (>= %s, %dB) emergency cleared (pause=%dGB resume=%dGB inFlight=%s, %dB)\n",
					formatBytesSI(u64ToI64(free)), free,
					formatBytesSI(u64ToI64(resumeNeed)), resumeNeed,
					pauseGB, resumeGB,
					formatBytesSI(u64ToI64(inFlight)), inFlight,
				)
			}
			continue
		}
		// Normal state: while free >= pauseNeed there is nothing to do
		if free >= pauseNeed {
			continue
		}
		// Trigger: pull the emergency brake, stop jobs
		atomic.StoreInt32(&diskEmergency, 1)
		fmt.Printf(
			"🛑 [disk] Low space: free=%s (%dB) (< %s, %dB, pause=%dGB resume=%dGB, inFlight=%s, %dB) -> stop jobs + block autostart via diskEmergency (path=%s)\n",
			formatBytesSI(u64ToI64(free)), free,
			formatBytesSI(u64ToI64(pauseNeed)), pauseNeed,
			pauseGB, resumeGB,
			formatBytesSI(u64ToI64(inFlight)), inFlight,
			dir,
		)
		stopped := stopAllStoppableJobs()
		if stopped > 0 {
			fmt.Printf("🛑 [disk] Stop requested for %d job(s)\n", stopped)
		}
	}
}
// setJobPhase updates a job's UI phase and progress (clamped to
// 0..100) under the jobs lock, then pushes a jobs update to SSE
// clients.
func setJobPhase(job *RecordJob, phase string, progress int) {
	switch {
	case progress < 0:
		progress = 0
	case progress > 100:
		progress = 100
	}
	jobsMu.Lock()
	job.Phase = phase
	job.Progress = progress
	jobsMu.Unlock()
	notifyJobsChanged()
}
// ffDurationRe matches ffmpeg's "Duration: HH:MM:SS.xx" banner line.
// Hoisted to package scope: the original compiled this regexp on every
// fallback call.
var ffDurationRe = regexp.MustCompile(`Duration:\s*(\d+):(\d+):(\d+(?:\.\d+)?)`)

// durationSecondsCached returns the media duration of path in seconds.
// Results are memoized in durCache keyed by (path, size, mtime), so a
// file only gets re-probed after it changes. ffprobe is preferred; the
// "Duration:" banner of `ffmpeg -i` serves as fallback.
func durationSecondsCached(ctx context.Context, path string) (float64, error) {
	fi, err := os.Stat(path)
	if err != nil {
		return 0, err
	}
	// cache hit: file unchanged since the last probe
	durCache.mu.Lock()
	if e, ok := durCache.m[path]; ok && e.size == fi.Size() && e.mod.Equal(fi.ModTime()) && e.sec > 0 {
		durCache.mu.Unlock()
		return e.sec, nil
	}
	durCache.mu.Unlock()
	// 1) preferred: ffprobe container duration
	cmd := exec.CommandContext(ctx, ffprobePath,
		"-v", "error",
		"-show_entries", "format=duration",
		"-of", "default=noprint_wrappers=1:nokey=1",
		path,
	)
	out, err := cmd.Output()
	if err == nil {
		s := strings.TrimSpace(string(out))
		sec, err2 := strconv.ParseFloat(s, 64)
		if err2 == nil && sec > 0 {
			durCache.mu.Lock()
			durCache.m[path] = durEntry{size: fi.Size(), mod: fi.ModTime(), sec: sec}
			durCache.mu.Unlock()
			return sec, nil
		}
	}
	// 2) fallback: parse "Duration: HH:MM:SS.xx" from `ffmpeg -i`.
	// ffmpeg usually exits non-zero here (no output file), but the
	// banner is still printed, so the error is deliberately ignored.
	cmd2 := exec.CommandContext(ctx, ffmpegPath, "-i", path)
	b, _ := cmd2.CombinedOutput()
	m := ffDurationRe.FindStringSubmatch(string(b))
	if len(m) != 4 {
		return 0, fmt.Errorf("duration not found")
	}
	hh, _ := strconv.ParseFloat(m[1], 64)
	mm, _ := strconv.ParseFloat(m[2], 64)
	ss, _ := strconv.ParseFloat(m[3], 64)
	sec := hh*3600 + mm*60 + ss
	if sec <= 0 {
		return 0, fmt.Errorf("invalid duration")
	}
	durCache.mu.Lock()
	durCache.m[path] = durEntry{size: fi.Size(), mod: fi.ModTime(), sec: sec}
	durCache.mu.Unlock()
	return sec, nil
}
// RecorderSettings is the persisted application configuration
// (recorder_settings.json), editable via the UI.
type RecorderSettings struct {
	RecordDir               string `json:"recordDir"`
	DoneDir                 string `json:"doneDir"`
	FFmpegPath              string `json:"ffmpegPath"`
	AutoAddToDownloadList   bool   `json:"autoAddToDownloadList"`
	AutoStartAddedDownloads bool   `json:"autoStartAddedDownloads"`
	UseChaturbateAPI        bool   `json:"useChaturbateApi"`
	UseMyFreeCamsWatcher    bool   `json:"useMyFreeCamsWatcher"`
	// When enabled, finished downloads smaller than the threshold are
	// deleted automatically.
	AutoDeleteSmallDownloads        bool `json:"autoDeleteSmallDownloads"`
	AutoDeleteSmallDownloadsBelowMB int  `json:"autoDeleteSmallDownloadsBelowMB"`
	BlurPreviews                    bool `json:"blurPreviews"`
	TeaserPlayback string `json:"teaserPlayback"` // still | hover | all
	TeaserAudio    bool   `json:"teaserAudio"`    // play preview/teaser with sound
	// EncryptedCookies contains base64(nonce+ciphertext) of a JSON cookie map.
	EncryptedCookies string `json:"encryptedCookies"`
}

var (
	settingsMu sync.Mutex // guards settings
	settings   = RecorderSettings{
		RecordDir:                       "/records",
		DoneDir:                         "/records/done",
		FFmpegPath:                      "",
		AutoAddToDownloadList:           false,
		AutoStartAddedDownloads:         false,
		UseChaturbateAPI:                false,
		UseMyFreeCamsWatcher:            false,
		AutoDeleteSmallDownloads:        false,
		AutoDeleteSmallDownloadsBelowMB: 50,
		BlurPreviews:                    false,
		TeaserPlayback:                  "hover",
		TeaserAudio:                     false,
		EncryptedCookies:                "",
	}
	// settingsFile is the default settings filename (overridable via
	// RECORDER_SETTINGS_FILE, see settingsFilePath).
	settingsFile = "recorder_settings.json"
)
// settingsFilePath resolves where the settings JSON lives: the
// RECORDER_SETTINGS_FILE env var wins, otherwise the default filename,
// resolved relative to the app/EXE directory when possible (falls back
// to the working dir under `go run`).
func settingsFilePath() string {
	name := strings.TrimSpace(os.Getenv("RECORDER_SETTINGS_FILE"))
	if name == "" {
		name = settingsFile
	}
	// default: relative to the EXE / app dir
	resolved, err := resolvePathRelativeToApp(name)
	if err == nil && strings.TrimSpace(resolved) != "" {
		return resolved
	}
	// fallback: return as-is
	return name
}
// getSettings returns a copy of the current settings taken under the
// lock, so callers can read fields without further synchronization.
func getSettings() RecorderSettings {
	settingsMu.Lock()
	defer settingsMu.Unlock()
	return settings
}
// detectFFmpegPath locates the ffmpeg binary, in priority order:
// settings override, FFMPEG_PATH env var, next to the executable,
// PATH lookup, and finally the bare name "ffmpeg".
func detectFFmpegPath() string {
	// 0) settings override (ffmpegPath in recorder_settings.json / UI)
	if p := strings.TrimSpace(getSettings().FFmpegPath); p != "" {
		if !filepath.IsAbs(p) {
			// resolve relative to the EXE if needed
			if abs, err := resolvePathRelativeToApp(p); err == nil {
				p = abs
			}
		}
		return p
	}
	// 1) FFMPEG_PATH environment variable allows an override
	if p := strings.TrimSpace(os.Getenv("FFMPEG_PATH")); p != "" {
		if abs, err := filepath.Abs(p); err == nil {
			return abs
		}
		return p
	}
	// 2) ffmpeg / ffmpeg.exe in the program's own directory
	if exe, err := os.Executable(); err == nil {
		dir := filepath.Dir(exe)
		for _, cand := range []string{
			filepath.Join(dir, "ffmpeg"),
			filepath.Join(dir, "ffmpeg.exe"),
		} {
			if fi, statErr := os.Stat(cand); statErr == nil && !fi.IsDir() {
				return cand
			}
		}
	}
	// 3) PATH lookup, made absolute when possible
	if found, err := exec.LookPath("ffmpeg"); err == nil {
		if abs, absErr := filepath.Abs(found); absErr == nil {
			return abs
		}
		return found
	}
	// 4) last resort: plain "ffmpeg" (may still fail at exec time)
	return "ffmpeg"
}
// removeGeneratedForID deletes all generated assets for a recording id:
// the meta dir, legacy thumbs/teaser files and temp HLS preview segments.
// Accepts raw ids, "file.mp4"-style names and "HOT "-prefixed ids.
// Best effort — all removal errors are ignored.
func removeGeneratedForID(id string) {
	// Canonical id: same normalization as when the generated dirs are created.
	id = strings.TrimSpace(id)
	if id == "" {
		return
	}
	// In case someone passes "file.mp4".
	id = strings.TrimSuffix(id, filepath.Ext(id))
	// Strip the HOT prefix.
	id = stripHotPrefix(id)
	// Important: exactly the same normalization as everywhere else (dir names!).
	var err error
	id, err = sanitizeID(id)
	if err != nil || id == "" {
		return
	}
	// 1) NEW layout: generated/meta/<id>/ ...
	if root, _ := generatedMetaRoot(); strings.TrimSpace(root) != "" {
		_ = os.RemoveAll(filepath.Join(root, id))
	}
	// (optional but sensible) 1b) Legacy: generated/<id>/ (old assets may still exist).
	if root, _ := generatedRoot(); strings.TrimSpace(root) != "" {
		_ = os.RemoveAll(filepath.Join(root, id))
	}
	// 2) Clean up temp preview segments (HLS):
	// (%TEMP%/rec_preview/<assetID>)
	_ = os.RemoveAll(filepath.Join(os.TempDir(), "rec_preview", id))
	// 3) Legacy cleanup (best effort).
	thumbsLegacy, _ := generatedThumbsRoot()
	teaserLegacy, _ := generatedTeaserRoot()
	if strings.TrimSpace(thumbsLegacy) != "" {
		_ = os.RemoveAll(filepath.Join(thumbsLegacy, id))
		_ = os.Remove(filepath.Join(thumbsLegacy, id+".jpg"))
	}
	if strings.TrimSpace(teaserLegacy) != "" {
		_ = os.Remove(filepath.Join(teaserLegacy, id+".mp4"))
		_ = os.Remove(filepath.Join(teaserLegacy, id+"_teaser.mp4"))
	}
}
// purgeDurationCacheForPath drops the cached duration entry for the given
// file path; blank paths are a no-op.
func purgeDurationCacheForPath(p string) {
	if p = strings.TrimSpace(p); p == "" {
		return
	}
	durCache.mu.Lock()
	defer durCache.mu.Unlock()
	delete(durCache.m, p)
}
// renameGenerated moves legacy generated assets (the thumbs directory and
// the teaser mp4) from oldID to newID. If the destination already exists,
// the old asset is removed instead of overwriting it. Best effort: all
// errors are ignored.
func renameGenerated(oldID, newID string) {
	// NOTE(review): root resolution errors are discarded; an empty root
	// would make the joins below relative paths — presumably the roots
	// always resolve in practice, but worth confirming.
	thumbsRoot, _ := generatedThumbsRoot()
	teaserRoot, _ := generatedTeaserRoot()
	oldThumb := filepath.Join(thumbsRoot, oldID)
	newThumb := filepath.Join(thumbsRoot, newID)
	if _, err := os.Stat(oldThumb); err == nil {
		if _, err2 := os.Stat(newThumb); os.IsNotExist(err2) {
			_ = os.Rename(oldThumb, newThumb)
		} else {
			// Target exists (or Stat errored otherwise): drop the old dir.
			_ = os.RemoveAll(oldThumb)
		}
	}
	oldTeaser := filepath.Join(teaserRoot, oldID+".mp4")
	newTeaser := filepath.Join(teaserRoot, newID+".mp4")
	if _, err := os.Stat(oldTeaser); err == nil {
		if _, err2 := os.Stat(newTeaser); os.IsNotExist(err2) {
			_ = os.Rename(oldTeaser, newTeaser)
		} else {
			_ = os.Remove(oldTeaser)
		}
	}
}
// loadSettings reads the settings JSON (path possibly overridden via
// RECORDER_SETTINGS_FILE), merges it over the built-in defaults,
// normalizes/clamps values, ensures the record/done directories exist and
// re-detects the ffmpeg/ffprobe binaries. A missing or invalid file
// silently leaves the defaults in place.
func loadSettings() {
	p := settingsFilePath()
	b, err := os.ReadFile(p)
	fmt.Println("🔧 settingsFile:", p)
	if err == nil {
		s := getSettings() // ✅ starts from the defaults
		if json.Unmarshal(b, &s) == nil {
			if strings.TrimSpace(s.RecordDir) != "" {
				s.RecordDir = filepath.Clean(strings.TrimSpace(s.RecordDir))
			}
			if strings.TrimSpace(s.DoneDir) != "" {
				s.DoneDir = filepath.Clean(strings.TrimSpace(s.DoneDir))
			}
			if strings.TrimSpace(s.FFmpegPath) != "" {
				s.FFmpegPath = strings.TrimSpace(s.FFmpegPath)
			}
			// TeaserPlayback must be one of still|hover|all; default hover.
			s.TeaserPlayback = strings.ToLower(strings.TrimSpace(s.TeaserPlayback))
			if s.TeaserPlayback == "" {
				s.TeaserPlayback = "hover"
			}
			if s.TeaserPlayback != "still" && s.TeaserPlayback != "hover" && s.TeaserPlayback != "all" {
				s.TeaserPlayback = "hover"
			}
			// Auto-delete threshold: clamp to [0, 100000] MB.
			if s.AutoDeleteSmallDownloadsBelowMB < 0 {
				s.AutoDeleteSmallDownloadsBelowMB = 0
			}
			if s.AutoDeleteSmallDownloadsBelowMB > 100_000 {
				s.AutoDeleteSmallDownloadsBelowMB = 100_000
			}
			settingsMu.Lock()
			settings = s
			settingsMu.Unlock()
		}
	}
	// Make sure the target directories exist.
	s := getSettings()
	recordAbs, _ := resolvePathRelativeToApp(s.RecordDir)
	doneAbs, _ := resolvePathRelativeToApp(s.DoneDir)
	if strings.TrimSpace(recordAbs) != "" {
		_ = os.MkdirAll(recordAbs, 0o755)
	}
	if strings.TrimSpace(doneAbs) != "" {
		_ = os.MkdirAll(doneAbs, 0o755)
	}
	// Determine the ffmpeg path from settings/env/PATH.
	ffmpegPath = detectFFmpegPath()
	fmt.Println("🔍 ffmpegPath:", ffmpegPath)
	ffprobePath = detectFFprobePath()
	fmt.Println("🔍 ffprobePath:", ffprobePath)
}
// saveSettingsToDisk serializes the current settings snapshot as indented
// JSON (with a trailing newline) and writes it atomically to the settings
// file. Errors are only logged, never propagated.
func saveSettingsToDisk() {
	s := getSettings()
	b, err := json.MarshalIndent(s, "", " ")
	if err != nil {
		fmt.Println("⚠️ settings marshal:", err)
		return
	}
	b = append(b, '\n')
	p := settingsFilePath()
	if err := atomicWriteFile(p, b); err != nil {
		fmt.Println("⚠️ settings write:", err)
		return
	}
	// optional
	// fmt.Println("✅ settings saved:", p)
}
// recordSettingsHandler serves the recorder settings endpoint.
// GET returns the current settings; POST validates and normalizes a full
// replacement settings object, persists it, creates the record/done
// directories and re-detects the ffmpeg/ffprobe binaries.
//
// NOTE(review): POST replaces the settings wholesale, so fields a client
// omits (e.g. encryptedCookies) are reset to their zero values — confirm
// the frontend always submits the complete object.
func recordSettingsHandler(w http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case http.MethodGet:
		w.Header().Set("Content-Type", "application/json")
		w.Header().Set("Cache-Control", "no-store")
		_ = json.NewEncoder(w).Encode(getSettings())
		return
	case http.MethodPost:
		var in RecorderSettings
		if err := json.NewDecoder(r.Body).Decode(&in); err != nil {
			http.Error(w, "invalid json: "+err.Error(), http.StatusBadRequest)
			return
		}
		// --- normalize (IMPORTANT: trim first, then empty-check, then clean) ---
		recRaw := strings.TrimSpace(in.RecordDir)
		doneRaw := strings.TrimSpace(in.DoneDir)
		if recRaw == "" || doneRaw == "" {
			http.Error(w, "recordDir und doneDir dürfen nicht leer sein", http.StatusBadRequest)
			return
		}
		in.RecordDir = filepath.Clean(recRaw)
		in.DoneDir = filepath.Clean(doneRaw)
		// Optional but strongly recommended: forbid ".".
		if in.RecordDir == "." || in.DoneDir == "." {
			http.Error(w, "recordDir/doneDir dürfen nicht '.' sein", http.StatusBadRequest)
			return
		}
		in.FFmpegPath = strings.TrimSpace(in.FFmpegPath)
		// TeaserPlayback must be one of still|hover|all; default hover.
		in.TeaserPlayback = strings.ToLower(strings.TrimSpace(in.TeaserPlayback))
		if in.TeaserPlayback == "" {
			in.TeaserPlayback = "hover"
		}
		if in.TeaserPlayback != "still" && in.TeaserPlayback != "hover" && in.TeaserPlayback != "all" {
			in.TeaserPlayback = "hover"
		}
		// Auto-delete threshold: clamp to [0, 100000] MB.
		if in.AutoDeleteSmallDownloadsBelowMB < 0 {
			in.AutoDeleteSmallDownloadsBelowMB = 0
		}
		if in.AutoDeleteSmallDownloadsBelowMB > 100_000 {
			in.AutoDeleteSmallDownloadsBelowMB = 100_000
		}
		// --- ensure folders (report errors, e.g. missing permissions) ---
		recAbs, err := resolvePathRelativeToApp(in.RecordDir)
		if err != nil {
			http.Error(w, "ungültiger recordDir: "+err.Error(), http.StatusBadRequest)
			return
		}
		doneAbs, err := resolvePathRelativeToApp(in.DoneDir)
		if err != nil {
			http.Error(w, "ungültiger doneDir: "+err.Error(), http.StatusBadRequest)
			return
		}
		if err := os.MkdirAll(recAbs, 0o755); err != nil {
			http.Error(w, "konnte recordDir nicht erstellen: "+err.Error(), http.StatusBadRequest)
			return
		}
		if err := os.MkdirAll(doneAbs, 0o755); err != nil {
			http.Error(w, "konnte doneDir nicht erstellen: "+err.Error(), http.StatusBadRequest)
			return
		}
		// ✅ Update the in-memory settings.
		settingsMu.Lock()
		settings = in
		settingsMu.Unlock()
		// ✅ Persist the settings to disk.
		saveSettingsToDisk()
		// ✅ Re-detect ffmpeg/ffprobe after changes.
		// If the user set FFmpegPath explicitly, use it directly.
		if strings.TrimSpace(in.FFmpegPath) != "" {
			ffmpegPath = in.FFmpegPath
		} else {
			ffmpegPath = detectFFmpegPath()
		}
		fmt.Println("🔍 ffmpegPath:", ffmpegPath)
		ffprobePath = detectFFprobePath()
		fmt.Println("🔍 ffprobePath:", ffprobePath)
		w.Header().Set("Content-Type", "application/json")
		w.Header().Set("Cache-Control", "no-store")
		_ = json.NewEncoder(w).Encode(getSettings())
		return
	default:
		http.Error(w, "Nur GET/POST erlaubt", http.StatusMethodNotAllowed)
		return
	}
}
// settingsBrowse opens a native picker for the requested target
// ("record"/"done" -> directory dialog, "ffmpeg" -> file dialog) and
// returns the chosen path as JSON. A cancelled dialog yields
// 204 No Content; paths inside the exe dir are returned relative.
func settingsBrowse(w http.ResponseWriter, r *http.Request) {
	target := r.URL.Query().Get("target")
	if target != "record" && target != "done" && target != "ffmpeg" {
		http.Error(w, "target muss record, done oder ffmpeg sein", http.StatusBadRequest)
		return
	}
	var (
		p string
		err error
	)
	if target == "ffmpeg" {
		// File selection for ffmpeg.exe.
		p, err = dialog.File().
			Title("ffmpeg.exe auswählen").
			Load()
	} else {
		// Directory selection for record/done.
		p, err = dialog.Directory().
			Title("Ordner auswählen").
			Browse()
	}
	if err != nil {
		// User cancelled -> 204 No Content is convenient for the frontend.
		if strings.Contains(strings.ToLower(err.Error()), "cancel") {
			w.WriteHeader(http.StatusNoContent)
			return
		}
		http.Error(w, "auswahl fehlgeschlagen: "+err.Error(), http.StatusInternalServerError)
		return
	}
	// Optional: if inside the exe dir, return it as a RELATIVE path.
	p = maybeMakeRelativeToExe(p)
	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(map[string]string{"path": p})
}
// maybeMakeRelativeToExe converts abs into a slash-separated path relative
// to the executable's directory, but only when abs lies strictly inside
// that directory; otherwise (or on any error) abs is returned unchanged.
func maybeMakeRelativeToExe(abs string) string {
	exe, err := os.Executable()
	if err != nil {
		return abs
	}
	rel, err := filepath.Rel(filepath.Dir(exe), abs)
	if err != nil {
		return abs
	}
	// "." / ".." / "../x" all mean abs is not strictly inside the base dir
	// -> keep the absolute path.
	outside := rel == "." || rel == ".." ||
		strings.HasPrefix(rel, ".."+string(os.PathSeparator))
	if outside {
		return abs
	}
	return filepath.ToSlash(rel) // frontend-friendly
}
// --- Shared status values for MFC ---

// Status models a cam room's availability state.
type Status int

const (
	StatusUnknown Status = iota
	StatusPublic
	StatusPrivate
	StatusOffline
	StatusNotExist
)

// String returns the uppercase wire/log representation of the status;
// any unrecognized value maps to "UNKNOWN".
func (st Status) String() string {
	names := map[Status]string{
		StatusPublic:   "PUBLIC",
		StatusPrivate:  "PRIVATE",
		StatusOffline:  "OFFLINE",
		StatusNotExist: "NOTEXIST",
	}
	if name, ok := names[st]; ok {
		return name
	}
	return "UNKNOWN"
}
// HTTPClient bundles an http.Client with a fixed User-Agent header
// (mirrors internal.Req in the DVR).
type HTTPClient struct {
	client    *http.Client
	userAgent string
}

// NewHTTPClient creates the shared HTTP client. An empty userAgent falls
// back to a desktop-browser default; requests time out after 10 seconds.
func NewHTTPClient(userAgent string) *HTTPClient {
	ua := userAgent
	if ua == "" {
		// Default when no UA is supplied.
		ua = "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"
	}
	c := &http.Client{Timeout: 10 * time.Second}
	return &HTTPClient{client: c, userAgent: ua}
}
// NewRequest builds an HTTP request with the client's User-Agent (or a
// minimal "Mozilla/5.0" fallback), a wildcard Accept header, and cookies
// parsed from cookieStr in "name=value; foo=bar" form.
func (h *HTTPClient) NewRequest(ctx context.Context, method, url, cookieStr string) (*http.Request, error) {
	req, err := http.NewRequestWithContext(ctx, method, url, nil)
	if err != nil {
		return nil, err
	}
	// Base headers that are always set.
	if h.userAgent != "" {
		req.Header.Set("User-Agent", h.userAgent)
	} else {
		req.Header.Set("User-Agent", "Mozilla/5.0")
	}
	req.Header.Set("Accept", "*/*")
	// Cookie string like "name=value; foo=bar".
	addCookiesFromString(req, cookieStr)
	return req, nil
}
// FetchPage GETs url with the client's default headers plus cookieStr and
// returns the body as a string. Known protection pages (Cloudflare
// challenge, age gate) are detected by content and reported as errors
// BEFORE the status code is checked, to produce more useful messages.
func (h *HTTPClient) FetchPage(ctx context.Context, url, cookieStr string) (string, error) {
	req, err := h.NewRequest(ctx, http.MethodGet, url, cookieStr)
	if err != nil {
		return "", err
	}
	resp, err := h.client.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	body := string(data)
	// Slightly more descriptive errors than just "room dossier not found".
	if strings.Contains(body, "<title>Just a moment...</title>") {
		return "", errors.New("Schutzseite von Cloudflare erhalten (\"Just a moment...\") kein Room-HTML")
	}
	if strings.Contains(body, "Verify your age") {
		return "", errors.New("Altersverifikationsseite erhalten kein Room-HTML")
	}
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("HTTP %d beim Laden von %s", resp.StatusCode, url)
	}
	return body, nil
}
// remuxTSToMP4 losslessly repackages a .ts recording into an .mp4
// container (stream copy) with the moov atom moved up front.
// Equivalent to: ffmpeg -y -i in.ts -c copy -movflags +faststart out.mp4
func remuxTSToMP4(tsPath, mp4Path string) error {
	args := []string{
		"-y",
		"-i", tsPath,
		"-c", "copy",
		"-movflags", "+faststart",
		mp4Path,
	}
	cmd := exec.Command(ffmpegPath, args...)
	var errBuf bytes.Buffer
	cmd.Stderr = &errBuf
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("ffmpeg remux failed: %v (%s)", err, errBuf.String())
	}
	return nil
}
// parseFFmpegOutTime converts an ffmpeg "out_time" value ("HH:MM:SS.ffff")
// into seconds. Malformed or empty input yields 0.
func parseFFmpegOutTime(v string) float64 {
	fields := strings.Split(strings.TrimSpace(v), ":")
	if len(fields) != 3 {
		return 0
	}
	hours, errH := strconv.Atoi(fields[0])
	mins, errM := strconv.Atoi(fields[1])
	secs, errS := strconv.ParseFloat(fields[2], 64) // seconds may carry decimals
	if errH != nil || errM != nil || errS != nil {
		return 0
	}
	return float64(hours*3600+mins*60) + secs
}
// remuxTSToMP4WithProgress remuxes tsPath into mp4Path (stream copy with
// +faststart) while reporting progress through onRatio with values
// clamped to [0,1]. Progress is parsed from ffmpeg's "-progress pipe:1"
// key=value stream on stdout: time-based (out time vs. durationSec) when
// possible, byte-based (total_size vs. inSize) as fallback, and a forced
// 1.0 on the final "progress=end" record. ctx cancels the ffmpeg process.
func remuxTSToMP4WithProgress(
	ctx context.Context,
	tsPath, mp4Path string,
	durationSec float64,
	inSize int64,
	onRatio func(r float64),
) error {
	// ffmpeg progress arrives on stdout as key=value lines.
	cmd := exec.CommandContext(ctx, ffmpegPath,
		"-y",
		"-nostats",
		"-progress", "pipe:1",
		"-i", tsPath,
		"-c", "copy",
		"-movflags", "+faststart",
		mp4Path,
	)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}
	var stderr bytes.Buffer
	cmd.Stderr = &stderr
	if err := cmd.Start(); err != nil {
		return err
	}
	sc := bufio.NewScanner(stdout)
	sc.Buffer(make([]byte, 0, 64*1024), 1024*1024)
	var (
		lastOutSec  float64
		lastTotalSz int64
	)
	// clamp01 keeps a ratio inside [0,1].
	clamp01 := func(r float64) float64 {
		return math.Min(1, math.Max(0, r))
	}
	// send forwards a progress ratio: prefer time/duration, fall back to
	// bytes (okay-ish for a remux), and emit a final 1.0 only when forced
	// (the "progress=end" record).
	send := func(outSec float64, totalSize int64, force bool) {
		if onRatio == nil {
			return
		}
		if durationSec > 0 && outSec > 0 {
			onRatio(clamp01(outSec / durationSec))
			return
		}
		if inSize > 0 && totalSize > 0 {
			onRatio(clamp01(float64(totalSize) / float64(inSize)))
			return
		}
		if force {
			onRatio(1)
		}
	}
	for sc.Scan() {
		line := strings.TrimSpace(sc.Text())
		if line == "" {
			continue
		}
		k, v, ok := strings.Cut(line, "=")
		if !ok {
			continue
		}
		switch k {
		case "out_time_us":
			if n, err := strconv.ParseInt(strings.TrimSpace(v), 10, 64); err == nil && n > 0 {
				lastOutSec = float64(n) / 1_000_000.0
				send(lastOutSec, lastTotalSz, false)
			}
		case "out_time_ms":
			// BUGFIX: despite its name, ffmpeg emits out_time_ms in
			// MICROseconds (it mirrors out_time_us; see ffmpeg trac
			// #7345). The previous /1000 produced values 1000x too
			// large, pegging the reported ratio at 1 on every line.
			if n, err := strconv.ParseInt(strings.TrimSpace(v), 10, 64); err == nil && n > 0 {
				lastOutSec = float64(n) / 1_000_000.0
				send(lastOutSec, lastTotalSz, false)
			}
		case "out_time":
			if s := parseFFmpegOutTime(v); s > 0 {
				lastOutSec = s
				send(lastOutSec, lastTotalSz, false)
			}
		case "total_size":
			if n, err := strconv.ParseInt(strings.TrimSpace(v), 10, 64); err == nil && n > 0 {
				lastTotalSz = n
				send(lastOutSec, lastTotalSz, false)
			}
		case "progress":
			if strings.TrimSpace(v) == "end" {
				send(lastOutSec, lastTotalSz, true)
			}
		}
	}
	if err := cmd.Wait(); err != nil {
		return fmt.Errorf("ffmpeg remux failed: %v (%s)", err, strings.TrimSpace(stderr.String()))
	}
	return nil
}
// --- MP4 Streaming Optimierung (Fast Start) ---
// "Fast Start" bedeutet: moov vor mdat (Browser kann sofort Metadaten lesen)
func isFastStartMP4(path string) (bool, error) {
f, err := os.Open(path)
if err != nil {
return false, err
}
defer f.Close()
for i := 0; i < 256; i++ {
var hdr [8]byte
if _, err := io.ReadFull(f, hdr[:]); err != nil {
// unklar/kurz -> nicht anfassen
return true, nil
}
sz32 := binary.BigEndian.Uint32(hdr[0:4])
typ := string(hdr[4:8])
var boxSize int64
headerSize := int64(8)
if sz32 == 0 {
return true, nil
}
if sz32 == 1 {
var ext [8]byte
if _, err := io.ReadFull(f, ext[:]); err != nil {
return true, nil
}
boxSize = int64(binary.BigEndian.Uint64(ext[:]))
headerSize = 16
} else {
boxSize = int64(sz32)
}
if boxSize < headerSize {
return true, nil
}
switch typ {
case "moov":
return true, nil
case "mdat":
return false, nil
}
if _, err := f.Seek(boxSize-headerSize, io.SeekCurrent); err != nil {
return true, nil
}
}
return true, nil
}
// ensureFastStartMP4 rewrites an .mp4 in place so the moov atom precedes
// mdat (instant browser playback). No-op for non-mp4 paths, a missing
// ffmpeg, or files already in fast-start order. The rewrite is staged
// through .tmp/.bak siblings so the original can be restored if a rename
// step fails.
func ensureFastStartMP4(path string) error {
	path = strings.TrimSpace(path)
	if path == "" || !strings.EqualFold(filepath.Ext(path), ".mp4") {
		return nil
	}
	if strings.TrimSpace(ffmpegPath) == "" {
		return nil
	}
	// NOTE: a probe error falls through to remuxing — deliberate
	// best-effort behavior.
	ok, err := isFastStartMP4(path)
	if err == nil && ok {
		return nil
	}
	dir := filepath.Dir(path)
	base := filepath.Base(path)
	tmp := filepath.Join(dir, ".__faststart__"+base+".tmp")
	bak := filepath.Join(dir, ".__faststart__"+base+".bak")
	_ = os.Remove(tmp)
	_ = os.Remove(bak)
	cmd := exec.Command(ffmpegPath,
		"-y",
		"-i", path,
		"-c", "copy",
		"-movflags", "+faststart",
		tmp,
	)
	var stderr bytes.Buffer
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		_ = os.Remove(tmp)
		return fmt.Errorf("ffmpeg faststart failed: %v (%s)", err, strings.TrimSpace(stderr.String()))
	}
	// Swap atomically: original -> .bak, tmp -> original, then drop .bak.
	if err := os.Rename(path, bak); err != nil {
		_ = os.Remove(tmp)
		return fmt.Errorf("rename original to bak failed: %w", err)
	}
	if err := os.Rename(tmp, path); err != nil {
		// Roll the original back into place.
		_ = os.Rename(bak, path)
		_ = os.Remove(tmp)
		return fmt.Errorf("rename tmp to original failed: %w", err)
	}
	_ = os.Remove(bak)
	return nil
}
// extractLastFrameJPEG grabs a frame ~0.1s before EOF of the video at
// path, scaled to 720px width, and returns it as JPEG bytes.
func extractLastFrameJPEG(path string) ([]byte, error) {
	args := []string{
		"-hide_banner",
		"-loglevel", "error",
		"-sseof", "-0.1",
		"-i", path,
		"-frames:v", "1",
		"-vf", "scale=720:-2",
		"-q:v", "10",
		"-f", "image2pipe",
		"-vcodec", "mjpeg",
		"pipe:1",
	}
	cmd := exec.Command(ffmpegPath, args...)
	var stdout, errBuf bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &errBuf
	if err := cmd.Run(); err != nil {
		return nil, fmt.Errorf("ffmpeg last-frame: %w (%s)", err, strings.TrimSpace(errBuf.String()))
	}
	return stdout.Bytes(), nil
}
// extractFrameAtTimeJPEG grabs one frame at the given offset (clamped to
// >= 0 seconds), scaled to 720px width, and returns it as JPEG bytes.
func extractFrameAtTimeJPEG(path string, seconds float64) ([]byte, error) {
	if seconds < 0 {
		seconds = 0
	}
	seek := strconv.FormatFloat(seconds, 'f', 3, 64)
	args := []string{
		"-hide_banner",
		"-loglevel", "error",
		"-ss", seek,
		"-i", path,
		"-frames:v", "1",
		"-vf", "scale=720:-2",
		"-q:v", "10",
		"-f", "image2pipe",
		"-vcodec", "mjpeg",
		"pipe:1",
	}
	cmd := exec.Command(ffmpegPath, args...)
	var stdout, errBuf bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &errBuf
	if err := cmd.Run(); err != nil {
		return nil, fmt.Errorf("ffmpeg frame-at-time: %w (%s)", err, strings.TrimSpace(errBuf.String()))
	}
	return stdout.Bytes(), nil
}
// extractLastFrameJPEGScaled grabs a frame ~0.25s before EOF, scaled to
// the given width (default 320) with JPEG quality q (default 14), and
// fails on empty ffmpeg output.
func extractLastFrameJPEGScaled(path string, width int, q int) ([]byte, error) {
	if width <= 0 {
		width = 320
	}
	if q <= 0 {
		q = 14
	}
	// ffmpeg: last frame, low-res.
	cmd := exec.Command(
		ffmpegPath,
		"-hide_banner", "-loglevel", "error",
		"-sseof", "-0.25",
		"-i", path,
		"-frames:v", "1",
		"-vf", "scale="+strconv.Itoa(width)+":-2",
		"-q:v", strconv.Itoa(q),
		"-f", "image2pipe",
		"pipe:1",
	)
	var stdout, errBuf bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &errBuf
	if err := cmd.Run(); err != nil {
		return nil, fmt.Errorf("ffmpeg last-frame scaled: %w (%s)", err, strings.TrimSpace(errBuf.String()))
	}
	if out := stdout.Bytes(); len(out) > 0 {
		return out, nil
	}
	return nil, fmt.Errorf("ffmpeg last-frame scaled: empty output")
}
// extractFirstFrameJPEGScaled grabs the first frame of the video at path,
// scaled to the given width (default 320) with JPEG quality q (default
// 14), and fails on empty ffmpeg output.
func extractFirstFrameJPEGScaled(path string, width int, q int) ([]byte, error) {
	if width <= 0 {
		width = 320
	}
	if q <= 0 {
		q = 14
	}
	cmd := exec.Command(
		ffmpegPath,
		"-hide_banner", "-loglevel", "error",
		"-ss", "0",
		"-i", path,
		"-frames:v", "1",
		"-vf", "scale="+strconv.Itoa(width)+":-2",
		"-q:v", strconv.Itoa(q),
		"-f", "image2pipe",
		"pipe:1",
	)
	var stdout, errBuf bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &errBuf
	if err := cmd.Run(); err != nil {
		return nil, fmt.Errorf("ffmpeg first-frame scaled: %w (%s)", err, strings.TrimSpace(errBuf.String()))
	}
	if out := stdout.Bytes(); len(out) > 0 {
		return out, nil
	}
	return nil, fmt.Errorf("ffmpeg first-frame scaled: empty output")
}
// extractLastFrameFromPreviewDirThumb renders a low-res JPEG thumbnail
// from the newest HLS preview segment, preferring its last frame and
// falling back to its first frame.
func extractLastFrameFromPreviewDirThumb(previewDir string) ([]byte, error) {
	seg, err := latestPreviewSegment(previewDir)
	if err != nil {
		return nil, err
	}
	// Low-res; fall back to the first frame if the last frame fails.
	if img, lerr := extractLastFrameJPEGScaled(seg, 320, 14); lerr == nil && len(img) > 0 {
		return img, nil
	}
	return extractFirstFrameJPEGScaled(seg, 320, 14)
}
// sucht das "neueste" Preview-Segment (seg_low_XXXXX.ts / seg_hq_XXXXX.ts)
func latestPreviewSegment(previewDir string) (string, error) {
entries, err := os.ReadDir(previewDir)
if err != nil {
return "", err
}
var best string
for _, e := range entries {
if e.IsDir() {
continue
}
name := e.Name()
if !strings.HasPrefix(name, "seg_low_") && !strings.HasPrefix(name, "seg_hq_") {
continue
}
if best == "" || name > best {
best = name
}
}
if best == "" {
return "", fmt.Errorf("kein Preview-Segment in %s", previewDir)
}
return filepath.Join(previewDir, best), nil
}
// extractLastFrameFromPreviewDir produces a JPEG from the newest preview
// segment; if grabbing the last frame fails, the segment's first frame is
// used instead.
func extractLastFrameFromPreviewDir(previewDir string) ([]byte, error) {
	seg, err := latestPreviewSegment(previewDir)
	if err != nil {
		return nil, err
	}
	// The segment is small and finalized — one last-frame attempt with a
	// first-frame fallback is enough.
	if img, lerr := extractLastFrameJPEG(seg); lerr == nil {
		return img, nil
	}
	return extractFirstFrameJPEG(seg)
}
// stripHotPrefix removes a leading "HOT " marker (case-insensitive) from
// the trimmed input; other values pass through trimmed.
func stripHotPrefix(s string) string {
	s = strings.TrimSpace(s)
	if len(s) < 4 || !strings.EqualFold(s[:4], "HOT ") {
		return s
	}
	return strings.TrimSpace(s[4:])
}
// --------------------------
// Covers: generated/covers/<category>.<ext>
// --------------------------

// coverInfo is the sidecar metadata persisted next to a category cover
// image (as <key>.info.json by writeCoverInfoBestEffort).
type coverInfo struct {
	Category string `json:"category"`
	Model string `json:"model,omitempty"`
	Src string `json:"src,omitempty"`
	GeneratedAt string `json:"generatedAt"`
}
// normalizeCoverSrc rewrites a cover source reference into a web-usable
// form: backslashes become slashes, "/generated/..." and http(s) URLs
// pass through, and local paths are reduced to their "/generated/..."
// suffix when present.
func normalizeCoverSrc(s string) string {
	if s = strings.TrimSpace(s); s == "" {
		return ""
	}
	// Windows separators -> URL style.
	u := strings.ReplaceAll(s, "\\", "/")
	// Already looks like a web path? Keep it.
	switch {
	case strings.HasPrefix(u, "/generated/"),
		strings.HasPrefix(u, "http://"),
		strings.HasPrefix(u, "https://"):
		return u
	}
	// Local path: try to extract the /generated/ tail, e.g.
	// ".../generated/meta/<id>/thumbs.jpg" -> "/generated/meta/<id>/thumbs.jpg".
	if i := strings.Index(u, "/generated/"); i >= 0 {
		return u[i:]
	}
	return u
}
// coverInfoPathForKey returns the sidecar JSON path for a cover key:
// <coversRoot>/<key>.info.json.
func coverInfoPathForKey(key string) (string, error) {
	root, err := coversRoot()
	if err != nil {
		return "", err
	}
	name := key + ".info.json"
	return filepath.Join(root, name), nil
}
// writeCoverInfoBestEffort persists the cover sidecar JSON for key,
// creating the parent directory first; all path/marshal/IO errors are
// silently ignored (best effort).
func writeCoverInfoBestEffort(key string, info coverInfo) {
	p, pathErr := coverInfoPathForKey(key)
	if pathErr != nil {
		return
	}
	data, marshalErr := json.MarshalIndent(info, "", " ")
	if marshalErr != nil {
		return
	}
	_ = os.MkdirAll(filepath.Dir(p), 0o755)
	_ = os.WriteFile(p, data, 0o644)
}
// readCoverInfoBestEffort loads the cover sidecar JSON for key; the bool
// is false when the file is missing, empty, or unparseable.
func readCoverInfoBestEffort(key string) (coverInfo, bool) {
	p, err := coverInfoPathForKey(key)
	if err != nil {
		return coverInfo{}, false
	}
	data, readErr := os.ReadFile(p)
	if readErr != nil || len(data) == 0 {
		return coverInfo{}, false
	}
	var ci coverInfo
	if unmarshalErr := json.Unmarshal(data, &ci); unmarshalErr != nil {
		return coverInfo{}, false
	}
	return ci, true
}
// drawLabel renders text as a badge in the bottom-left corner of img:
// semi-transparent black background, thin light border, 1px drop shadow
// and white text in the basicfont 7x13 face. Text that would overflow is
// binary-search-truncated with an ellipsis; if the image is too small for
// the badge, nothing is drawn.
func drawLabel(img draw.Image, text string) {
	text = strings.TrimSpace(text)
	if text == "" {
		return
	}
	face := basicfont.Face7x13
	// Layout constants (pixels).
	const margin = 10
	const padX = 10
	const padY = 8
	b := img.Bounds()
	// Max text width available (excluding padding/margins).
	maxTextW := (b.Dx() - 2*margin) - 2*padX
	if maxTextW <= 0 {
		return
	}
	// Truncate the text if needed so it fits the badge.
	measure := func(s string) int {
		d := &font.Drawer{Face: face}
		return d.MeasureString(s).Ceil()
	}
	label := text
	if w := measure(label); w > maxTextW {
		ellipsis := "…"
		rs := []rune(text)
		// Hard guard against degenerate input.
		if len(rs) == 0 {
			return
		}
		// Binary search for the longest prefix that still fits.
		lo, hi := 0, len(rs)
		best := ""
		for lo <= hi {
			mid := (lo + hi) / 2
			cand := string(rs[:mid]) + ellipsis
			if measure(cand) <= maxTextW {
				best = cand
				lo = mid + 1
			} else {
				hi = mid - 1
			}
		}
		if best == "" {
			// Worst case: just the ellipsis.
			label = ellipsis
		} else {
			label = best
		}
	}
	// Text metrics.
	d := &font.Drawer{Face: face}
	textW := d.MeasureString(label).Ceil()
	textH := face.Metrics().Height.Ceil()
	ascent := face.Metrics().Ascent.Ceil()
	// Badge box (bottom left).
	x0 := b.Min.X + margin
	y1 := b.Max.Y - margin
	y0 := y1 - (textH + 2*padY)
	x1 := x0 + (textW + 2*padX)
	// Clamp to the right (very narrow images).
	maxX1 := b.Max.X - margin
	if x1 > maxX1 {
		shift := x1 - maxX1
		x0 -= shift
		x1 -= shift
		if x0 < b.Min.X+margin {
			x0 = b.Min.X + margin
			x1 = maxX1
		}
	}
	// Clamp upwards (very short images).
	minY0 := b.Min.Y + margin
	if y0 < minY0 {
		y0 = minY0
		y1 = y0 + (textH + 2*padY)
		if y1 > b.Max.Y-margin {
			// Not enough room overall.
			return
		}
	}
	rect := image.Rect(x0, y0, x1, y1)
	// Background.
	bg := image.NewUniform(color.RGBA{0, 0, 0, 170})
	draw.Draw(img, rect, bg, image.Point{}, draw.Over)
	// Optional: thin border for extra contrast.
	border := image.NewUniform(color.RGBA{255, 255, 255, 35})
	// top
	draw.Draw(img, image.Rect(rect.Min.X, rect.Min.Y, rect.Max.X, rect.Min.Y+1), border, image.Point{}, draw.Over)
	// bottom
	draw.Draw(img, image.Rect(rect.Min.X, rect.Max.Y-1, rect.Max.X, rect.Max.Y), border, image.Point{}, draw.Over)
	// left
	draw.Draw(img, image.Rect(rect.Min.X, rect.Min.Y, rect.Min.X+1, rect.Max.Y), border, image.Point{}, draw.Over)
	// right
	draw.Draw(img, image.Rect(rect.Max.X-1, rect.Min.Y, rect.Max.X, rect.Max.Y), border, image.Point{}, draw.Over)
	// Text baseline.
	tx := x0 + padX
	ty := y0 + padY + ascent
	// Small drop shadow (readability).
	shadow := &font.Drawer{
		Dst: img,
		Src: image.NewUniform(color.RGBA{0, 0, 0, 200}),
		Face: face,
		Dot: fixed.P(tx+1, ty+1),
	}
	shadow.DrawString(label)
	// Text.
	fg := &font.Drawer{
		Dst: img,
		Src: image.NewUniform(color.RGBA{255, 255, 255, 235}),
		Face: face,
		Dot: fixed.P(tx, ty),
	}
	fg.DrawString(label)
}
// splitTagsLoose splits a raw tag string on newlines, commas, semicolons
// and pipes, trims each piece, and deduplicates case-insensitively while
// keeping the first-seen casing and order. Blank input yields nil.
func splitTagsLoose(raw string) []string {
	raw = strings.TrimSpace(raw)
	if raw == "" {
		return nil
	}
	isSep := func(r rune) bool {
		return r == '\n' || r == ',' || r == ';' || r == '|'
	}
	parts := strings.FieldsFunc(raw, isSep)
	seen := make(map[string]struct{}, len(parts))
	out := make([]string, 0, len(parts))
	for _, part := range parts {
		tag := strings.TrimSpace(part)
		if tag == "" {
			continue
		}
		key := strings.ToLower(tag)
		if _, dup := seen[key]; dup {
			continue
		}
		seen[key] = struct{}{}
		out = append(out, tag)
	}
	return out
}
// hasTag reports whether the loosely-delimited tag list tagsRaw (split on
// newlines, commas, semicolons and pipes) contains want; the comparison
// is trimmed and case-insensitive, and a blank want never matches.
func hasTag(tagsRaw string, want string) bool {
	want = strings.ToLower(strings.TrimSpace(want))
	if want == "" {
		return false
	}
	sep := func(r rune) bool {
		return r == '\n' || r == ',' || r == ';' || r == '|'
	}
	for _, part := range strings.FieldsFunc(tagsRaw, sep) {
		if strings.ToLower(strings.TrimSpace(part)) == want {
			return true
		}
	}
	return false
}
// ✅ Adapt this struct/these functions to match your real ModelStore API.

// coverModel is the minimal per-model view used for cover generation.
type coverModel struct {
	Key string // e.g. model key/name
	Tags string // raw tags (csv/newline/…)
}

// listModelsForCovers snapshots all models from the global
// coverModelStore, skipping entries with a blank key. Errors only when
// the store has not been wired up.
func listModelsForCovers() ([]coverModel, error) {
	if coverModelStore == nil {
		return nil, fmt.Errorf("model store not set")
	}
	ms := coverModelStore.List() // ✅ exists in this project
	out := make([]coverModel, 0, len(ms))
	for _, m := range ms {
		key := strings.TrimSpace(m.ModelKey)
		if key == "" {
			continue
		}
		out = append(out, coverModel{
			Key: key,
			Tags: m.Tags,
		})
	}
	return out, nil
}
// pickRandomThumbForCategory returns a thumbnail path for a random
// finished download of a random model tagged with category. Tagged
// models are shuffled and tried in turn; per model a random video from
// done/<model>/ or done/keep/<model>/ is picked and its thumbnail is
// generated on demand (best effort). Fails when no tagged model has a
// usable download/thumbnail. Honors ctx cancellation between iterations.
func pickRandomThumbForCategory(ctx context.Context, category string) (thumbPath string, err error) {
	category = strings.TrimSpace(category)
	if category == "" {
		return "", fmt.Errorf("category empty")
	}
	// Optional: bail out early if the request is already gone.
	select {
	case <-ctx.Done():
		return "", ctx.Err()
	default:
	}
	models, err := listModelsForCovers()
	if err != nil {
		return "", err
	}
	// 1) Filter candidate models by tag.
	cands := make([]coverModel, 0, 64)
	for _, m := range models {
		key := strings.TrimSpace(m.Key)
		if key == "" {
			continue
		}
		if hasTag(m.Tags, category) {
			cands = append(cands, coverModel{Key: key, Tags: m.Tags})
		}
	}
	if len(cands) == 0 {
		return "", fmt.Errorf("no model with tag")
	}
	// 2) Shuffle and try candidates one by one (more robust than a
	// single random pick).
	rand.Shuffle(len(cands), func(i, j int) { cands[i], cands[j] = cands[j], cands[i] })
	// 3) Resolve the done dir once.
	s := getSettings()
	doneAbs, derr := resolvePathRelativeToApp(s.DoneDir)
	if derr != nil || strings.TrimSpace(doneAbs) == "" {
		return "", fmt.Errorf("doneDir resolve failed: %v", derr)
	}
	type candFile struct {
		videoPath string
		id string
	}
	// isVideo accepts finished .mp4/.ts files (no .part/.tmp leftovers).
	isVideo := func(name string) bool {
		low := strings.ToLower(name)
		if strings.Contains(low, ".part") || strings.Contains(low, ".tmp") {
			return false
		}
		ext := strings.ToLower(filepath.Ext(name))
		return ext == ".mp4" || ext == ".ts"
	}
	// 4) For each matching model: collect files, pick one at random,
	// check its thumbnail.
	for _, m := range cands {
		// Context check per iteration.
		select {
		case <-ctx.Done():
			return "", ctx.Err()
		default:
		}
		modelKey := strings.TrimSpace(m.Key)
		if modelKey == "" {
			continue
		}
		// Candidate dirs: done/<model>/ and done/keep/<model>/.
		dirs := []string{
			filepath.Join(doneAbs, modelKey),
			filepath.Join(doneAbs, "keep", modelKey),
		}
		files := make([]candFile, 0, 128)
		for _, d := range dirs {
			ents, err := os.ReadDir(d)
			if err != nil {
				continue
			}
			for _, e := range ents {
				if e.IsDir() {
					continue
				}
				name := e.Name()
				if !isVideo(name) {
					continue
				}
				full := filepath.Join(d, name)
				stem := strings.TrimSuffix(name, filepath.Ext(name))
				id := stripHotPrefix(strings.TrimSpace(stem))
				if id == "" {
					continue
				}
				files = append(files, candFile{videoPath: full, id: id})
			}
		}
		if len(files) == 0 {
			// ✅ This model has no downloads (yet) -> try the next one.
			continue
		}
		// Random file within this model.
		cf := files[rand.Intn(len(files))]
		// Ensure thumbs exist (best effort).
		_ = ensureAssetsForVideo(cf.videoPath)
		tp, terr := generatedThumbFile(cf.id)
		if terr != nil {
			// Try the next model.
			continue
		}
		if fi, serr := os.Stat(tp); serr == nil && !fi.IsDir() && fi.Size() > 0 {
			return tp, nil
		}
		// ✅ Thumbnail missing -> try the next model.
	}
	return "", fmt.Errorf("no downloads/thumbs for category")
}
// coversRoot resolves the generated/covers directory relative to the app.
func coversRoot() (string, error) {
	return resolvePathRelativeToApp(filepath.Join("generated", "covers"))
}
// ensureCoversDir resolves the covers root and creates it if missing,
// returning the absolute directory path.
func ensureCoversDir() (string, error) {
	root, err := coversRoot()
	if err != nil {
		return "", err
	}
	if strings.TrimSpace(root) == "" {
		return "", fmt.Errorf("covers root ist leer")
	}
	if mkErr := os.MkdirAll(root, 0o755); mkErr != nil {
		return "", mkErr
	}
	return root, nil
}
// coverKeyRe matches every run of characters not allowed in a cover key.
var coverKeyRe = regexp.MustCompile(`[^a-z0-9._-]+`)

// sanitizeCoverKey turns a category name into a filesystem-safe cover
// key: lowercased and trimmed, spaces and disallowed runs collapsed to
// "_", leading/trailing "._-" stripped, capped at 120 bytes. Blank
// categories get a deterministic "tag_<sha1-prefix>" key; inputs that
// sanitize to nothing yield an error.
func sanitizeCoverKey(category string) (string, error) {
	key := strings.ToLower(strings.TrimSpace(category))
	if key == "" {
		// Derive a stable key from the raw (untrimmed) input.
		sum := sha1.Sum([]byte(category))
		key = "tag_" + hex.EncodeToString(sum[:8]) // 16 hex chars are enough
	}
	// Windows & filesystem safe.
	key = strings.ReplaceAll(key, " ", "_")
	key = coverKeyRe.ReplaceAllString(key, "_")
	key = strings.Trim(key, "._-")
	if key == "" {
		return "", fmt.Errorf("category ungültig")
	}
	if len(key) > 120 {
		key = key[:120]
	}
	return key, nil
}
// detectImageExt picks a file extension and canonical content type for
// image bytes, preferring the server-supplied Content-Type and falling
// back to magic-byte sniffing; unknown data defaults to JPEG.
func detectImageExt(contentType string, b []byte) (ext string, ct string) {
	ct = strings.ToLower(strings.TrimSpace(contentType))
	// Server-declared content type first.
	switch {
	case strings.Contains(ct, "image/jpeg"), strings.Contains(ct, "image/jpg"):
		return ".jpg", "image/jpeg"
	case strings.Contains(ct, "image/png"):
		return ".png", "image/png"
	case strings.Contains(ct, "image/webp"):
		return ".webp", "image/webp"
	case strings.Contains(ct, "image/gif"):
		return ".gif", "image/gif"
	}
	// Magic-byte fallback.
	switch {
	case len(b) >= 3 && b[0] == 0xFF && b[1] == 0xD8 && b[2] == 0xFF:
		return ".jpg", "image/jpeg"
	case len(b) >= 8 && bytes.Equal(b[:8], []byte{0x89, 'P', 'N', 'G', 0x0D, 0x0A, 0x1A, 0x0A}):
		return ".png", "image/png"
	case len(b) >= 12 && string(b[:4]) == "RIFF" && string(b[8:12]) == "WEBP":
		return ".webp", "image/webp"
	case len(b) >= 6 && (string(b[:6]) == "GIF87a" || string(b[:6]) == "GIF89a"):
		return ".gif", "image/gif"
	}
	// Default: jpg.
	return ".jpg", "image/jpeg"
}
// coverPathForCategory builds <coversRoot>/<key><ext>; ext defaults to
// ".jpg" when empty.
func coverPathForCategory(key string, ext string) (string, error) {
	if ext == "" {
		ext = ".jpg"
	}
	root, err := coversRoot()
	if err != nil {
		return "", err
	}
	if strings.TrimSpace(root) == "" {
		return "", fmt.Errorf("covers root ist leer")
	}
	return filepath.Join(root, key+ext), nil
}
// findExistingCoverFile looks for <key>.{jpg,png,webp,gif} under the
// covers root and returns the first non-empty regular file found.
func findExistingCoverFile(key string) (string, os.FileInfo, bool) {
	root, err := coversRoot()
	if err != nil || strings.TrimSpace(root) == "" {
		return "", nil, false
	}
	// Probe the common extensions in order.
	for _, ext := range []string{".jpg", ".png", ".webp", ".gif"} {
		candidate := filepath.Join(root, key+ext)
		fi, statErr := os.Stat(candidate)
		if statErr == nil && !fi.IsDir() && fi.Size() > 0 {
			return candidate, fi, true
		}
	}
	return "", nil, false
}
// downloadBytes fetches image bytes (max 10MB) either from a local
// "/generated/..." path (resolved relative to the app, traversal
// rejected) or from an http/https URL. It returns the data and a content
// type; ua is the User-Agent for remote fetches (default "Mozilla/5.0").
func downloadBytes(ctx context.Context, rawURL string, ua string) ([]byte, string, error) {
	rawURL = strings.TrimSpace(rawURL)
	if rawURL == "" {
		return nil, "", fmt.Errorf("src fehlt")
	}
	// ✅ 1) Local path: only /generated/... is allowed.
	if strings.HasPrefix(rawURL, "/") {
		// Normalize the URL path and block traversal.
		clean := path.Clean(rawURL) // URL-style cleaning
		if !strings.HasPrefix(clean, "/generated/") {
			return nil, "", fmt.Errorf("src ungültig")
		}
		if strings.Contains(clean, "..") {
			return nil, "", fmt.Errorf("src ungültig")
		}
		// "/generated/..." -> "generated/..." (relative to the app).
		rel := strings.TrimPrefix(clean, "/")
		abs, err := resolvePathRelativeToApp(rel)
		if err != nil || strings.TrimSpace(abs) == "" {
			return nil, "", fmt.Errorf("src ungültig")
		}
		f, err := os.Open(abs)
		if err != nil {
			return nil, "", fmt.Errorf("download failed: %v", err)
		}
		defer f.Close()
		// Read at most 10MB.
		b, err := io.ReadAll(io.LimitReader(f, 10*1024*1024))
		if err != nil {
			return nil, "", fmt.Errorf("download failed: %v", err)
		}
		if len(b) == 0 {
			return nil, "", fmt.Errorf("download empty")
		}
		// Rough content type by extension (detectImageExt does
		// magic-byte sniffing later anyway).
		ext := strings.ToLower(filepath.Ext(abs))
		ct := "application/octet-stream"
		switch ext {
		case ".jpg", ".jpeg":
			ct = "image/jpeg"
		case ".png":
			ct = "image/png"
		case ".webp":
			ct = "image/webp"
		case ".gif":
			ct = "image/gif"
		}
		return b, ct, nil
	}
	// ✅ 2) Remote URL: http/https only, as before.
	u, err := url.Parse(rawURL)
	if err != nil || u.Scheme == "" || u.Host == "" {
		return nil, "", fmt.Errorf("src ungültig")
	}
	if u.Scheme != "http" && u.Scheme != "https" {
		return nil, "", fmt.Errorf("src schema nicht erlaubt")
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, rawURL, nil)
	if err != nil {
		return nil, "", err
	}
	if strings.TrimSpace(ua) == "" {
		ua = "Mozilla/5.0"
	}
	req.Header.Set("User-Agent", ua)
	req.Header.Set("Accept", "image/*,*/*;q=0.8")
	client := &http.Client{Timeout: 12 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		return nil, "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, "", fmt.Errorf("download failed: HTTP %d", resp.StatusCode)
	}
	b, err := io.ReadAll(io.LimitReader(resp.Body, 10*1024*1024)) // max 10MB
	if err != nil {
		return nil, "", err
	}
	if len(b) == 0 {
		return nil, "", fmt.Errorf("download empty")
	}
	return b, resp.Header.Get("Content-Type"), nil
}
// generatedRoot returns the absolute path of the "generated" asset
// directory, resolved relative to the application directory.
func generatedRoot() (string, error) {
	return resolvePathRelativeToApp("generated")
}
// generatedMetaRoot returns the absolute path of "generated/meta",
// resolved relative to the application directory.
func generatedMetaRoot() (string, error) {
	return resolvePathRelativeToApp(filepath.Join("generated", "meta"))
}
// generatedThumbsRoot returns the absolute path of "generated/thumbs".
// Legacy location (in case old assets are still lying around).
func generatedThumbsRoot() (string, error) {
	return resolvePathRelativeToApp(filepath.Join("generated", "thumbs"))
}
// generatedTeaserRoot returns the absolute path of "generated/teaser"
// (legacy asset location as well).
func generatedTeaserRoot() (string, error) {
	return resolvePathRelativeToApp(filepath.Join("generated", "teaser"))
}
// Package-level batch statistics for cover generation. A "batch" is a burst
// of concurrent cover requests: counters accumulate while at least one
// request is in flight and are logged once when the last one leaves (see
// coverBatchEnter / coverBatchLeave). All fields are guarded by coverBatchMu.
var coverBatchMu sync.Mutex
var (
	coverBatchInflight  int       // number of cover generations currently running
	coverBatchStarted   time.Time // start time of the current batch
	coverBatchTotal     int       // total requests seen in this batch
	coverBatchForced    int       // requests with forced refresh
	coverBatchMiss      int       // plain cache misses
	coverBatchErrors    int       // requests that ended with HTTP status >= 400
	coverBatchNoThumb   int       // requests where no source thumb was found
	coverBatchDecodeErr int       // image decode failures (cover served without overlay)
)
// coverBatchEnter registers one cover-generation request with the batch
// counters. The first request of a batch resets all stats and logs the
// batch start; force marks a forced regeneration, otherwise the request
// counts as a plain cache miss.
func coverBatchEnter(force bool) {
	coverBatchMu.Lock()
	defer coverBatchMu.Unlock()
	if coverBatchInflight == 0 {
		// a new batch begins: reset every counter
		coverBatchStarted = time.Now()
		coverBatchTotal, coverBatchForced, coverBatchMiss = 0, 0, 0
		coverBatchErrors, coverBatchNoThumb, coverBatchDecodeErr = 0, 0, 0
		log.Printf("[cover] BATCH START")
	}
	coverBatchInflight++
	coverBatchTotal++
	switch {
	case force:
		coverBatchForced++
	default:
		coverBatchMiss++
	}
}
// coverBatchLeave records the outcome of one cover-generation request and,
// when the last in-flight request finishes, logs the batch summary and
// resets the in-flight counter.
func coverBatchLeave(outcome string, status int) {
	coverBatchMu.Lock()
	defer coverBatchMu.Unlock()
	// coarse but useful outcome stats
	if status >= 400 {
		coverBatchErrors++
	}
	if outcome == "no-thumb" {
		coverBatchNoThumb++
	} else if outcome == "decode-failed-no-overlay" {
		coverBatchDecodeErr++
	}
	coverBatchInflight--
	if coverBatchInflight > 0 {
		return
	}
	elapsed := time.Since(coverBatchStarted).Round(time.Millisecond)
	log.Printf(
		"[cover] BATCH END total=%d miss=%d forced=%d errors=%d noThumb=%d decodeFail=%d took=%s",
		coverBatchTotal, coverBatchMiss, coverBatchForced,
		coverBatchErrors, coverBatchNoThumb, coverBatchDecodeErr,
		elapsed,
	)
	coverBatchInflight = 0
}
// ----------------------------------------------------------------------
// Model-name inference helpers (regex + filename/path parsing)
// reModelFromStem captures the model name from a recording file stem like
// "sigmasian_01_21_2026__07-28-13": everything before the trailing
// MM_DD_YYYY__HH-MM-SS timestamp.
var (
	reModelFromStem = regexp.MustCompile(`^(.*?)_\d{1,2}_\d{1,2}_\d{4}__\d{1,2}-\d{2}-\d{2}`)
)
// inferModelFromStem extracts the model name from a file stem such as
// "sigmasian_01_21_2026__07-28-13" (or a parent directory name of that
// form). Returns "" when no model can be derived.
func inferModelFromStem(stem string) string {
	stem = stripHotPrefix(strings.TrimSpace(stem))
	if stem == "" {
		return ""
	}
	if m := reModelFromStem.FindStringSubmatch(stem); len(m) >= 2 {
		return strings.TrimSpace(m[1])
	}
	return ""
}
// inferModelFromThumbLike derives a model name from a thumb-like reference.
// Accepted forms:
//   - "/generated/meta/<id>/thumbs.jpg"
//   - "C:\...\generated\meta\<id>\thumbs.jpg"
//   - "http(s)://host/generated/meta/<id>/thumbs.jpg"
//   - (fallback) any filename stem shaped like "<model>_MM_DD_YYYY__HH-MM-ss"
func inferModelFromThumbLike(srcOrPath string) string {
	ref := strings.TrimSpace(srcOrPath)
	if ref == "" {
		return ""
	}
	// normalize Windows separators to forward slashes
	ref = strings.ReplaceAll(ref, `\`, `/`)
	// for absolute URLs only the path component matters
	if u, err := url.Parse(ref); err == nil && u != nil && u.Scheme != "" && u.Host != "" {
		ref = u.Path
	}
	base := path.Base(ref)
	// ".../<id>/thumbs.*" -> the parent directory name is the asset id
	if strings.HasPrefix(strings.ToLower(base), "thumbs.") {
		return inferModelFromStem(path.Base(path.Dir(ref)))
	}
	// fallback: try the stem of the basename itself
	return inferModelFromStem(strings.TrimSuffix(base, path.Ext(base)))
}
// ----------------------------------------------------------------------
// Cover-info listing handler
// coverInfoListItem is one row of the cover-info listing produced by
// generatedCoverInfoList. Model/GeneratedAt are only populated when a
// cover image actually exists for the category.
type coverInfoListItem struct {
	Category    string `json:"category"`
	Model       string `json:"model,omitempty"`
	GeneratedAt string `json:"generatedAt,omitempty"`
	HasCover    bool   `json:"hasCover"`
}
// generatedCoverInfoList (GET/HEAD) lists all known cover categories with
// their persisted metadata. It scans the covers directory once, merging
// "<key>.info.json" metadata with the presence of an actual cover image.
// Model/GeneratedAt are cleared for keys without a cover image so that
// stale metadata is never exposed. Output is sorted by key for stability.
func generatedCoverInfoList(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet && r.Method != http.MethodHead {
		http.Error(w, "Nur GET/HEAD erlaubt", http.StatusMethodNotAllowed)
		return
	}
	root, err := coversRoot()
	if err != nil {
		http.Error(w, "covers root: "+err.Error(), http.StatusInternalServerError)
		return
	}
	entries, err := os.ReadDir(root)
	if err != nil {
		http.Error(w, "covers dir: "+err.Error(), http.StatusInternalServerError)
		return
	}
	byKey := map[string]*coverInfoListItem{}
	// ensure returns the (possibly new) aggregate entry for key.
	ensure := func(key string) *coverInfoListItem {
		if v, ok := byKey[key]; ok {
			return v
		}
		v := &coverInfoListItem{Category: key}
		byKey[key] = v
		return v
	}
	isCoverExt := func(ext string) bool {
		switch strings.ToLower(ext) {
		case ".jpg", ".jpeg", ".png", ".webp", ".gif":
			return true
		default:
			return false
		}
	}
	const infoSuffix = ".info.json"
	for _, e := range entries {
		// directories can never be covers or info files
		if e.IsDir() {
			continue
		}
		name := e.Name()
		lower := strings.ToLower(name)
		// metadata file: "<key>.info.json"
		if strings.HasSuffix(lower, infoSuffix) {
			// FIX: trim by length so mixed-case suffixes (e.g. ".Info.JSON")
			// still yield the bare key — strings.TrimSuffix was case-sensitive
			// while the HasSuffix check above was not.
			key := name[:len(name)-len(infoSuffix)]
			if ci, ok := readCoverInfoBestEffort(key); ok {
				v := ensure(key)
				if strings.TrimSpace(ci.Category) != "" {
					v.Category = strings.TrimSpace(ci.Category)
				}
				if strings.TrimSpace(ci.Model) != "" {
					v.Model = strings.TrimSpace(ci.Model)
				}
				if strings.TrimSpace(ci.GeneratedAt) != "" {
					v.GeneratedAt = strings.TrimSpace(ci.GeneratedAt)
				}
			}
			continue
		}
		// cover image file: "<key>.<ext>"
		ext := filepath.Ext(name)
		if isCoverExt(ext) {
			key := strings.TrimSuffix(name, ext)
			v := ensure(key)
			v.HasCover = true
		}
	}
	// Only expose model metadata when a cover image really exists.
	for _, v := range byKey {
		if !v.HasCover {
			v.Model = ""
			v.GeneratedAt = ""
		}
		if strings.TrimSpace(v.Category) == "" {
			v.Category = ""
		}
	}
	// sorted (stable) output
	keys := make([]string, 0, len(byKey))
	for k := range byKey {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	out := make([]coverInfoListItem, 0, len(keys))
	for _, k := range keys {
		out = append(out, *byKey[k])
	}
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.Header().Set("Cache-Control", "no-store")
	if r.Method == http.MethodHead {
		w.WriteHeader(http.StatusOK)
		return
	}
	_ = json.NewEncoder(w).Encode(out)
}
// generatedCover (GET/HEAD) serves the cover image for a category.
//
// Flow:
//  1. sanitize the category into a cover key; read the optional "refresh",
//     "model" and "src" query parameters (model may also come from the
//     persisted info.json or be inferred from a thumb path/URL),
//  2. on a cache hit (no forced refresh and no model mismatch against
//     info.json) the persisted cover is served directly from disk with
//     long-lived caching headers,
//  3. otherwise a source image is obtained — an explicit src download or a
//     random thumb for the category — decoded and re-encoded (overlay step),
//     previous cover files are removed, the new cover is written atomically,
//     and info.json is updated best-effort.
//
// Batch statistics (coverBatchEnter/Leave) are only recorded on the
// generate path, never on plain cache hits. X-Cover-* headers expose
// debug information to the frontend.
func generatedCover(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet && r.Method != http.MethodHead {
		http.Error(w, "Nur GET/HEAD erlaubt", http.StatusMethodNotAllowed)
		return
	}
	category := r.URL.Query().Get("category")
	key, err := sanitizeCoverKey(category)
	if err != nil {
		http.Error(w, "category ungültig: "+err.Error(), http.StatusBadRequest)
		return
	}
	refresh := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("refresh")))
	force := refresh == "1" || refresh == "true" || refresh == "yes"
	// Optional: model overlay
	modelQ := strings.TrimSpace(r.URL.Query().Get("model"))
	modelExplicit := modelQ != ""
	model := modelQ
	// Optional: src (Frontend kann ein konkretes Thumb vorgeben)
	src := strings.TrimSpace(r.URL.Query().Get("src"))
	fallbackModel := ""
	if ci, ok := readCoverInfoBestEffort(key); ok {
		if m := strings.TrimSpace(ci.Model); m != "" {
			fallbackModel = m
		}
	}
	// Wenn kein explizites model im Request: erstmal fallback übernehmen
	if model == "" {
		model = fallbackModel
	}
	// Wenn src gesetzt ist und model NICHT explizit: schon hier aus src ableiten (ok)
	if !modelExplicit && src != "" {
		if m := inferModelFromThumbLike(src); m != "" {
			model = m
		}
	}
	// kleine Request-ID (nur als Header/Debug; kein per-request log)
	reqID := strconv.FormatInt(time.Now().UnixNano(), 36)
	setDebugHeaders := func(cache string) {
		w.Header().Set("X-Cover-Key", key)
		w.Header().Set("X-Cover-Category", category)
		if model != "" {
			w.Header().Set("X-Cover-Model", model)
		}
		w.Header().Set("X-Cover-Cache", cache) // HIT | MISS | FORCED
		w.Header().Set("X-Request-Id", reqID)
	}
	// 1) Cache hit: direkt von Disk (nur wenn nicht force)
	if !force {
		// Wenn model im Request/abgeleitet da ist: info.json muss existieren & gleich sein, sonst neu erzeugen
		if model != "" {
			if ci, ok := readCoverInfoBestEffort(key); ok {
				if strings.TrimSpace(ci.Model) != model { // auch wenn ci.Model leer ist
					force = true
				}
			} else {
				// kein info.json -> force, damit wir model persistent bekommen
				force = true
			}
		}
		if !force {
			if p, fi, ok := findExistingCoverFile(key); ok {
				setDebugHeaders("HIT")
				// ✅ Cache-Hit: Cover existiert wirklich -> info.json best-effort updaten,
				// aber NUR wenn wir ein Model kennen (damit "kein Cover" auch kein Model zeigt)
				if model != "" {
					ci, ok := readCoverInfoBestEffort(key)
					if !ok {
						ci = coverInfo{Category: category}
					}
					ci.Category = category
					ci.Model = strings.TrimSpace(model)
					// Src NICHT überschreiben, wenn wir es bei HIT nicht kennen
					// ci.Src bleibt wie gehabt
					ci.GeneratedAt = time.Now().UTC().Format(time.RFC3339Nano)
					writeCoverInfoBestEffort(key, ci)
				}
				w.Header().Set("Cache-Control", "public, max-age=31536000")
				w.Header().Set("X-Content-Type-Options", "nosniff")
				ext := strings.ToLower(filepath.Ext(p))
				switch ext {
				case ".png":
					w.Header().Set("Content-Type", "image/png")
				case ".webp":
					w.Header().Set("Content-Type", "image/webp")
				case ".gif":
					w.Header().Set("Content-Type", "image/gif")
				default:
					w.Header().Set("Content-Type", "image/jpeg")
				}
				if r.Method == http.MethodHead {
					w.WriteHeader(http.StatusOK)
					return
				}
				f, err := os.Open(p)
				if err != nil {
					http.NotFound(w, r)
					return
				}
				defer f.Close()
				http.ServeContent(w, r, filepath.Base(p), fi.ModTime(), f)
				return
			}
		}
	}
	// Ab hier: entweder MISS oder FORCED => wir erzeugen (oder versuchen es)
	cacheStatus := "MISS"
	if force {
		cacheStatus = "FORCED"
	}
	setDebugHeaders(cacheStatus)
	// ✅ Globales Batch-Logging: nur beim echten Generate-Path
	coverBatchEnter(force)
	start := time.Now()
	status := http.StatusOK
	outcome := "ok"
	defer func() {
		w.Header().Set("X-Cover-Gen-Ms", strconv.FormatInt(time.Since(start).Milliseconds(), 10))
		coverBatchLeave(outcome, status)
	}()
	// 2) Kein Cache (oder force): Cover erzeugen und persistieren
	if _, err := ensureCoversDir(); err != nil {
		status = http.StatusInternalServerError
		outcome = "covers-dir"
		http.Error(w, "covers-dir nicht verfügbar: "+err.Error(), status)
		return
	}
	ctx, cancel := context.WithTimeout(r.Context(), 20*time.Second)
	defer cancel()
	var (
		raw      []byte
		mimeType string
		ext      string
	)
	thumbPath := "" // nur fürs Ableiten von model, wenn src leer ist
	usedSrc := ""   // ✅ das speichern wir später in coverInfo
	// 2a) Wenn src gegeben: daraus bauen (lokal /generated/... oder http(s))
	if src != "" {
		var derr error
		raw, mimeType, derr = downloadBytes(ctx, src, r.Header.Get("User-Agent"))
		usedSrc = normalizeCoverSrc(src)
		if derr != nil {
			status = http.StatusBadRequest
			outcome = "src-download"
			http.Error(w, "src download failed: "+derr.Error(), status)
			return
		}
		ext, mimeType = detectImageExt(mimeType, raw)
		if len(raw) == 0 {
			status = http.StatusBadRequest
			outcome = "src-empty"
			http.Error(w, "src leer", status)
			return
		}
		// ✅ falls model immer noch leer ist, versuch JETZT aus src abzuleiten
		if model == "" {
			if m := inferModelFromThumbLike(src); m != "" {
				model = m
				// Header aktualisieren (weil model jetzt da ist)
				w.Header().Set("X-Cover-Model", model)
			}
		}
	} else {
		// 2b) Sonst: Backend wählt random Thumb passend zur Category
		var perr error
		thumbPath, perr = pickRandomThumbForCategory(ctx, category)
		if perr != nil {
			// Wenn es schon ein Cover gibt, lieber das liefern (auch wenn force)
			if p, fi, ok := findExistingCoverFile(key); ok {
				outcome = "fallback-existing-cover"
				status = http.StatusOK
				w.Header().Set("Cache-Control", "public, max-age=600")
				w.Header().Set("X-Content-Type-Options", "nosniff")
				ext2 := strings.ToLower(filepath.Ext(p))
				switch ext2 {
				case ".png":
					w.Header().Set("Content-Type", "image/png")
				case ".webp":
					w.Header().Set("Content-Type", "image/webp")
				case ".gif":
					w.Header().Set("Content-Type", "image/gif")
				default:
					w.Header().Set("Content-Type", "image/jpeg")
				}
				if r.Method == http.MethodHead {
					w.WriteHeader(http.StatusOK)
					return
				}
				f, err := os.Open(p)
				if err != nil {
					servePreviewStatusSVG(w, "No Cover", status)
					return
				}
				defer f.Close()
				http.ServeContent(w, r, filepath.Base(p), fi.ModTime(), f)
				return
			}
			outcome = "no-thumb"
			status = http.StatusNotFound
			if r.Method == http.MethodHead {
				w.WriteHeader(status)
				return
			}
			servePreviewStatusSVG(w, "No Cover", status)
			return
		}
		usedSrc = normalizeCoverSrc(thumbPath)
		raw, err = os.ReadFile(thumbPath)
		if err != nil || len(raw) == 0 {
			status = http.StatusInternalServerError
			outcome = "thumb-read"
			http.Error(w, "cover read fehlgeschlagen", status)
			return
		}
		// thumbs bei dir sind JPEG
		ext = ".jpg"
		mimeType = "image/jpeg"
		// ✅ falls model leer ist: aus thumbPath ableiten
		// thumbPath ist i.d.R. ".../generated/meta/<id>/thumbs.jpg"
		if model == "" {
			if m := inferModelFromThumbLike(thumbPath); m != "" {
				model = m
				w.Header().Set("X-Cover-Model", model)
			}
		}
	}
	// ✅ Final: wenn model nicht explizit gesetzt wurde, binde es an usedSrc
	if !modelExplicit {
		if m := inferModelFromThumbLike(usedSrc); m != "" {
			model = m
			w.Header().Set("X-Cover-Model", model)
		}
	}
	// ✅ 3) Overlay + Re-Encode
	img, _, derr := image.Decode(bytes.NewReader(raw))
	if derr == nil && img != nil {
		rgba := image.NewRGBA(img.Bounds())
		draw.Draw(rgba, rgba.Bounds(), img, img.Bounds().Min, draw.Src)
		var buf bytes.Buffer
		switch strings.ToLower(ext) {
		case ".png":
			_ = png.Encode(&buf, rgba)
			raw = buf.Bytes()
			ext = ".png"
			mimeType = "image/png"
		default:
			_ = jpeg.Encode(&buf, rgba, &jpeg.Options{Quality: 85})
			raw = buf.Bytes()
			ext = ".jpg"
			mimeType = "image/jpeg"
		}
	} else {
		outcome = "decode-failed-no-overlay"
	}
	// 4) Vorherige Cover-Dateien entfernen
	root, _ := coversRoot()
	for _, e := range []string{".jpg", ".png", ".webp", ".gif"} {
		_ = os.Remove(filepath.Join(root, key+e))
	}
	_ = os.Remove(filepath.Join(root, key+".info.json"))
	// 5) Persistieren
	dst, err := coverPathForCategory(key, ext)
	if err != nil {
		status = http.StatusInternalServerError
		outcome = "cover-path"
		http.Error(w, "cover path: "+err.Error(), status)
		return
	}
	if err := atomicWriteFile(dst, raw); err != nil {
		status = http.StatusInternalServerError
		outcome = "cover-write"
		http.Error(w, "cover write: "+err.Error(), status)
		return
	}
	// ✅ 6) info.json schreiben (best-effort)
	// model ist jetzt: Query-Param ODER aus info.json ODER aus src/thumbPath abgeleitet
	writeCoverInfoBestEffort(key, coverInfo{
		Category:    category,
		Model:       strings.TrimSpace(model),
		Src:         strings.TrimSpace(usedSrc),
		GeneratedAt: time.Now().UTC().Format(time.RFC3339Nano),
	})
	// 7) Ausliefern
	w.Header().Set("Cache-Control", "public, max-age=600")
	w.Header().Set("Content-Type", mimeType)
	w.Header().Set("X-Content-Type-Options", "nosniff")
	w.Header().Set("X-Cover-Bytes", strconv.Itoa(len(raw)))
	if r.Method == http.MethodHead {
		w.WriteHeader(http.StatusOK)
		return
	}
	w.WriteHeader(http.StatusOK)
	_, _ = w.Write(raw)
}
// sanitizeModelKey normalizes a model key. Returns "" for unusable values:
// empty, the placeholder dash, or anything containing path separators.
func sanitizeModelKey(k string) string {
	k = stripHotPrefix(strings.TrimSpace(k))
	switch {
	case k == "", k == "—", strings.ContainsAny(k, `/\`):
		return ""
	}
	return k
}
// modelKeyFromFilenameOrPath derives the model key for a recording.
// Preferred source is the filename (without extension); as a fallback the
// parent directory name is used when the file lived under done/<model>/...
// (excluding the "keep" directory itself). Returns "" when nothing usable
// can be derived.
func modelKeyFromFilenameOrPath(file string, srcPath string, doneAbs string) string {
	// 1) from the filename stem (extension stripped!)
	stem := strings.TrimSuffix(filepath.Base(strings.TrimSpace(file)), filepath.Ext(file))
	if key := sanitizeModelKey(strings.TrimSpace(modelNameFromFilename(stem))); key != "" {
		return key
	}
	// 2) fallback: when the file sat in a subdirectory of done/, that
	//    directory name is usually the model
	if strings.TrimSpace(srcPath) == "" || strings.TrimSpace(doneAbs) == "" {
		return ""
	}
	srcDir := filepath.Clean(filepath.Dir(srcPath))
	doneAbs = filepath.Clean(doneAbs)
	// srcDir == doneAbs means the file was in the done root, not a model dir
	if strings.EqualFold(srcDir, doneAbs) {
		return ""
	}
	key := sanitizeModelKey(filepath.Base(srcDir))
	if key == "" || strings.EqualFold(key, "keep") {
		return ""
	}
	return key
}
func uniqueDestPath(dstDir, file string) (string, error) {
dst := filepath.Join(dstDir, file)
if _, err := os.Stat(dst); err == nil {
ext := filepath.Ext(file)
base := strings.TrimSuffix(file, ext)
for i := 2; i <= 200; i++ {
alt := fmt.Sprintf("%s__dup%d%s", base, i, ext)
cand := filepath.Join(dstDir, alt)
if _, err := os.Stat(cand); os.IsNotExist(err) {
return cand, nil
}
}
return "", fmt.Errorf("too many duplicates for %s", file)
} else if err != nil && !os.IsNotExist(err) {
return "", err
}
return dst, nil
}
// idFromVideoPath returns the asset id for a video path: the base filename
// with its extension removed.
func idFromVideoPath(videoPath string) string {
	base := filepath.Base(strings.TrimSpace(videoPath))
	if ext := filepath.Ext(base); ext != "" {
		return strings.TrimSuffix(base, ext)
	}
	return base
}
// assetIDForJob returns the id under which generated assets (thumbs etc.)
// are stored for a job: preferably the output basename without extension
// and without the "HOT " prefix, falling back to the job id.
func assetIDForJob(job *RecordJob) string {
	if job == nil {
		return ""
	}
	// prefer: output filename without extension (and without HOT prefix)
	if out := strings.TrimSpace(job.Output); out != "" {
		if id := stripHotPrefix(idFromVideoPath(out)); strings.TrimSpace(id) != "" {
			return id
		}
	}
	// fallback: the job id (should practically never be needed)
	return strings.TrimSpace(job.ID)
}
func atomicWriteFile(dst string, data []byte) error {
dir := filepath.Dir(dst)
if err := os.MkdirAll(dir, 0o755); err != nil {
return err
}
tmp, err := os.CreateTemp(dir, ".tmp-*")
if err != nil {
return err
}
tmpName := tmp.Name()
_ = tmp.Chmod(0o644)
_, werr := tmp.Write(data)
cerr := tmp.Close()
if werr != nil {
_ = os.Remove(tmpName)
return werr
}
if cerr != nil {
_ = os.Remove(tmpName)
return cerr
}
return os.Rename(tmpName, dst)
}
// fixKeepRootFilesIntoModelSubdirs sorts loose video files lying directly in
// /done/keep (root) into /done/keep/<model>/ subdirectories at startup.
// Best-effort: when the model cannot be derived or the destination collides,
// the file is skipped or stored under a "__dupN" name. A summary line is
// printed when anything was moved or skipped.
func fixKeepRootFilesIntoModelSubdirs() {
	s := getSettings()
	doneAbs, err := resolvePathRelativeToApp(s.DoneDir)
	if err != nil || strings.TrimSpace(doneAbs) == "" {
		return
	}
	keepRoot := filepath.Join(doneAbs, "keep")
	ents, err := os.ReadDir(keepRoot)
	if err != nil {
		// keep may not exist yet -> nothing to do
		if os.IsNotExist(err) {
			return
		}
		fmt.Println("⚠️ keep scan failed:", err)
		return
	}
	moved := 0
	skipped := 0
	// isVideo accepts finished .mp4/.ts files only; partial/temp downloads
	// (".part"/".tmp" anywhere in the name) are ignored.
	isVideo := func(name string) bool {
		low := strings.ToLower(name)
		if strings.Contains(low, ".part") || strings.Contains(low, ".tmp") {
			return false
		}
		ext := strings.ToLower(filepath.Ext(name))
		return ext == ".mp4" || ext == ".ts"
	}
	for _, e := range ents {
		if e.IsDir() {
			continue
		}
		name := e.Name()
		if !isVideo(name) {
			continue
		}
		// source: /done/keep/<file>
		src := filepath.Join(keepRoot, name)
		// derive model from the filename (same logic as the keep handler)
		stem := strings.TrimSuffix(name, filepath.Ext(name)) // without .mp4/.ts
		modelKey := sanitizeModelKey(strings.TrimSpace(modelNameFromFilename(stem)))
		// not derivable -> skip
		if modelKey == "" || modelKey == "—" || strings.ContainsAny(modelKey, `/\`) {
			skipped++
			continue
		}
		dstDir := filepath.Join(keepRoot, modelKey)
		if err := os.MkdirAll(dstDir, 0o755); err != nil {
			fmt.Println("⚠️ keep mkdir failed:", err)
			skipped++
			continue
		}
		dst := filepath.Join(dstDir, name)
		// if the target already exists -> find a unique "__dupN" name
		if _, err := os.Stat(dst); err == nil {
			ext := filepath.Ext(name)
			base := strings.TrimSuffix(name, ext)
			found := false
			for i := 2; i <= 200; i++ {
				alt := fmt.Sprintf("%s__dup%d%s", base, i, ext)
				cand := filepath.Join(dstDir, alt)
				if _, err := os.Stat(cand); os.IsNotExist(err) {
					dst = cand
					found = true
					break
				}
			}
			if !found {
				fmt.Println("⚠️ keep fix: too many duplicates, skip:", name)
				skipped++
				continue
			}
		} else if err != nil && !os.IsNotExist(err) {
			fmt.Println("⚠️ keep stat dst failed:", err)
			skipped++
			continue
		}
		// move (robust against Windows file locks)
		if err := renameWithRetry(src, dst); err != nil {
			fmt.Println("⚠️ keep fix rename failed:", err)
			skipped++
			continue
		}
		moved++
	}
	if moved > 0 || skipped > 0 {
		fmt.Printf("🧹 keep fix: moved=%d skipped=%d (root=%s)\n", moved, skipped, keepRoot)
	}
}
// findFinishedFileByID locates a finished recording by its id (file stem).
// It probes .mp4/.ts variants with and without the "HOT " prefix, searching
// the done directory (skipping keep), then done/keep, then the active
// record directory — each including one level of subdirectories.
func findFinishedFileByID(id string) (string, error) {
	s := getSettings()
	recordAbs, _ := resolvePathRelativeToApp(s.RecordDir)
	doneAbs, _ := resolvePathRelativeToApp(s.DoneDir)
	base := stripHotPrefix(strings.TrimSpace(id))
	if base == "" {
		return "", fmt.Errorf("not found")
	}
	candidates := []string{
		base + ".mp4",
		"HOT " + base + ".mp4",
		base + ".ts",
		"HOT " + base + ".ts",
	}
	keepAbs := filepath.Join(doneAbs, "keep")
	for _, name := range candidates {
		// done (root + /done/<subdir>/), excluding the keep directory ...
		if p, _, ok := findFileInDirOrOneLevelSubdirs(doneAbs, name, "keep"); ok {
			return p, nil
		}
		// ... then keep (root + /done/keep/<subdir>/) ...
		if p, _, ok := findFileInDirOrOneLevelSubdirs(keepAbs, name, ""); ok {
			return p, nil
		}
		// ... finally the record directory
		if p, _, ok := findFileInDirOrOneLevelSubdirs(recordAbs, name, ""); ok {
			return p, nil
		}
	}
	return "", fmt.Errorf("not found")
}
// servePreviewThumbAlias answers "thumbs.jpg" / "preview.jpg" preview
// requests. When id names a job held in memory, the generated thumb for its
// asset id is served (no-store while running, cacheable once finished), with
// an in-memory JPEG fallback for running jobs and a placeholder SVG
// otherwise. Without a job in RAM, id is treated as the asset id of a
// finished file. The file parameter is not inspected further here — the
// caller already matched it against the two alias names.
func servePreviewThumbAlias(w http.ResponseWriter, r *http.Request, id, file string) {
	// 1) Job known (id = job.ID): derive the asset id from its output path.
	jobsMu.Lock()
	job := jobs[id]
	jobsMu.Unlock()
	if job != nil {
		assetID := assetIDForJob(job)
		if assetID != "" {
			if thumbPath, err := generatedThumbFile(assetID); err == nil {
				if st, err := os.Stat(thumbPath); err == nil && !st.IsDir() && st.Size() > 0 {
					// running => no-store, finished => cacheable
					if job.Status == JobRunning {
						serveLivePreviewJPEGFile(w, r, thumbPath)
					} else {
						servePreviewJPEGFile(w, r, thumbPath)
					}
					return
				}
			}
		}
		// Optional in-memory fallback while the job is still running.
		if job.Status == JobRunning {
			job.previewMu.Lock()
			cached := job.previewJpeg
			job.previewMu.Unlock()
			if len(cached) > 0 {
				serveLivePreviewJPEGBytes(w, cached)
				return
			}
		}
		// Placeholder instead of a hard 404.
		servePreviewStatusSVG(w, "Preview", http.StatusOK)
		return
	}
	// 2) No job in RAM: treat id as an asset id (finished files after a
	//    restart); "preview.jpg" aliases to thumbs.jpg.
	assetID := stripHotPrefix(strings.TrimSpace(id))
	if assetID == "" {
		http.NotFound(w, r)
		return
	}
	if thumbPath, err := generatedThumbFile(assetID); err == nil {
		if st, err := os.Stat(thumbPath); err == nil && !st.IsDir() && st.Size() > 0 {
			servePreviewJPEGFile(w, r, thumbPath)
			return
		}
	}
	http.NotFound(w, r)
}
func isHover(r *http.Request) bool {
v := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("hover")))
return v == "1" || v == "true" || v == "yes"
}
// touchPreview records a preview access for the job so the idle reaper
// knows it is still being watched. Nil jobs are ignored.
func touchPreview(job *RecordJob) {
	if job == nil {
		return
	}
	now := time.Now()
	jobsMu.Lock()
	job.previewLastHit = now
	jobsMu.Unlock()
}
// ensurePreviewStarted lazily starts the HLS preview pipeline for a job.
// Serialized per job via previewStartMu; it is a no-op when the preview
// process is already running or no preview m3u8 URL is known. The preview
// runs with its own cancellable context — deliberately NOT the recording
// context, so stopping the preview never interrupts the recording itself.
func ensurePreviewStarted(r *http.Request, job *RecordJob) {
	if job == nil {
		return
	}
	job.previewStartMu.Lock()
	defer job.previewStartMu.Unlock()
	jobsMu.Lock()
	// already running?
	if job.previewCmd != nil && job.PreviewDir != "" {
		job.previewLastHit = time.Now()
		jobsMu.Unlock()
		return
	}
	// we need the M3U8 URL (snapshot under lock, then release)
	m3u8 := strings.TrimSpace(job.PreviewM3U8)
	cookie := strings.TrimSpace(job.PreviewCookie)
	ua := strings.TrimSpace(job.PreviewUA)
	jobsMu.Unlock()
	if m3u8 == "" {
		return
	}
	// own context for the preview (IMPORTANT: not the recording ctx)
	pctx, cancel := context.WithCancel(context.Background())
	// temp directory for preview segments, keyed by asset id
	assetID := assetIDForJob(job)
	pdir := filepath.Join(os.TempDir(), "rec_preview", assetID)
	jobsMu.Lock()
	job.PreviewDir = pdir
	job.previewCancel = cancel
	job.previewLastHit = time.Now()
	jobsMu.Unlock()
	_ = startPreviewHLS(pctx, job, m3u8, pdir, cookie, ua)
}
// recordPreview (GET/HEAD) is the preview endpoint for recordings.
//
// Query contract: id (alias: name) selects a job or finished file. An
// optional file= parameter either serves the JPEG aliases ("thumbs.jpg" /
// "preview.jpg") or an HLS segment. For a job held in memory, the order of
// preference is: the generated thumbs.jpg on disk (running jobs, no ffmpeg
// per request), then an in-memory JPEG cache that a single background
// goroutine per job refreshes when stale, then state-specific placeholders
// ("Private"/"Offline") or 204 while no image exists yet. Unknown ids are
// treated as file stems of finished downloads.
func recordPreview(w http.ResponseWriter, r *http.Request) {
	// optional but sensible: only allow GET/HEAD
	if r.Method != http.MethodGet && r.Method != http.MethodHead {
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
		return
	}
	id := strings.TrimSpace(r.URL.Query().Get("id"))
	if id == "" {
		// alias: the frontend sends "name"
		id = strings.TrimSpace(r.URL.Query().Get("name"))
	}
	if id == "" {
		http.Error(w, "id fehlt", http.StatusBadRequest)
		return
	}
	// intercept JPEG requests before HLS handling
	if file := strings.TrimSpace(r.URL.Query().Get("file")); file != "" {
		if file == "thumbs.jpg" || file == "preview.jpg" {
			servePreviewThumbAlias(w, r, id, file)
			return
		}
		// HLS as before
		servePreviewHLSFile(w, r, id, file)
		return
	}
	// check whether we know a job with this id (running or just finished)
	jobsMu.Lock()
	job, ok := jobs[id]
	jobsMu.Unlock()
	if ok {
		// 0) running: if generated/<jobId>/thumbs.jpg exists, serve it
		//    immediately (no ffmpeg per HTTP request)
		if job.Status == "running" {
			assetID := assetIDForJob(job)
			if assetID != "" {
				if thumbPath, err := generatedThumbFile(assetID); err == nil {
					if st, err := os.Stat(thumbPath); err == nil && !st.IsDir() && st.Size() > 0 {
						serveLivePreviewJPEGFile(w, r, thumbPath)
						return
					}
				}
			}
		}
		// fallback: old in-memory cache (when thumbs.jpg is not there yet)
		job.previewMu.Lock()
		cached := job.previewJpeg
		cachedAt := job.previewJpegAt
		freshWindow := 8 * time.Second
		fresh := len(cached) > 0 && !cachedAt.IsZero() && time.Since(cachedAt) < freshWindow
		// if stale, refresh in the background (at most one generator at a time)
		if !fresh && !job.previewGen {
			job.previewGen = true
			go func(j *RecordJob, jobID string) {
				defer func() {
					j.previewMu.Lock()
					j.previewGen = false
					j.previewMu.Unlock()
				}()
				var img []byte
				var genErr error
				// 1) from the preview segments
				previewDir := strings.TrimSpace(j.PreviewDir)
				if previewDir != "" {
					img, genErr = extractLastFrameFromPreviewDir(previewDir)
				}
				// 2) fallback: from the output file
				if genErr != nil || len(img) == 0 {
					outPath := strings.TrimSpace(j.Output)
					if outPath != "" {
						outPath = filepath.Clean(outPath)
						if !filepath.IsAbs(outPath) {
							if abs, err := resolvePathRelativeToApp(outPath); err == nil {
								outPath = abs
							}
						}
						if fi, err := os.Stat(outPath); err == nil && !fi.IsDir() && fi.Size() > 0 {
							img, genErr = extractLastFrameJPEG(outPath)
							if genErr != nil {
								img, _ = extractFirstFrameJPEG(outPath)
							}
						}
					}
				}
				if len(img) > 0 {
					j.previewMu.Lock()
					j.previewJpeg = img
					j.previewJpegAt = time.Now()
					j.previewMu.Unlock()
				}
			}(job, id)
		}
		// serve either a fresh image or the last cached one
		out := cached
		job.previewMu.Unlock()
		if len(out) > 0 {
			serveLivePreviewJPEGBytes(w, out) // no-store for running jobs
			return
		}
		// preview definitively unavailable -> placeholder instead of 204
		jobsMu.Lock()
		state := strings.TrimSpace(job.PreviewState)
		jobsMu.Unlock()
		if state == "private" {
			servePreviewStatusSVG(w, "Private", http.StatusOK)
			return
		}
		if state == "offline" {
			servePreviewStatusSVG(w, "Offline", http.StatusOK)
			return
		}
		// no image available yet -> 204 (frontend shows placeholder + retry)
		w.Header().Set("Cache-Control", "no-store")
		w.WriteHeader(http.StatusNoContent)
		return
	}
	// no job in RAM -> treat id as the file stem of a finished download
	servePreviewForFinishedFile(w, r, id)
}
func serveLivePreviewJPEGFile(w http.ResponseWriter, r *http.Request, path string) {
f, err := os.Open(path)
if err != nil {
http.NotFound(w, r)
return
}
defer f.Close()
st, err := f.Stat()
if err != nil || st.IsDir() || st.Size() == 0 {
http.NotFound(w, r)
return
}
w.Header().Set("Content-Type", "image/jpeg")
w.Header().Set("Cache-Control", "no-store")
http.ServeContent(w, r, "thumbs.jpg", st.ModTime(), f)
}
// updateLiveThumbOnce refreshes generated/<assetID>/thumbs.jpg for a running
// job. It skips when the job is no longer running or the existing thumb is
// younger than 10 seconds; global concurrency is bounded via thumbSem. The
// frame is preferably taken from the preview segment directory, falling back
// to the (partial) output file, and written atomically.
func updateLiveThumbOnce(ctx context.Context, job *RecordJob) {
	// snapshot the relevant fields under lock
	jobsMu.Lock()
	status := job.Status
	previewDir := job.PreviewDir
	out := job.Output
	jobsMu.Unlock()
	if status != "running" {
		return
	}
	// target path: generated/<jobId>/thumbs.jpg
	assetID := assetIDForJob(job)
	thumbPath, err := generatedThumbFile(assetID)
	if err != nil {
		return
	}
	// fresh enough? skip
	if st, err := os.Stat(thumbPath); err == nil && st.Size() > 0 {
		if time.Since(st.ModTime()) < 10*time.Second {
			return
		}
	}
	// concurrency limit via thumbSem (give up after 3s instead of queueing)
	if thumbSem != nil {
		thumbCtx, cancel := context.WithTimeout(ctx, 3*time.Second)
		defer cancel()
		if err := thumbSem.Acquire(thumbCtx); err != nil {
			return
		}
		defer thumbSem.Release()
	}
	var img []byte
	// 1) preferred: from the preview segments
	if previewDir != "" {
		if b, err := extractLastFrameFromPreviewDirThumb(previewDir); err == nil && len(b) > 0 {
			img = b
		}
	}
	// 2) fallback: from the output file (can be slower on partial files)
	if len(img) == 0 && out != "" {
		if b, err := extractLastFrameJPEGScaled(out, 320, 14); err == nil && len(b) > 0 {
			img = b
		}
	}
	if len(img) == 0 {
		return
	}
	_ = atomicWriteFile(thumbPath, img)
}
// startLiveThumbLoop spawns — at most once per job — a background goroutine
// that periodically refreshes the live thumb while the job is running. The
// refresh interval widens with the number of concurrently running jobs to
// bound system load; the loop ends on ctx cancellation or when the job
// leaves the "running" state.
func startLiveThumbLoop(ctx context.Context, job *RecordJob) {
	// start only once per job
	jobsMu.Lock()
	if job.LiveThumbStarted {
		jobsMu.Unlock()
		return
	}
	job.LiveThumbStarted = true
	jobsMu.Unlock()
	go func() {
		// try once immediately
		updateLiveThumbOnce(ctx, job)
		for {
			// dynamic frequency: the more active jobs, the slower (less load)
			jobsMu.Lock()
			nRunning := 0
			for _, j := range jobs {
				if j != nil && j.Status == "running" {
					nRunning++
				}
			}
			jobsMu.Unlock()
			delay := 12 * time.Second
			if nRunning >= 6 {
				delay = 18 * time.Second
			}
			if nRunning >= 12 {
				delay = 25 * time.Second
			}
			select {
			case <-ctx.Done():
				return
			case <-time.After(delay):
				// stop as soon as the job is no longer running
				jobsMu.Lock()
				st := job.Status
				jobsMu.Unlock()
				if st != "running" {
					return
				}
				updateLiveThumbOnce(ctx, job)
			}
		}
	}()
}
// servePreviewForFinishedFile serves a preview JPEG for a finished download
// identified only by its file stem (id). Optional t=<seconds> requests are
// answered from (and cached into) generated/<assetID>/t_<sec>.jpg; the
// default thumb (mid-video frame, with last/first-frame fallbacks) is cached
// as generated/<assetID>/thumbs.jpg. Legacy thumb locations are migrated
// best-effort on first access.
func servePreviewForFinishedFile(w http.ResponseWriter, r *http.Request, id string) {
	var err error
	id, err = sanitizeID(id)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	outPath, err := findFinishedFileByID(id)
	if err != nil {
		http.Error(w, "preview nicht verfügbar", http.StatusNotFound)
		return
	}
	if err := ensureGeneratedDirs(); err != nil {
		http.Error(w, "generated-dir nicht verfügbar: "+err.Error(), http.StatusInternalServerError)
		return
	}
	// assets are always stored under the basename without the HOT prefix
	assetID := stripHotPrefix(id)
	if assetID == "" {
		assetID = id
	}
	assetDir, err := ensureGeneratedDir(assetID)
	if err != nil {
		http.Error(w, "generated-dir nicht verfügbar: "+err.Error(), http.StatusInternalServerError)
		return
	}
	// frame caching for t=... (optional; seconds rounded to integers)
	if tStr := strings.TrimSpace(r.URL.Query().Get("t")); tStr != "" {
		if sec, err := strconv.ParseFloat(tStr, 64); err == nil && sec >= 0 {
			secI := int64(sec + 0.5)
			if secI < 0 {
				secI = 0
			}
			framePath := filepath.Join(assetDir, fmt.Sprintf("t_%d.jpg", secI))
			if fi, err := os.Stat(framePath); err == nil && !fi.IsDir() && fi.Size() > 0 {
				servePreviewJPEGFile(w, r, framePath)
				return
			}
			img, err := extractFrameAtTimeJPEG(outPath, float64(secI))
			if err == nil && len(img) > 0 {
				_ = atomicWriteFile(framePath, img)
				servePreviewJPEGBytes(w, img)
				return
			}
		}
	}
	thumbPath := filepath.Join(assetDir, "thumbs.jpg")
	// 1) cache hit (new location)
	if fi, err := os.Stat(thumbPath); err == nil && !fi.IsDir() && fi.Size() > 0 {
		servePreviewJPEGFile(w, r, thumbPath)
		return
	}
	// 2) legacy migration (best effort)
	if thumbsLegacy, _ := generatedThumbsRoot(); strings.TrimSpace(thumbsLegacy) != "" {
		candidates := []string{
			filepath.Join(thumbsLegacy, assetID, "preview.jpg"),
			filepath.Join(thumbsLegacy, id, "preview.jpg"),
			filepath.Join(thumbsLegacy, assetID+".jpg"),
			filepath.Join(thumbsLegacy, id+".jpg"),
		}
		for _, c := range candidates {
			if fi, err := os.Stat(c); err == nil && !fi.IsDir() && fi.Size() > 0 {
				if b, rerr := os.ReadFile(c); rerr == nil && len(b) > 0 {
					_ = atomicWriteFile(thumbPath, b)
					servePreviewJPEGBytes(w, b)
					return
				}
			}
		}
	}
	// 3) generate a fresh thumb: mid-video frame, then last/first fallbacks
	genCtx, cancel := context.WithTimeout(r.Context(), 30*time.Second)
	defer cancel()
	var t float64 = 0
	if dur, derr := durationSecondsCached(genCtx, outPath); derr == nil && dur > 0 {
		t = dur * 0.5
	}
	img, err := extractFrameAtTimeJPEG(outPath, t)
	if err != nil || len(img) == 0 {
		img, err = extractLastFrameJPEG(outPath)
		if err != nil || len(img) == 0 {
			img, err = extractFirstFrameJPEG(outPath)
			if err != nil || len(img) == 0 {
				http.Error(w, "konnte preview nicht erzeugen", http.StatusInternalServerError)
				return
			}
		}
	}
	_ = atomicWriteFile(thumbPath, img)
	servePreviewJPEGBytes(w, img)
}
// maxInt64 is the largest value representable by int64 (2^63-1).
// Uses the stdlib constant instead of the equivalent ^uint64(0)>>1 bit trick.
const maxInt64 = int64(math.MaxInt64)
// removeJobsByOutputBasename deletes every registered job whose output
// file has the given basename and notifies listeners when at least one
// entry was removed. Blank input is a no-op.
func removeJobsByOutputBasename(file string) {
	name := strings.TrimSpace(file)
	if name == "" {
		return
	}
	var dropped bool
	jobsMu.Lock()
	for id, job := range jobs {
		if job == nil {
			continue
		}
		out := strings.TrimSpace(job.Output)
		if out == "" || filepath.Base(out) != name {
			continue
		}
		// Deleting from a map during range iteration is safe in Go.
		delete(jobs, id)
		dropped = true
	}
	jobsMu.Unlock()
	if dropped {
		notifyJobsChanged()
	}
}
// renameJobsOutputBasename rewrites the output path of every job whose
// basename matches oldFile so that it points at newFile in the same
// directory, notifying listeners when anything changed. Blank names are
// a no-op.
func renameJobsOutputBasename(oldFile, newFile string) {
	from := strings.TrimSpace(oldFile)
	to := strings.TrimSpace(newFile)
	if from == "" || to == "" {
		return
	}
	var touched bool
	jobsMu.Lock()
	for _, job := range jobs {
		if job == nil {
			continue
		}
		out := strings.TrimSpace(job.Output)
		if out == "" || filepath.Base(out) != from {
			continue
		}
		job.Output = filepath.Join(filepath.Dir(out), to)
		touched = true
	}
	jobsMu.Unlock()
	if touched {
		notifyJobsChanged()
	}
}
// FetchPlaylist loads the HLS master playlist at hlsSource via hc (sending
// httpCookie) and picks the variant with the highest horizontal resolution;
// ties are broken in favor of the higher framerate. It returns the chosen
// media playlist URL, the root URL the variant URIs are relative to, and the
// selected resolution/framerate.
//
// nimmt jetzt *HTTPClient entgegen
func FetchPlaylist(ctx context.Context, hc *HTTPClient, hlsSource, httpCookie string) (*Playlist, error) {
	if hlsSource == "" {
		return nil, errors.New("HLS-URL leer")
	}
	req, err := hc.NewRequest(ctx, http.MethodGet, hlsSource, httpCookie)
	if err != nil {
		return nil, fmt.Errorf("Fehler beim Erstellen der Playlist-Request: %w", err)
	}
	resp, err := hc.client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("Fehler beim Laden der Playlist: %w", err)
	}
	defer resp.Body.Close()
	// Fix: check the HTTP status. Previously an error page (404/503) was fed
	// into the m3u8 decoder and surfaced only as the misleading
	// "keine gültige Master-Playlist".
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("Playlist-Request fehlgeschlagen: HTTP %d", resp.StatusCode)
	}
	playlist, listType, err := m3u8.DecodeFrom(resp.Body, true)
	// Fix: the decode error used to be swallowed by a combined condition;
	// wrap it so the caller can see why parsing failed.
	if err != nil {
		return nil, fmt.Errorf("keine gültige Master-Playlist: %w", err)
	}
	if listType != m3u8.MASTER {
		return nil, errors.New("keine gültige Master-Playlist")
	}
	master := playlist.(*m3u8.MasterPlaylist)
	var bestURI string
	var bestWidth int
	var bestFramerate int
	for _, variant := range master.Variants {
		if variant == nil || variant.Resolution == "" {
			continue
		}
		// Resolution is "WIDTHxHEIGHT"; only the width matters for ranking.
		wStr, _, ok := strings.Cut(variant.Resolution, "x")
		if !ok {
			continue
		}
		width, convErr := strconv.Atoi(wStr)
		if convErr != nil {
			continue
		}
		// NOTE(review): framerate is sniffed from the variant NAME string
		// ("FPS:60.0"), which looks site-specific — confirm this matches the
		// upstream playlist format before relying on it elsewhere.
		fr := 30
		if strings.Contains(variant.Name, "FPS:60.0") {
			fr = 60
		}
		if width > bestWidth || (width == bestWidth && fr > bestFramerate) {
			bestWidth = width
			bestFramerate = fr
			bestURI = variant.URI
		}
	}
	if bestURI == "" {
		return nil, errors.New("keine gültige Auflösung gefunden")
	}
	// Variant URIs are relative to the directory part of the master URL.
	root := hlsSource[:strings.LastIndex(hlsSource, "/")+1]
	return &Playlist{
		PlaylistURL: root + bestURI,
		RootURL:     root,
		Resolution:  bestWidth,
		Framerate:   bestFramerate,
	}, nil
}
// readLine reads a single line from stdin and returns it without any
// trailing CR/LF characters.
//
// NOTE(review): a fresh bufio.Reader is built on every call, so bytes it
// buffers beyond the first newline are discarded between calls — confirm
// callers only ever read one line per prompt.
func readLine() string {
	line, _ := bufio.NewReader(os.Stdin).ReadString('\n')
	// Strip any run of trailing '\r'/'\n' (equivalent to
	// strings.TrimRight(line, "\r\n")).
	for len(line) > 0 {
		last := line[len(line)-1]
		if last != '\n' && last != '\r' {
			break
		}
		line = line[:len(line)-1]
	}
	return line
}
func fileExists(path string) bool {
_, err := os.Stat(path)
return err == nil
}