mirror of
https://github.com/arkorty/DownLink.git
synced 2026-03-17 16:51:45 +00:00
feat: caching & logging
This commit is contained in:
6
backend/.gitignore
vendored
6
backend/.gitignore
vendored
@@ -36,4 +36,8 @@
|
|||||||
Thumbs.db
|
Thumbs.db
|
||||||
|
|
||||||
# Cookies
|
# Cookies
|
||||||
cookies*.txt
|
instagram.txt
|
||||||
|
youtube.txt
|
||||||
|
|
||||||
|
# Cache directory for downloaded videos
|
||||||
|
cache/
|
||||||
|
|||||||
@@ -1,8 +1,8 @@
|
|||||||
# Use the official Golang image
|
# Builder stage
|
||||||
FROM golang:1.20-alpine
|
FROM golang:1.21-alpine AS builder
|
||||||
|
|
||||||
# Set the Current Working Directory inside the container
|
# Set the Current Working Directory inside the container
|
||||||
WORKDIR /server
|
WORKDIR /build
|
||||||
|
|
||||||
# Copy go mod and sum files
|
# Copy go mod and sum files
|
||||||
COPY go.mod go.sum ./
|
COPY go.mod go.sum ./
|
||||||
@@ -16,6 +16,16 @@ COPY . .
|
|||||||
# Build the Go app
|
# Build the Go app
|
||||||
RUN go build -o main .
|
RUN go build -o main .
|
||||||
|
|
||||||
|
# Runtime stage
|
||||||
|
FROM alpine:3.17
|
||||||
|
|
||||||
|
WORKDIR /app
|
||||||
|
|
||||||
|
# Copy the executable from builder
|
||||||
|
COPY --from=builder /build/main .
|
||||||
|
COPY --from=builder /build/*txt .
|
||||||
|
|
||||||
|
# Install runtime dependencies
|
||||||
RUN apk add --no-cache \
|
RUN apk add --no-cache \
|
||||||
python3 \
|
python3 \
|
||||||
py3-pip \
|
py3-pip \
|
||||||
|
|||||||
153
backend/LOGGING.md
Normal file
153
backend/LOGGING.md
Normal file
@@ -0,0 +1,153 @@
|
|||||||
|
# Logging Configuration
|
||||||
|
|
||||||
|
The DownLink backend uses structured logging with the Go `log/slog` package for better observability and debugging.
|
||||||
|
|
||||||
|
## Log Levels
|
||||||
|
|
||||||
|
The application supports the following log levels (in order of increasing severity):
|
||||||
|
|
||||||
|
- `DEBUG`: Detailed information for debugging
|
||||||
|
- `INFO`: General information about application flow
|
||||||
|
- `WARN`: Warning messages for potentially harmful situations
|
||||||
|
- `ERROR`: Error messages for failed operations
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
Logging can be configured using environment variables:
|
||||||
|
|
||||||
|
### LOG_LEVEL
|
||||||
|
Sets the minimum log level to display. Default: `INFO`
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export LOG_LEVEL=DEBUG # Show all logs including debug
|
||||||
|
export LOG_LEVEL=WARN # Show only warnings and errors
|
||||||
|
export LOG_LEVEL=ERROR # Show only errors
|
||||||
|
```
|
||||||
|
|
||||||
|
### LOG_FORMAT
|
||||||
|
Sets the log output format. Default: `json`
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export LOG_FORMAT=json # JSON structured format (default)
|
||||||
|
export LOG_FORMAT=text # Human-readable text format
|
||||||
|
```
|
||||||
|
|
||||||
|
### LOG_BUFFER_SIZE
|
||||||
|
Sets the size of the in-memory log buffer (number of log entries). Default: `1000`
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export LOG_BUFFER_SIZE=2000 # Store last 2000 log entries
|
||||||
|
```
|
||||||
|
|
||||||
|
## Log Structure
|
||||||
|
|
||||||
|
### JSON Format (Default)
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"time": "2024-01-15T10:30:45.123Z",
|
||||||
|
"level": "INFO",
|
||||||
|
"msg": "Video downloaded successfully",
|
||||||
|
"url": "https://youtube.com/watch?v=example",
|
||||||
|
"quality": "720p",
|
||||||
|
"path": "./cache/example_720.mp4"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Text Format
|
||||||
|
```
|
||||||
|
2024-01-15T10:30:45.123Z INFO Video downloaded successfully url=https://youtube.com/watch?v=example quality=720p path=./cache/example_720.mp4
|
||||||
|
```
|
||||||
|
|
||||||
|
## Key Log Events
|
||||||
|
|
||||||
|
### Application Startup
|
||||||
|
- Logger initialization with configuration
|
||||||
|
- Server startup with port information
|
||||||
|
- Cache directory initialization
|
||||||
|
|
||||||
|
### Video Downloads
|
||||||
|
- Download requests with URL and quality
|
||||||
|
- Cache hits and misses
|
||||||
|
- yt-dlp command execution
|
||||||
|
- Download completion or failure
|
||||||
|
|
||||||
|
### Cache Operations
|
||||||
|
- Cache cleanup scheduling and execution
|
||||||
|
- File removal operations
|
||||||
|
- Cache statistics requests
|
||||||
|
|
||||||
|
### Error Handling
|
||||||
|
- Request validation errors
|
||||||
|
- File system errors
|
||||||
|
- yt-dlp execution errors
|
||||||
|
- HTTP error responses
|
||||||
|
|
||||||
|
## Log API Endpoint
|
||||||
|
|
||||||
|
The application provides an API endpoint to retrieve logs:
|
||||||
|
|
||||||
|
```
|
||||||
|
GET /downlink/logs
|
||||||
|
```
|
||||||
|
|
||||||
|
### Query Parameters
|
||||||
|
|
||||||
|
- `level`: Filter logs by minimum level (`DEBUG`, `INFO`, `WARN`, `ERROR`). Default: `INFO`
|
||||||
|
- `limit`: Maximum number of logs to return. Default: all logs in the buffer
|
||||||
|
|
||||||
|
### Example Requests
|
||||||
|
|
||||||
|
```
|
||||||
|
GET /downlink/logs
|
||||||
|
GET /downlink/logs?level=ERROR
|
||||||
|
GET /downlink/logs?level=DEBUG&limit=50
|
||||||
|
```
|
||||||
|
|
||||||
|
### Example Response
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"logs": [
|
||||||
|
{
|
||||||
|
"time": "2024-01-15T10:30:45.123Z",
|
||||||
|
"level": "INFO",
|
||||||
|
"msg": "Server starting",
|
||||||
|
"attrs": {
|
||||||
|
"port": "8080"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"time": "2024-01-15T10:31:10.456Z",
|
||||||
|
"level": "INFO",
|
||||||
|
"msg": "Video downloaded successfully",
|
||||||
|
"attrs": {
|
||||||
|
"url": "https://youtube.com/watch?v=example",
|
||||||
|
"quality": "720p",
|
||||||
|
"path": "./cache/example_720.mp4"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"count": 2
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Best Practices
|
||||||
|
|
||||||
|
1. **Use appropriate log levels**: Use DEBUG for detailed troubleshooting, INFO for normal operations, WARN for potential issues, and ERROR for actual failures.
|
||||||
|
|
||||||
|
2. **Include relevant context**: Always include relevant fields like URLs, file paths, error details, and operation parameters.
|
||||||
|
|
||||||
|
3. **Avoid sensitive data**: Never log passwords, API keys, or other sensitive information.
|
||||||
|
|
||||||
|
4. **Structured logging**: Use structured fields instead of string concatenation for better parsing and filtering.
|
||||||
|
|
||||||
|
## Monitoring and Alerting
|
||||||
|
|
||||||
|
The JSON log format is compatible with log aggregation systems like:
|
||||||
|
- ELK Stack (Elasticsearch, Logstash, Kibana)
|
||||||
|
- Fluentd/Fluent Bit
|
||||||
|
- CloudWatch Logs
|
||||||
|
- Datadog
|
||||||
|
- Splunk
|
||||||
|
|
||||||
|
You can set up alerts based on ERROR level logs or specific error patterns to monitor application health.
|
||||||
@@ -1,20 +1,9 @@
|
|||||||
module DownLink
|
module DownLink
|
||||||
|
|
||||||
go 1.20
|
go 1.21
|
||||||
|
|
||||||
require github.com/labstack/echo v3.3.10+incompatible
|
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
|
github.com/go-chi/chi/v5 v5.0.10
|
||||||
github.com/google/uuid v1.6.0 // indirect
|
github.com/go-chi/cors v1.2.1
|
||||||
github.com/labstack/gommon v0.4.2 // indirect
|
github.com/google/uuid v1.4.0
|
||||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
|
||||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
|
||||||
github.com/stretchr/testify v1.9.0 // indirect
|
|
||||||
github.com/valyala/bytebufferpool v1.0.0 // indirect
|
|
||||||
github.com/valyala/fasttemplate v1.2.2 // indirect
|
|
||||||
golang.org/x/crypto v0.26.0 // indirect
|
|
||||||
golang.org/x/net v0.21.0 // indirect
|
|
||||||
golang.org/x/sys v0.23.0 // indirect
|
|
||||||
golang.org/x/text v0.17.0 // indirect
|
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -1,35 +1,6 @@
|
|||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
github.com/go-chi/chi/v5 v5.0.10 h1:rLz5avzKpjqxrYwXNfmjkrYYXOyLJd37pz53UFHC6vk=
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/go-chi/chi/v5 v5.0.10/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
|
||||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
|
github.com/go-chi/cors v1.2.1 h1:xEC8UT3Rlp2QuWNEr4Fs/c2EAGVKBwy/1vHx3bppil4=
|
||||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
github.com/go-chi/cors v1.2.1/go.mod h1:sSbTewc+6wYHBBCW7ytsFSn836hqM7JxpglAy2Vzc58=
|
||||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
|
||||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/labstack/echo v3.3.10+incompatible h1:pGRcYk231ExFAyoAjAfD85kQzRJCRI8bbnE7CX5OEgg=
|
|
||||||
github.com/labstack/echo v3.3.10+incompatible/go.mod h1:0INS7j/VjnFxD4E2wkz67b8cVwCLbBmJyDaka6Cmk1s=
|
|
||||||
github.com/labstack/gommon v0.4.2 h1:F8qTUNXgG1+6WQmqoUWnz8WiEU60mXVVw0P4ht1WRA0=
|
|
||||||
github.com/labstack/gommon v0.4.2/go.mod h1:QlUFxVM+SNXhDL/Z7YhocGIBYOiwB0mXm1+1bAPHPyU=
|
|
||||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
|
||||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
|
||||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
|
||||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
|
||||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
|
||||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
|
||||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
|
||||||
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
|
||||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
|
||||||
github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo=
|
|
||||||
github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
|
|
||||||
golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
|
|
||||||
golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
|
|
||||||
golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
|
|
||||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
|
||||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM=
|
|
||||||
golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
|
||||||
golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
|
|
||||||
golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
|
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
|
||||||
|
|||||||
73
backend/handlers/log.go
Normal file
73
backend/handlers/log.go
Normal file
@@ -0,0 +1,73 @@
|
|||||||
|
package handlers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"log/slog"
|
||||||
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"DownLink/services"
|
||||||
|
)
|
||||||
|
|
||||||
|
// LogHandler handles requests for viewing logs
|
||||||
|
type LogHandler struct {
|
||||||
|
logBuffer *services.LogBuffer
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewLogHandler creates a new log handler with the provided log buffer
|
||||||
|
func NewLogHandler(logBuffer *services.LogBuffer) *LogHandler {
|
||||||
|
return &LogHandler{
|
||||||
|
logBuffer: logBuffer,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetLogs returns the logs based on query parameters
|
||||||
|
func (lh *LogHandler) GetLogs(w http.ResponseWriter, r *http.Request) {
|
||||||
|
slog.Debug("Log retrieval requested", "remote_addr", r.RemoteAddr)
|
||||||
|
|
||||||
|
// Parse level parameter
|
||||||
|
level := slog.LevelInfo
|
||||||
|
if levelStr := r.URL.Query().Get("level"); levelStr != "" {
|
||||||
|
switch levelStr {
|
||||||
|
case "DEBUG", "debug":
|
||||||
|
level = slog.LevelDebug
|
||||||
|
case "INFO", "info":
|
||||||
|
level = slog.LevelInfo
|
||||||
|
case "WARN", "warn":
|
||||||
|
level = slog.LevelWarn
|
||||||
|
case "ERROR", "error":
|
||||||
|
level = slog.LevelError
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse limit parameter
|
||||||
|
limit := 0 // 0 means no limit
|
||||||
|
if limitStr := r.URL.Query().Get("limit"); limitStr != "" {
|
||||||
|
if parsedLimit, err := strconv.Atoi(limitStr); err == nil && parsedLimit > 0 {
|
||||||
|
limit = parsedLimit
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get logs filtered by level
|
||||||
|
logs := lh.logBuffer.GetEntriesByLevel(level)
|
||||||
|
|
||||||
|
// Apply limit if specified
|
||||||
|
if limit > 0 && limit < len(logs) {
|
||||||
|
// Return the most recent logs (which are at the end of the array)
|
||||||
|
startIdx := len(logs) - limit
|
||||||
|
if startIdx < 0 {
|
||||||
|
startIdx = 0
|
||||||
|
}
|
||||||
|
logs = logs[startIdx:]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Respond with logs
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
|
||||||
|
slog.Debug("Returning logs", "count", len(logs), "level", level.String())
|
||||||
|
json.NewEncoder(w).Encode(map[string]interface{}{
|
||||||
|
"logs": logs,
|
||||||
|
"count": len(logs),
|
||||||
|
})
|
||||||
|
}
|
||||||
109
backend/handlers/video.go
Normal file
109
backend/handlers/video.go
Normal file
@@ -0,0 +1,109 @@
|
|||||||
|
package handlers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"DownLink/models"
|
||||||
|
"DownLink/services"
|
||||||
|
|
||||||
|
"github.com/google/uuid"
|
||||||
|
)
|
||||||
|
|
||||||
|
type VideoHandler struct {
|
||||||
|
videoService *services.VideoService
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewVideoHandler(videoService *services.VideoService) *VideoHandler {
|
||||||
|
return &VideoHandler{
|
||||||
|
videoService: videoService,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (vh *VideoHandler) DownloadVideo(w http.ResponseWriter, r *http.Request) {
|
||||||
|
var req models.VideoDownloadRequest
|
||||||
|
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||||
|
slog.Error("Failed to decode request body", "error", err)
|
||||||
|
vh.writeError(w, http.StatusBadRequest, "Invalid JSON")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if req.URL == "" || req.Quality == "" {
|
||||||
|
slog.Warn("Invalid request parameters", "url", req.URL, "quality", req.Quality)
|
||||||
|
vh.writeError(w, http.StatusBadRequest, "URL and Quality are required")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
slog.Info("Starting video download", "url", req.URL, "quality", req.Quality)
|
||||||
|
|
||||||
|
outputPath, err := vh.videoService.DownloadVideo(req.URL, req.Quality)
|
||||||
|
if err != nil {
|
||||||
|
slog.Error("Video download failed", "url", req.URL, "quality", req.Quality, "error", err)
|
||||||
|
vh.writeError(w, http.StatusInternalServerError, err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Determine if this was a cached response
|
||||||
|
isCached := !strings.Contains(outputPath, "dl_") && strings.Contains(outputPath, "cache")
|
||||||
|
|
||||||
|
// Only cleanup if it's a fresh download (not cached)
|
||||||
|
if strings.Contains(outputPath, "dl_") {
|
||||||
|
defer vh.videoService.CleanupTempDir(outputPath)
|
||||||
|
}
|
||||||
|
|
||||||
|
uid := uuid.New().String()
|
||||||
|
filename := fmt.Sprintf("video_%s.mp4", uid)
|
||||||
|
|
||||||
|
// Add cache status header
|
||||||
|
if isCached {
|
||||||
|
w.Header().Set("X-Cache-Status", "HIT")
|
||||||
|
slog.Info("Serving cached video", "url", req.URL, "quality", req.Quality, "file", outputPath)
|
||||||
|
} else {
|
||||||
|
w.Header().Set("X-Cache-Status", "MISS")
|
||||||
|
slog.Info("Serving fresh download", "url", req.URL, "quality", req.Quality, "file", outputPath)
|
||||||
|
}
|
||||||
|
|
||||||
|
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%s", filename))
|
||||||
|
w.Header().Set("Content-Type", "video/mp4")
|
||||||
|
|
||||||
|
http.ServeFile(w, r, outputPath)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (vh *VideoHandler) HealthCheck(w http.ResponseWriter, r *http.Request) {
|
||||||
|
slog.Debug("Health check requested", "remote_addr", r.RemoteAddr)
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
w.Write([]byte("Backend for DownLink is running.\n"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (vh *VideoHandler) ClearCache(w http.ResponseWriter, r *http.Request) {
|
||||||
|
slog.Info("Cache clear requested")
|
||||||
|
if err := vh.videoService.CleanupExpiredCache(0); err != nil {
|
||||||
|
slog.Error("Failed to clear cache", "error", err)
|
||||||
|
vh.writeError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to clear cache: %v", err))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
slog.Info("Cache cleared successfully")
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
json.NewEncoder(w).Encode(map[string]string{"message": "Cache cleared successfully"})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (vh *VideoHandler) GetCacheStatus(w http.ResponseWriter, r *http.Request) {
|
||||||
|
slog.Debug("Cache status requested")
|
||||||
|
status := vh.videoService.GetCacheStats()
|
||||||
|
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
json.NewEncoder(w).Encode(status)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (vh *VideoHandler) writeError(w http.ResponseWriter, status int, message string) {
|
||||||
|
slog.Error("HTTP error response", "status", status, "message", message)
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
w.WriteHeader(status)
|
||||||
|
json.NewEncoder(w).Encode(models.ErrorResponse{Error: message})
|
||||||
|
}
|
||||||
65
backend/logger.go
Normal file
65
backend/logger.go
Normal file
@@ -0,0 +1,65 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log/slog"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"DownLink/services"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Global log buffer for API access
|
||||||
|
var LogBuffer *services.LogBuffer
|
||||||
|
|
||||||
|
// setupLogger configures structured logging for the application
|
||||||
|
func setupLogger() {
|
||||||
|
// Get log level from environment variable, default to INFO
|
||||||
|
logLevel := slog.LevelInfo
|
||||||
|
if levelStr := os.Getenv("LOG_LEVEL"); levelStr != "" {
|
||||||
|
switch levelStr {
|
||||||
|
case "DEBUG":
|
||||||
|
logLevel = slog.LevelDebug
|
||||||
|
case "INFO":
|
||||||
|
logLevel = slog.LevelInfo
|
||||||
|
case "WARN":
|
||||||
|
logLevel = slog.LevelWarn
|
||||||
|
case "ERROR":
|
||||||
|
logLevel = slog.LevelError
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Initialize log buffer (store last 1000 logs)
|
||||||
|
bufferSize := 1000
|
||||||
|
if sizeStr := os.Getenv("LOG_BUFFER_SIZE"); sizeStr != "" {
|
||||||
|
if parsed, err := strconv.Atoi(sizeStr); err == nil && parsed > 0 {
|
||||||
|
bufferSize = parsed
|
||||||
|
}
|
||||||
|
}
|
||||||
|
LogBuffer = services.NewLogBuffer(bufferSize)
|
||||||
|
|
||||||
|
// Get log format from environment variable, default to JSON
|
||||||
|
logFormat := os.Getenv("LOG_FORMAT")
|
||||||
|
var baseHandler slog.Handler
|
||||||
|
|
||||||
|
if logFormat == "text" {
|
||||||
|
baseHandler = slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
|
||||||
|
Level: logLevel,
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
// Default to JSON format
|
||||||
|
baseHandler = slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{
|
||||||
|
Level: logLevel,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wrap the base handler with in-memory handler
|
||||||
|
handler := services.NewInMemoryHandler(LogBuffer, baseHandler)
|
||||||
|
logger := slog.New(handler)
|
||||||
|
slog.SetDefault(logger)
|
||||||
|
|
||||||
|
slog.Info("Logger initialized",
|
||||||
|
"level", logLevel.String(),
|
||||||
|
"format", logFormat,
|
||||||
|
"buffer_size", bufferSize,
|
||||||
|
"service", "DownLink Backend")
|
||||||
|
}
|
||||||
120
backend/main.go
120
backend/main.go
@@ -1,89 +1,65 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
|
||||||
"log"
|
"log"
|
||||||
|
"log/slog"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"time"
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/labstack/echo"
|
"github.com/go-chi/chi/v5"
|
||||||
"github.com/labstack/echo/middleware"
|
"github.com/go-chi/chi/v5/middleware"
|
||||||
"github.com/google/uuid"
|
"github.com/go-chi/cors"
|
||||||
|
|
||||||
|
"DownLink/handlers"
|
||||||
|
"DownLink/services"
|
||||||
)
|
)
|
||||||
|
|
||||||
// VideoDownloadRequest represents the request structure for video download
|
|
||||||
type VideoDownloadRequest struct {
|
|
||||||
URL string `json:"url"`
|
|
||||||
Quality string `json:"quality"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func downloadVideo(c echo.Context) error {
|
|
||||||
req := new(VideoDownloadRequest)
|
|
||||||
if err := c.Bind(req); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if req.URL == "" || req.Quality == "" {
|
|
||||||
return echo.NewHTTPError(http.StatusBadRequest, "URL and Quality are required")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create a temporary directory for downloading files
|
|
||||||
tmpDir, err := os.MkdirTemp("", "downlink")
|
|
||||||
if err != nil {
|
|
||||||
return echo.NewHTTPError(http.StatusInternalServerError, fmt.Sprintf("Failed to create temporary directory: %v", err))
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
if err := os.RemoveAll(tmpDir); err != nil {
|
|
||||||
log.Printf("Failed to clean up temporary directory: %v", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
uid := uuid.New().String()
|
|
||||||
outputPath := filepath.Join(tmpDir, fmt.Sprintf("output_%s.mp4", uid))
|
|
||||||
|
|
||||||
// Download video and audio combined
|
|
||||||
quality := req.Quality[:len(req.Quality) - 1]
|
|
||||||
|
|
||||||
var mergedFormat string
|
|
||||||
var cookies string
|
|
||||||
|
|
||||||
if strings.Contains(req.URL, "instagram.com/") {
|
|
||||||
mergedFormat = fmt.Sprintf("bestvideo[width<=%s]+bestaudio/best", quality)
|
|
||||||
cookies = "cookies_i.txt"
|
|
||||||
} else {
|
|
||||||
mergedFormat = fmt.Sprintf("bestvideo[height<=%s]+bestaudio/best[height<=%s]", quality, quality)
|
|
||||||
cookies = "cookies_y.txt"
|
|
||||||
}
|
|
||||||
|
|
||||||
cmdDownload := exec.Command("./venv/bin/python3", "-m", "yt_dlp", "--cookies", cookies, "-f", mergedFormat, "--merge-output-format", "mp4", "-o", outputPath, req.URL)
|
|
||||||
if err := cmdDownload.Run(); err != nil {
|
|
||||||
return echo.NewHTTPError(http.StatusInternalServerError, fmt.Sprintf("Failed to download video and audio: %v", err))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Serve the file with appropriate headers
|
|
||||||
return c.Attachment(outputPath, fmt.Sprintf("video_%s.mp4", uid))
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
e := echo.New()
|
// Setup structured logging
|
||||||
|
setupLogger()
|
||||||
|
|
||||||
|
r := chi.NewRouter()
|
||||||
|
|
||||||
// Middleware
|
// Middleware
|
||||||
e.Use(middleware.Logger())
|
r.Use(middleware.Logger)
|
||||||
e.Use(middleware.Recover())
|
r.Use(middleware.Recoverer)
|
||||||
e.Use(middleware.CORSWithConfig(middleware.CORSConfig{
|
r.Use(cors.Handler(cors.Options{
|
||||||
AllowOrigins: []string{"*"},
|
AllowedOrigins: []string{"http://localhost:3000", "https://downlink.webark.in"},
|
||||||
AllowMethods: []string{http.MethodGet, http.MethodPost, http.MethodPut, http.MethodDelete},
|
AllowedMethods: []string{"GET", "POST", "OPTIONS"},
|
||||||
|
AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "X-CSRF-Token"},
|
||||||
|
ExposedHeaders: []string{"Link"},
|
||||||
|
AllowCredentials: false,
|
||||||
|
MaxAge: 300,
|
||||||
}))
|
}))
|
||||||
|
|
||||||
|
// Initialize services
|
||||||
|
videoService := services.NewVideoService()
|
||||||
|
|
||||||
|
// Initialize handlers
|
||||||
|
videoHandler := handlers.NewVideoHandler(videoService)
|
||||||
|
logHandler := handlers.NewLogHandler(LogBuffer) // Initialize log handler
|
||||||
|
|
||||||
// Routes
|
// Routes
|
||||||
e.GET("/downlink/", func(c echo.Context) error {
|
r.Get("/d/", videoHandler.HealthCheck)
|
||||||
return c.String(http.StatusOK, "Backend for DownLink is running.\n")
|
r.Post("/d/download", videoHandler.DownloadVideo)
|
||||||
})
|
r.Get("/d/cache/status", videoHandler.GetCacheStatus)
|
||||||
|
r.Delete("/d/cache/delete", videoHandler.ClearCache)
|
||||||
|
r.Get("/d/logs", logHandler.GetLogs)
|
||||||
|
|
||||||
e.POST("/downlink/download", downloadVideo)
|
// Start periodic cache cleanup (every 6 hours)
|
||||||
|
go func() {
|
||||||
|
ticker := time.NewTicker(6 * time.Hour)
|
||||||
|
defer ticker.Stop()
|
||||||
|
|
||||||
// Start server
|
for range ticker.C {
|
||||||
e.Logger.Fatal(e.Start(":8080"))
|
if err := videoService.CleanupExpiredCache(24 * time.Hour); err != nil {
|
||||||
|
slog.Error("Cache cleanup failed", "error", err)
|
||||||
|
} else {
|
||||||
|
slog.Info("Cache cleanup completed successfully")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
slog.Info("Server starting", "port", "8080")
|
||||||
|
log.Fatal(http.ListenAndServe(":8080", r))
|
||||||
}
|
}
|
||||||
|
|||||||
10
backend/models/video.go
Normal file
10
backend/models/video.go
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
package models
|
||||||
|
|
||||||
|
type VideoDownloadRequest struct {
|
||||||
|
URL string `json:"url"`
|
||||||
|
Quality string `json:"quality"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ErrorResponse struct {
|
||||||
|
Error string `json:"error"`
|
||||||
|
}
|
||||||
145
backend/services/logbuffer.go
Normal file
145
backend/services/logbuffer.go
Normal file
@@ -0,0 +1,145 @@
|
|||||||
|
package services
|
||||||
|
|
||||||
|
import (
|
||||||
|
"container/ring"
|
||||||
|
"context"
|
||||||
|
"log/slog"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// LogEntry represents a structured log entry
|
||||||
|
type LogEntry struct {
|
||||||
|
Time time.Time `json:"time"`
|
||||||
|
Level string `json:"level"`
|
||||||
|
Message string `json:"msg"`
|
||||||
|
Attrs map[string]any `json:"attrs,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// LogBuffer is a service that maintains an in-memory buffer of recent logs
|
||||||
|
type LogBuffer struct {
|
||||||
|
buffer *ring.Ring
|
||||||
|
mutex sync.RWMutex
|
||||||
|
size int
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewLogBuffer creates a new log buffer with the specified capacity
|
||||||
|
func NewLogBuffer(capacity int) *LogBuffer {
|
||||||
|
return &LogBuffer{
|
||||||
|
buffer: ring.New(capacity),
|
||||||
|
size: capacity,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add adds a log entry to the buffer
|
||||||
|
func (lb *LogBuffer) Add(entry LogEntry) {
|
||||||
|
lb.mutex.Lock()
|
||||||
|
defer lb.mutex.Unlock()
|
||||||
|
lb.buffer.Value = entry
|
||||||
|
lb.buffer = lb.buffer.Next()
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetEntries returns all log entries in chronological order
|
||||||
|
func (lb *LogBuffer) GetEntries() []LogEntry {
|
||||||
|
lb.mutex.RLock()
|
||||||
|
defer lb.mutex.RUnlock()
|
||||||
|
|
||||||
|
var entries []LogEntry
|
||||||
|
lb.buffer.Do(func(val interface{}) {
|
||||||
|
if val != nil {
|
||||||
|
entries = append(entries, val.(LogEntry))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
// Sort entries by time (they might be out of order due to ring buffer)
|
||||||
|
// No need for manual sort as we'll return them in the order they appear in the ring
|
||||||
|
return entries
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetEntriesByLevel filters log entries by minimum log level
|
||||||
|
func (lb *LogBuffer) GetEntriesByLevel(minLevel slog.Level) []LogEntry {
|
||||||
|
allEntries := lb.GetEntries()
|
||||||
|
if minLevel == slog.LevelDebug {
|
||||||
|
return allEntries // Return all logs if debug level requested
|
||||||
|
}
|
||||||
|
|
||||||
|
var filteredEntries []LogEntry
|
||||||
|
for _, entry := range allEntries {
|
||||||
|
var entryLevel slog.Level
|
||||||
|
switch entry.Level {
|
||||||
|
case "DEBUG":
|
||||||
|
entryLevel = slog.LevelDebug
|
||||||
|
case "INFO":
|
||||||
|
entryLevel = slog.LevelInfo
|
||||||
|
case "WARN":
|
||||||
|
entryLevel = slog.LevelWarn
|
||||||
|
case "ERROR":
|
||||||
|
entryLevel = slog.LevelError
|
||||||
|
}
|
||||||
|
|
||||||
|
if entryLevel >= minLevel {
|
||||||
|
filteredEntries = append(filteredEntries, entry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return filteredEntries
|
||||||
|
}
|
||||||
|
|
||||||
|
// Size returns the capacity of the log buffer
|
||||||
|
func (lb *LogBuffer) Size() int {
|
||||||
|
return lb.size
|
||||||
|
}
|
||||||
|
|
||||||
|
// InMemoryHandler is a slog.Handler that writes logs to the in-memory buffer
|
||||||
|
type InMemoryHandler struct {
|
||||||
|
logBuffer *LogBuffer
|
||||||
|
next slog.Handler
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewInMemoryHandler creates a new slog.Handler that writes logs to both
|
||||||
|
// the in-memory buffer and the next handler
|
||||||
|
func NewInMemoryHandler(logBuffer *LogBuffer, next slog.Handler) *InMemoryHandler {
|
||||||
|
return &InMemoryHandler{
|
||||||
|
logBuffer: logBuffer,
|
||||||
|
next: next,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Enabled implements slog.Handler.
|
||||||
|
func (h *InMemoryHandler) Enabled(ctx context.Context, level slog.Level) bool {
|
||||||
|
return h.next.Enabled(ctx, level)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle implements slog.Handler.
|
||||||
|
func (h *InMemoryHandler) Handle(ctx context.Context, record slog.Record) error {
|
||||||
|
// Forward to next handler
|
||||||
|
if err := h.next.Handle(ctx, record); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Store in buffer
|
||||||
|
attrs := make(map[string]any)
|
||||||
|
record.Attrs(func(attr slog.Attr) bool {
|
||||||
|
attrs[attr.Key] = attr.Value.Any()
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
|
||||||
|
h.logBuffer.Add(LogEntry{
|
||||||
|
Time: record.Time,
|
||||||
|
Level: record.Level.String(),
|
||||||
|
Message: record.Message,
|
||||||
|
Attrs: attrs,
|
||||||
|
})
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithAttrs implements slog.Handler.
|
||||||
|
func (h *InMemoryHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
|
||||||
|
return NewInMemoryHandler(h.logBuffer, h.next.WithAttrs(attrs))
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithGroup implements slog.Handler.
|
||||||
|
func (h *InMemoryHandler) WithGroup(name string) slog.Handler {
|
||||||
|
return NewInMemoryHandler(h.logBuffer, h.next.WithGroup(name))
|
||||||
|
}
|
||||||
249
backend/services/video.go
Normal file
249
backend/services/video.go
Normal file
@@ -0,0 +1,249 @@
|
|||||||
|
package services
|
||||||
|
|
||||||
|
import (
	"fmt"
	"hash/fnv"
	"log/slog"
	"os"
	"os/exec"
	"path/filepath"
	"regexp"
	"strings"
	"time"
)
|
||||||
|
|
||||||
|
// VideoService downloads videos via yt-dlp and caches the resulting mp4
// files on disk.
type VideoService struct {
	// cacheDir is the directory holding cached downloads; an empty string
	// means caching is disabled (see NewVideoService).
	cacheDir string
}
|
||||||
|
|
||||||
|
func NewVideoService() *VideoService {
|
||||||
|
// Create cache directory if it doesn't exist
|
||||||
|
cacheDir := "./cache"
|
||||||
|
if err := os.MkdirAll(cacheDir, 0755); err != nil {
|
||||||
|
slog.Error("Failed to create cache directory", "path", cacheDir, "error", err)
|
||||||
|
cacheDir = "" // Disable caching if we can't create directory
|
||||||
|
slog.Warn("Caching disabled due to directory creation failure")
|
||||||
|
} else {
|
||||||
|
slog.Info("Cache directory initialized", "path", cacheDir)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &VideoService{
|
||||||
|
cacheDir: cacheDir,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (vs *VideoService) extractWatchID(url string) string {
|
||||||
|
// YouTube watch ID pattern
|
||||||
|
youtubePattern := regexp.MustCompile(`(?:youtube\.com/watch\?v=|youtu\.be/|youtube\.com/embed/)([a-zA-Z0-9_-]{11})`)
|
||||||
|
if match := youtubePattern.FindStringSubmatch(url); len(match) > 1 {
|
||||||
|
return match[1]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Instagram pattern
|
||||||
|
instagramPattern := regexp.MustCompile(`instagram\.com/p/([a-zA-Z0-9_-]+)`)
|
||||||
|
if match := instagramPattern.FindStringSubmatch(url); len(match) > 1 {
|
||||||
|
return match[1]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fallback: use a hash of the URL
|
||||||
|
return fmt.Sprintf("hash_%x", len(url))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (vs *VideoService) generateCacheFileName(url, quality string) string {
|
||||||
|
watchID := vs.extractWatchID(url)
|
||||||
|
// Remove 'p' from quality (e.g., "720p" -> "720")
|
||||||
|
cleanQuality := strings.TrimSuffix(quality, "p")
|
||||||
|
return fmt.Sprintf("%s_%s.mp4", watchID, cleanQuality)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (vs *VideoService) DownloadVideo(url, quality string) (string, error) {
|
||||||
|
// Check cache first
|
||||||
|
if vs.cacheDir != "" {
|
||||||
|
cacheFileName := vs.generateCacheFileName(url, quality)
|
||||||
|
cachePath := filepath.Join(vs.cacheDir, cacheFileName)
|
||||||
|
|
||||||
|
// Check if cached file exists
|
||||||
|
if _, err := os.Stat(cachePath); err == nil {
|
||||||
|
slog.Info("Cache hit", "url", url, "quality", quality, "file", cachePath)
|
||||||
|
return cachePath, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
slog.Info("Cache miss, downloading video", "url", url, "quality", quality)
|
||||||
|
|
||||||
|
// Determine output path
|
||||||
|
var outputPath string
|
||||||
|
|
||||||
|
if vs.cacheDir != "" {
|
||||||
|
// Use cache directory with watch_id+quality naming
|
||||||
|
cacheFileName := vs.generateCacheFileName(url, quality)
|
||||||
|
outputPath = filepath.Join(vs.cacheDir, cacheFileName)
|
||||||
|
} else {
|
||||||
|
// Fallback to temporary directory
|
||||||
|
tmpDir, err := os.MkdirTemp("", "dl_")
|
||||||
|
if err != nil {
|
||||||
|
return "", fmt.Errorf("failed to create temporary directory: %v", err)
|
||||||
|
}
|
||||||
|
watchID := vs.extractWatchID(url)
|
||||||
|
cleanQuality := strings.TrimSuffix(quality, "p")
|
||||||
|
outputPath = filepath.Join(tmpDir, fmt.Sprintf("%s_%s.mp4", watchID, cleanQuality))
|
||||||
|
}
|
||||||
|
|
||||||
|
quality = quality[:len(quality)-1]
|
||||||
|
|
||||||
|
var mergedFormat string
|
||||||
|
var cookies string
|
||||||
|
|
||||||
|
if strings.Contains(url, "instagram.com/") {
|
||||||
|
mergedFormat = fmt.Sprintf("bestvideo[width<=%s]+bestaudio/best", quality)
|
||||||
|
cookies = "instagram.txt"
|
||||||
|
} else {
|
||||||
|
mergedFormat = fmt.Sprintf("bestvideo[height<=%s]+bestaudio/best[height<=%s]", quality, quality)
|
||||||
|
cookies = "youtube.txt"
|
||||||
|
}
|
||||||
|
|
||||||
|
cookiePath := filepath.Join(".", cookies)
|
||||||
|
if _, err := os.Stat(cookiePath); os.IsNotExist(err) {
|
||||||
|
slog.Error("Cookie file not found", "path", cookiePath)
|
||||||
|
return "", fmt.Errorf("cookie file %s not found", cookiePath)
|
||||||
|
}
|
||||||
|
|
||||||
|
slog.Info("Starting yt-dlp download",
|
||||||
|
"url", url,
|
||||||
|
"quality", quality,
|
||||||
|
"format", mergedFormat,
|
||||||
|
"cookies", cookiePath,
|
||||||
|
"output", outputPath)
|
||||||
|
|
||||||
|
cmdDownload := exec.Command("./venv/bin/python3", "-m", "yt_dlp", "--cookies", cookiePath, "-f", mergedFormat, "--merge-output-format", "mp4", "-o", outputPath, url)
|
||||||
|
|
||||||
|
output, err := cmdDownload.CombinedOutput()
|
||||||
|
if err != nil {
|
||||||
|
slog.Error("yt-dlp download failed",
|
||||||
|
"url", url,
|
||||||
|
"error", err,
|
||||||
|
"output", string(output))
|
||||||
|
return "", fmt.Errorf("failed to download video and audio: %v\nOutput: %s", err, string(output))
|
||||||
|
}
|
||||||
|
|
||||||
|
slog.Info("yt-dlp download completed", "url", url, "output", string(output))
|
||||||
|
|
||||||
|
if _, err := os.Stat(outputPath); os.IsNotExist(err) {
|
||||||
|
slog.Error("Output file was not created", "path", outputPath)
|
||||||
|
return "", fmt.Errorf("video file was not created")
|
||||||
|
}
|
||||||
|
|
||||||
|
slog.Info("Video downloaded successfully", "path", outputPath)
|
||||||
|
return outputPath, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (vs *VideoService) CleanupTempDir(path string) {
|
||||||
|
// Only cleanup if it's a temporary download (contains "dl_" in path)
|
||||||
|
if strings.Contains(path, "dl_") {
|
||||||
|
dir := filepath.Dir(path)
|
||||||
|
if err := os.RemoveAll(dir); err != nil {
|
||||||
|
slog.Error("Failed to clean up temporary directory", "path", dir, "error", err)
|
||||||
|
} else {
|
||||||
|
slog.Info("Temporary directory cleaned up", "path", dir)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// CleanupExpiredCache removes cached files that are older than the specified duration
|
||||||
|
func (vs *VideoService) CleanupExpiredCache(maxAge time.Duration) error {
|
||||||
|
if vs.cacheDir == "" {
|
||||||
|
slog.Debug("Cache cleanup skipped - caching disabled")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
files, err := os.ReadDir(vs.cacheDir)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to read cache directory: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
cutoff := time.Now().Add(-maxAge)
|
||||||
|
var removedCount int
|
||||||
|
var totalSize int64
|
||||||
|
|
||||||
|
for _, file := range files {
|
||||||
|
if file.IsDir() || !strings.HasSuffix(file.Name(), ".mp4") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
filePath := filepath.Join(vs.cacheDir, file.Name())
|
||||||
|
info, err := os.Stat(filePath)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if info.ModTime().Before(cutoff) {
|
||||||
|
if err := os.Remove(filePath); err != nil {
|
||||||
|
slog.Error("Failed to remove expired cache file", "path", filePath, "error", err)
|
||||||
|
} else {
|
||||||
|
removedCount++
|
||||||
|
totalSize += info.Size()
|
||||||
|
slog.Debug("Removed expired cache file", "path", filePath, "size", info.Size())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if removedCount > 0 {
|
||||||
|
slog.Info("Cache cleanup completed", "files_removed", removedCount, "total_size_removed", totalSize)
|
||||||
|
} else {
|
||||||
|
slog.Debug("Cache cleanup completed - no expired files found")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetCacheDir returns the cache directory path
|
||||||
|
func (vs *VideoService) GetCacheDir() string {
|
||||||
|
return vs.cacheDir
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetCacheStats returns cache statistics
|
||||||
|
func (vs *VideoService) GetCacheStats() map[string]interface{} {
|
||||||
|
if vs.cacheDir == "" {
|
||||||
|
return map[string]interface{}{
|
||||||
|
"status": "disabled",
|
||||||
|
"total_size": 0,
|
||||||
|
"files": 0,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
files, err := os.ReadDir(vs.cacheDir)
|
||||||
|
if err != nil {
|
||||||
|
slog.Error("Failed to read cache directory for stats", "path", vs.cacheDir, "error", err)
|
||||||
|
return map[string]interface{}{
|
||||||
|
"status": "error",
|
||||||
|
"total_size": 0,
|
||||||
|
"files": 0,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var videoCount int64
|
||||||
|
var totalSize int64
|
||||||
|
|
||||||
|
for _, file := range files {
|
||||||
|
if file.IsDir() || !strings.HasSuffix(file.Name(), ".mp4") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
filePath := filepath.Join(vs.cacheDir, file.Name())
|
||||||
|
info, err := os.Stat(filePath)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
videoCount++
|
||||||
|
totalSize += info.Size()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert bytes to MB
|
||||||
|
totalSizeMB := totalSize
|
||||||
|
|
||||||
|
return map[string]interface{}{
|
||||||
|
"status": "enabled",
|
||||||
|
"total_size": totalSizeMB,
|
||||||
|
"files": videoCount,
|
||||||
|
}
|
||||||
|
}
|
||||||
Reference in New Issue
Block a user