package api

import (
	"context"
	"database/sql"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"mime/multipart"
	"net/http"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"git.sovbit.dev/enki/torrentGateway/internal/admin"
	"git.sovbit.dev/enki/torrentGateway/internal/auth"
	"git.sovbit.dev/enki/torrentGateway/internal/blossom"
	"git.sovbit.dev/enki/torrentGateway/internal/config"
	"git.sovbit.dev/enki/torrentGateway/internal/dht"
	"git.sovbit.dev/enki/torrentGateway/internal/middleware"
	"git.sovbit.dev/enki/torrentGateway/internal/nostr"
	"git.sovbit.dev/enki/torrentGateway/internal/profile"
	"git.sovbit.dev/enki/torrentGateway/internal/storage"
	"git.sovbit.dev/enki/torrentGateway/internal/streaming"
	"git.sovbit.dev/enki/torrentGateway/internal/torrent"
	"git.sovbit.dev/enki/torrentGateway/internal/tracker"

	"github.com/gorilla/mux"
	nip "github.com/nbd-wtf/go-nostr"
)

// Server start time for uptime calculation
var serverStartTime = time.Now()

// Error handling structures
type APIError struct {
	Code    int    `json:"code"`
	Message string `json:"message"`
	Details string `json:"details,omitempty"`
	Type    string `json:"type"`
}

type ErrorResponse struct {
	Error   APIError `json:"error"`
	Success bool     `json:"success"`
}
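
// A response written by writeErrorResponse below serializes to JSON in this
// shape (values illustrative, taken from the ErrFileNotFound template):
//
//	{
//	  "error": {
//	    "code": 404,
//	    "message": "File not found",
//	    "type": "not_found_error"
//	  },
//	  "success": false
//	}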

// Error types
const (
	ErrorTypeValidation         = "validation_error"
	ErrorTypeNotFound           = "not_found_error"
	ErrorTypeInternal           = "internal_error"
	ErrorTypeUnsupported        = "unsupported_error"
	ErrorTypeUnauthorized       = "unauthorized_error"
	ErrorTypeRateLimit          = "rate_limit_error"
	ErrorTypeInvalidRange       = "invalid_range_error"
	ErrorTypeUploadFailed       = "upload_failed_error"
	ErrorTypeStorageFailed      = "storage_failed_error"
	ErrorTypeServiceUnavailable = "service_unavailable_error"
)

// Common error responses
var (
	ErrFileNotFound = APIError{
		Code:    http.StatusNotFound,
		Message: "File not found",
		Type:    ErrorTypeNotFound,
	}

	ErrInvalidFileHash = APIError{
		Code:    http.StatusBadRequest,
		Message: "Invalid file hash format",
		Type:    ErrorTypeValidation,
	}

	ErrUnsupportedMediaType = APIError{
		Code:    http.StatusUnsupportedMediaType,
		Message: "Unsupported media type",
		Type:    ErrorTypeUnsupported,
	}

	ErrInternalServer = APIError{
		Code:    http.StatusInternalServerError,
		Message: "Internal server error",
		Type:    ErrorTypeInternal,
	}

	ErrMethodNotAllowed = APIError{
		Code:    http.StatusMethodNotAllowed,
		Message: "Method not allowed",
		Type:    ErrorTypeValidation,
	}

	ErrInvalidRange = APIError{
		Code:    http.StatusRequestedRangeNotSatisfiable,
		Message: "Invalid range request",
		Type:    ErrorTypeInvalidRange,
	}
)

type Gateway struct {
	blossomClient   BlossomClient
	nostrPublisher  NostrPublisher
	config          *config.Config
	storage         *storage.Backend
	profileFetcher  *profile.ProfileFetcher
	publicURL       string
	trackerInstance *tracker.Tracker
	dhtBootstrap    DHTBootstrap
}

// DHTBootstrap interface for DHT integration
type DHTBootstrap interface {
	AnnounceNewTorrent(infoHash string, port int)
	GetDHTStats() map[string]interface{}
}

// DHTNodeInfo represents a DHT node for torrent embedding
type DHTNodeInfo struct {
	IP   string
	Port int
}

// Error handling utilities
func (g *Gateway) writeErrorResponse(w http.ResponseWriter, apiErr APIError, details string) {
	apiErr.Details = details
	response := ErrorResponse{
		Error:   apiErr,
		Success: false,
	}

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(apiErr.Code)
	json.NewEncoder(w).Encode(response)
}

func (g *Gateway) writeError(w http.ResponseWriter, statusCode int, message, errorType, details string) {
	apiErr := APIError{
		Code:    statusCode,
		Message: message,
		Type:    errorType,
	}
	// Pass details through writeErrorResponse, which assigns them itself;
	// setting them on apiErr here would be overwritten by an empty string.
	g.writeErrorResponse(w, apiErr, details)
}

func (g *Gateway) validateFileHash(hash string) error {
	if hash == "" {
		return fmt.Errorf("file hash is required")
	}

	// SHA-256 hash should be 64 hex characters
	if len(hash) != 64 {
		return fmt.Errorf("file hash must be 64 characters long")
	}

	// Check if it's valid hex
	for _, c := range hash {
		if !((c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F')) {
			return fmt.Errorf("file hash must contain only hexadecimal characters")
		}
	}

	return nil
}

func (g *Gateway) validateHTTPMethod(r *http.Request, allowedMethods []string) error {
	method := r.Method
	for _, allowed := range allowedMethods {
		if method == allowed {
			return nil
		}
	}
	return fmt.Errorf("method %s not allowed, expected one of: %s", method, strings.Join(allowedMethods, ", "))
}

type NostrPublisher interface {
	PublishTorrentAnnouncement(ctx context.Context, data nostr.TorrentEventData) (*nip.Event, error)
}

type BlossomClient interface {
	Put(data []byte) (string, error)
	Get(hash string) ([]byte, error)
}

type FileMetadata struct {
	FileHash      string                 `json:"file_hash"`
	FileName      string                 `json:"file_name"`
	TotalSize     int64                  `json:"total_size"`
	ChunkCount    int                    `json:"chunk_count"`
	StorageType   string                 `json:"storage_type"` // "blob" or "torrent"
	Chunks        []ChunkInfo            `json:"chunks"`
	TorrentInfo   *torrent.TorrentInfo   `json:"torrent_info,omitempty"`
	StreamingInfo *streaming.FileInfo    `json:"streaming_info,omitempty"`
	HLSPlaylist   *streaming.HLSPlaylist `json:"hls_playlist,omitempty"`
}

type ChunkInfo struct {
	Index int    `json:"index"`
	Hash  string `json:"hash"`
	Size  int    `json:"size"`
}

type UploadResponse struct {
	FileHash     string `json:"file_hash"`
	Message      string `json:"message"`
	TorrentHash  string `json:"torrent_hash,omitempty"`
	MagnetLink   string `json:"magnet_link,omitempty"`
	NostrEventID string `json:"nostr_event_id,omitempty"`
}

func NewGateway(cfg *config.Config, storage *storage.Backend) *Gateway {
	// Use mock Blossom client for now (real client has ContentLength issues)
	blossomClient := blossom.NewMockClient()

	// Initialize real Nostr publisher with configured relays
	nostrRelays := cfg.Nostr.Relays
	if len(nostrRelays) == 0 {
		// Fallback relays if none configured
		nostrRelays = []string{
			"wss://freelay.sovbit.host",
			"wss://relay.damus.io",
			"wss://nos.lol",
		}
	}

	// Generate a new private key for this session (in production, this should be loaded from config)
	var nostrPublisher NostrPublisher
	realPublisher, err := nostr.NewPublisher("", nostrRelays)
	if err != nil {
		// Fall back to mock if real publisher fails to initialize
		log.Printf("Warning: Failed to initialize Nostr publisher, using mock: %v", err)
		nostrPublisher = nostr.CreateMockPublisher()
	} else {
		pubkey, _ := realPublisher.GetPublicKeyBech32()
		log.Printf("Initialized Nostr publisher with public key: %s", pubkey)
		nostrPublisher = realPublisher
	}

	// Set public URL for tracker functionality
	publicURL := fmt.Sprintf("http://localhost:%d", cfg.Gateway.Port)

	return &Gateway{
		blossomClient:  blossomClient,
		nostrPublisher: nostrPublisher,
		config:         cfg,
		storage:        storage,
		profileFetcher: profile.NewProfileFetcher(nostrRelays),
		publicURL:      publicURL,
	}
}
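
// A minimal wiring sketch (illustrative; config.Load, storage.NewBackend,
// and the route paths are assumptions, not APIs defined in this file):
//
//	cfg, _ := config.Load("config.yaml")
//	backend, _ := storage.NewBackend(cfg)
//	gw := api.NewGateway(cfg, backend)
//	r := mux.NewRouter()
//	r.HandleFunc("/upload", gw.UploadHandler).Methods("POST")
//	r.HandleFunc("/download/{hash}", gw.DownloadHandler).Methods("GET", "HEAD")
//	log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", cfg.Gateway.Port), r))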

// Implement Gateway interface methods for tracker integration
func (g *Gateway) GetPublicURL() string {
	return g.publicURL
}

func (g *Gateway) IsValidInfoHash(infoHash string) bool {
	// Check if we have metadata for this info hash
	// For now, check if any of our files match this info hash
	for _, metadata := range metadataStore {
		if metadata.TorrentInfo != nil && metadata.TorrentInfo.InfoHash == infoHash {
			return true
		}
	}

	// Also check storage backend
	exists, _ := g.storage.FileExistsByInfoHash(infoHash)
	return exists
}

func (g *Gateway) GetWebSeedURL(infoHash string) string {
	// Find the file with this info hash and return its WebSeed URL
	for fileHash, metadata := range metadataStore {
		if metadata.TorrentInfo != nil && metadata.TorrentInfo.InfoHash == infoHash {
			webSeedURL := fmt.Sprintf("%s/webseed/%s/", strings.TrimSuffix(g.publicURL, "/"), fileHash)

			// Validate the generated URL
			if g.validateWebSeedURL(webSeedURL) {
				return webSeedURL
			}
		}
	}

	// Try storage backend
	if fileHash, err := g.storage.GetFileHashByInfoHash(infoHash); err == nil {
		webSeedURL := fmt.Sprintf("%s/webseed/%s/", strings.TrimSuffix(g.publicURL, "/"), fileHash)

		// Validate the generated URL
		if g.validateWebSeedURL(webSeedURL) {
			return webSeedURL
		}
	}

	return ""
}

// DHT Gateway interface methods
func (g *Gateway) GetDHTPort() int {
	return g.config.DHT.Port
}

func (g *Gateway) GetDatabase() *sql.DB {
	return g.storage.GetDB()
}

func (g *Gateway) GetAllTorrentHashes() []string {
	var hashes []string

	// Get from metadata store
	for _, metadata := range metadataStore {
		if metadata.TorrentInfo != nil {
			hashes = append(hashes, metadata.TorrentInfo.InfoHash)
		}
	}

	// Get from storage backend
	rows, err := g.storage.GetDB().Query(`SELECT info_hash FROM files WHERE info_hash IS NOT NULL`)
	if err != nil {
		return hashes
	}
	defer rows.Close()

	for rows.Next() {
		var infoHash string
		if err := rows.Scan(&infoHash); err == nil && infoHash != "" {
			// Avoid duplicates
			found := false
			for _, existing := range hashes {
				if existing == infoHash {
					found = true
					break
				}
			}
			if !found {
				hashes = append(hashes, infoHash)
			}
		}
	}

	return hashes
}

// SetDHTBootstrap sets the DHT bootstrap instance for torrent announcements
func (g *Gateway) SetDHTBootstrap(dhtBootstrap DHTBootstrap) {
	g.dhtBootstrap = dhtBootstrap
}
func (g *Gateway) UploadHandler(w http.ResponseWriter, r *http.Request) {
	// Validate HTTP method
	if err := g.validateHTTPMethod(r, []string{http.MethodPost}); err != nil {
		g.writeErrorResponse(w, ErrMethodNotAllowed, err.Error())
		return
	}

	// Validate content type
	contentType := r.Header.Get("Content-Type")
	if !strings.HasPrefix(contentType, "multipart/form-data") {
		g.writeError(w, http.StatusBadRequest, "Invalid content type", ErrorTypeValidation,
			"Expected multipart/form-data, got: "+contentType)
		return
	}

	// Check content length
	if r.ContentLength <= 0 {
		g.writeError(w, http.StatusBadRequest, "Empty file upload", ErrorTypeValidation,
			"Content-Length header indicates empty or missing content")
		return
	}

	// Check if content length exceeds configured limits
	maxUploadSize, err := g.config.GetMaxUploadSizeBytes()
	if err != nil {
		g.writeError(w, http.StatusInternalServerError, "Configuration error", ErrorTypeInternal,
			fmt.Sprintf("Failed to parse max upload size: %v", err))
		return
	}
	if r.ContentLength > maxUploadSize {
		g.writeError(w, http.StatusRequestEntityTooLarge, "File too large", ErrorTypeValidation,
			fmt.Sprintf("File size %d bytes exceeds maximum allowed size of %d bytes", r.ContentLength, maxUploadSize))
		return
	}

	// Parse multipart form with size limit (use reasonable memory limit)
	memoryLimit := int64(32 << 20) // 32MB default
	if maxUploadSize < memoryLimit {
		memoryLimit = maxUploadSize
	}
	err = r.ParseMultipartForm(memoryLimit)
	if err != nil {
		g.writeError(w, http.StatusBadRequest, "Failed to parse multipart form", ErrorTypeValidation,
			fmt.Sprintf("Multipart form parsing error: %v", err))
		return
	}

	// Get file from form
	file, fileHeader, err := r.FormFile("file")
	if err != nil {
		g.writeError(w, http.StatusBadRequest, "Missing or invalid file field", ErrorTypeValidation,
			fmt.Sprintf("Expected 'file' field in multipart form: %v", err))
		return
	}
	defer func() {
		if closeErr := file.Close(); closeErr != nil {
			log.Printf("Warning: Failed to close uploaded file: %v", closeErr)
		}
	}()

	// Validate filename
	fileName := strings.TrimSpace(fileHeader.Filename)
	if fileName == "" {
		g.writeError(w, http.StatusBadRequest, "Missing filename", ErrorTypeValidation,
			"Uploaded file must have a filename")
		return
	}

	// Check for dangerous file paths
	if strings.Contains(fileName, "..") || strings.Contains(fileName, "/") || strings.Contains(fileName, "\\") {
		g.writeError(w, http.StatusBadRequest, "Invalid filename", ErrorTypeValidation,
			"Filename cannot contain path traversal characters")
		return
	}

	// Validate file size from header
	if fileHeader.Size <= 0 {
		g.writeError(w, http.StatusBadRequest, "Empty file", ErrorTypeValidation,
			"Uploaded file appears to be empty")
		return
	}

	if fileHeader.Size > maxUploadSize {
		g.writeError(w, http.StatusRequestEntityTooLarge, "File too large", ErrorTypeValidation,
			fmt.Sprintf("File size %d bytes exceeds maximum allowed size of %d bytes", fileHeader.Size, maxUploadSize))
		return
	}

	// Size-based routing: decide between blob and torrent storage
	blobThreshold := g.config.GetBlobThreshold()
	if fileHeader.Size < blobThreshold {
		// Small file - store as single Blossom blob
		g.handleBlobUpload(w, r, file, fileName, fileHeader)
	} else {
		// Large file - use existing chunking logic
		g.handleTorrentUpload(w, r, file, fileName, fileHeader)
	}
}
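
// Uploads can be exercised with a plain multipart POST (illustrative; the
// actual route depends on how UploadHandler is mounted in the router):
//
//	curl -F "file=@movie.mkv" http://localhost:8080/upload
//
// Files under the blob threshold return only file_hash; larger files also
// carry torrent_hash and magnet_link (see UploadResponse above).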

// handleBlobUpload handles small files that should be stored as single Blossom blobs
func (g *Gateway) handleBlobUpload(w http.ResponseWriter, r *http.Request, file multipart.File, fileName string, fileHeader *multipart.FileHeader) {
	// Determine content type
	contentType := fileHeader.Header.Get("Content-Type")
	if contentType == "" {
		contentType = "application/octet-stream"
	}

	// Get user from context for ownership tracking
	ownerPubkey := middleware.GetUserFromContext(r.Context())
	accessLevel := "public" // Default access level

	// Store as blob using unified storage
	var metadata *storage.FileMetadata
	var err error
	if ownerPubkey != "" {
		metadata, err = g.storage.StoreBlobAsFileWithOwner(file, fileName, contentType, ownerPubkey, accessLevel)
	} else {
		metadata, err = g.storage.StoreBlobAsFile(file, fileName, contentType)
	}
	if err != nil {
		g.writeError(w, http.StatusInternalServerError, "Blob storage failed", ErrorTypeStorageFailed,
			fmt.Sprintf("Failed to store file as blob: %v", err))
		return
	}

	// Create API response metadata
	apiMetadata := FileMetadata{
		FileHash:    metadata.Hash,
		FileName:    fileName,
		TotalSize:   metadata.Size,
		ChunkCount:  1, // Blobs count as a single "chunk"
		StorageType: "blob",
		Chunks:      []ChunkInfo{{Index: 0, Hash: metadata.Hash, Size: int(metadata.Size)}},
	}

	// Store API metadata for compatibility
	err = g.storeMetadata(metadata.Hash, apiMetadata)
	if err != nil {
		log.Printf("Warning: Failed to store API metadata for blob %s: %v", metadata.Hash, err)
	}

	// Publish to Nostr for blobs
	var nostrEventID string
	if g.nostrPublisher != nil {
		eventData := nostr.TorrentEventData{
			Title:       fmt.Sprintf("File: %s", fileName),
			FileName:    fileName,
			FileSize:    metadata.Size,
			BlossomHash: metadata.Hash,
			Description: fmt.Sprintf("File '%s' (%.2f MB) available via Blossom blob storage", fileName, float64(metadata.Size)/1024/1024),
		}

		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
		defer cancel()

		event, err := g.nostrPublisher.PublishTorrentAnnouncement(ctx, eventData)
		if err != nil {
			log.Printf("Warning: Failed to publish blob to Nostr: %v", err)
		} else if event != nil {
			nostrEventID = nostr.GetEventID(event)
		}
	}

	// Send success response for blob
	response := UploadResponse{
		FileHash:     metadata.Hash,
		Message:      "File uploaded successfully as blob",
		NostrEventID: nostrEventID,
	}

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	json.NewEncoder(w).Encode(response)
}
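
// A successful blob upload serializes to JSON roughly as follows
// (hash and event ID illustrative):
//
//	{
//	  "file_hash": "9f86d081884c7d65...",
//	  "message": "File uploaded successfully as blob",
//	  "nostr_event_id": "..."
//	}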

// handleTorrentUpload handles large files that should be chunked for BitTorrent
func (g *Gateway) handleTorrentUpload(w http.ResponseWriter, r *http.Request, file multipart.File, fileName string, fileHeader *multipart.FileHeader) {
	// Reset file reader position
	if _, err := file.Seek(0, io.SeekStart); err != nil {
		g.writeError(w, http.StatusInternalServerError, "File seek failed", ErrorTypeInternal,
			fmt.Sprintf("Failed to rewind uploaded file: %v", err))
		return
	}

	// Determine content type
	contentType := fileHeader.Header.Get("Content-Type")
	if contentType == "" {
		contentType = "application/octet-stream"
	}

	// Get user from context for ownership tracking
	ownerPubkey := middleware.GetUserFromContext(r.Context())
	accessLevel := "public" // Default access level

	// Store file using storage backend (will chunk automatically)
	var metadata *storage.FileMetadata
	var err error
	if ownerPubkey != "" {
		metadata, err = g.storage.StoreFileWithOwner(file, fileName, contentType, ownerPubkey, accessLevel)
	} else {
		metadata, err = g.storage.StoreFile(file, fileName, contentType)
	}
	if err != nil {
		g.writeError(w, http.StatusInternalServerError, "File storage failed", ErrorTypeStorageFailed,
			fmt.Sprintf("Failed to store file: %v", err))
		return
	}

	// Get chunk hashes for torrent creation
	chunkHashes, err := g.storage.GetChunkHashes(metadata.Hash)
	if err != nil {
		g.writeError(w, http.StatusInternalServerError, "Failed to get chunks", ErrorTypeInternal,
			fmt.Sprintf("Failed to retrieve chunk hashes: %v", err))
		return
	}

	// Create API response chunks
	var chunkInfos []ChunkInfo
	for i, chunkHash := range chunkHashes {
		chunkInfos = append(chunkInfos, ChunkInfo{
			Index: i,
			Hash:  chunkHash,
			Size:  int(g.config.GetChunkSize()), // Use config chunk size
		})
	}

	// Create torrent pieces from chunk hashes
	pieces := make([]torrent.PieceInfo, len(chunkHashes))
	for i, chunkHash := range chunkHashes {
		// Take the first 20 bytes of the hex digest as the 20-byte piece
		// identifier (a truncation, not a hex decode); the full SHA-256
		// travels alongside in the SHA256 field.
		hashBytes := make([]byte, 20)
		copy(hashBytes, []byte(chunkHash)[:20])

		pieces[i] = torrent.PieceInfo{
			Index:  i,
			Hash:   [20]byte(hashBytes),
			SHA256: chunkHash,
			Length: int(g.config.GetChunkSize()),
		}
	}

	// Generate validated WebSeed URL and base URL for tracker
	webSeedURL := g.generateWebSeedURL(r, metadata.Hash)
	baseURL := fmt.Sprintf("http://%s", r.Host)

	fileInfo := torrent.FileInfo{
		Name:       fileName,
		Size:       metadata.Size,
		Pieces:     pieces,
		WebSeedURL: webSeedURL,
	}

	trackers := g.config.Torrent.Trackers
	if len(trackers) == 0 {
		trackers = []string{
			"udp://tracker.opentrackr.org:1337",
			"udp://tracker.openbittorrent.com:6969",
		}
	}

	// Get gateway URL for built-in tracker
	gatewayURL := baseURL

	// Build DHT nodes list
	var dhtNodes [][]interface{}
	if g.config.IsServiceEnabled("dht") {
		// Add self as DHT node
		dhtNodes = append(dhtNodes, []interface{}{g.GetPublicURL(), g.GetDHTPort()})

		// Add DHT bootstrap nodes if available
		if g.dhtBootstrap != nil {
			// Use type assertion to get concrete type for accessing GetBootstrapNodes
			if bootstrap, ok := g.dhtBootstrap.(*dht.DHTBootstrap); ok {
				bootstrapNodes := bootstrap.GetBootstrapNodes()
				for _, node := range bootstrapNodes {
					if len(dhtNodes) < 10 { // Limit DHT nodes in torrent
						dhtNodes = append(dhtNodes, []interface{}{node.IP, node.Port})
					}
				}
			}
		}
	}

	torrentInfo, err := torrent.CreateTorrent(fileInfo, trackers, gatewayURL, dhtNodes)
	if err != nil {
		g.writeError(w, http.StatusInternalServerError, "Torrent creation failed", ErrorTypeInternal,
			fmt.Sprintf("Failed to generate torrent: %v", err))
		return
	}

	// Update file metadata with info_hash for tracker integration
	err = g.storage.UpdateFileInfoHash(metadata.Hash, torrentInfo.InfoHash)
	if err != nil {
		log.Printf("Warning: Failed to update info_hash for file %s: %v", metadata.Hash, err)
	}

	// Announce to DHT if bootstrap is available
	if g.dhtBootstrap != nil && g.config.IsServiceEnabled("dht") {
		g.dhtBootstrap.AnnounceNewTorrent(torrentInfo.InfoHash, g.config.Gateway.Port)
	}

	// Create streaming info for video files
	isVideo, mimeType := streaming.DetectMediaType(fileName)
	var streamingInfo *streaming.FileInfo
	var hlsPlaylist *streaming.HLSPlaylist

	if isVideo {
		duration := streaming.EstimateVideoDuration(metadata.Size, fileName)
		streamingInfo = &streaming.FileInfo{
			Name:       fileName,
			Size:       metadata.Size,
			ChunkCount: len(chunkHashes),
			ChunkSize:  int(g.config.GetChunkSize()),
			Duration:   duration,
			IsVideo:    true,
			MimeType:   mimeType,
		}

		config := streaming.DefaultHLSConfig()
		playlist, err := streaming.GenerateHLSSegments(*streamingInfo, config)
		if err == nil {
			hlsPlaylist = playlist
		}
	}

	// Create API metadata
	apiMetadata := FileMetadata{
		FileHash:      metadata.Hash,
		FileName:      fileName,
		TotalSize:     metadata.Size,
		ChunkCount:    len(chunkHashes),
		StorageType:   "torrent",
		Chunks:        chunkInfos,
		TorrentInfo:   torrentInfo,
		StreamingInfo: streamingInfo,
		HLSPlaylist:   hlsPlaylist,
	}

	// Store API metadata for compatibility
	err = g.storeMetadata(metadata.Hash, apiMetadata)
	if err != nil {
		g.writeError(w, http.StatusInternalServerError, "Metadata storage failed", ErrorTypeStorageFailed,
			fmt.Sprintf("Failed to store metadata: %v", err))
		return
	}

	// Publish to Nostr
	var nostrEventID string
	if g.nostrPublisher != nil {
		eventData := nostr.TorrentEventData{
			Title:       fmt.Sprintf("Torrent: %s", fileName),
			InfoHash:    torrentInfo.InfoHash,
			FileName:    fileName,
			FileSize:    metadata.Size,
			MagnetLink:  torrentInfo.Magnet,
			WebSeedURL:  webSeedURL,
			BlossomHash: metadata.Hash,
			Description: fmt.Sprintf("File '%s' (%.2f MB) available via BitTorrent", fileName, float64(metadata.Size)/1024/1024),
		}

		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
		defer cancel()

		event, err := g.nostrPublisher.PublishTorrentAnnouncement(ctx, eventData)
		if err != nil {
			log.Printf("Warning: Failed to publish torrent to Nostr: %v", err)
		} else if event != nil {
			nostrEventID = nostr.GetEventID(event)
		}
	}

	// Send success response for torrent
	response := UploadResponse{
		FileHash:     metadata.Hash,
		Message:      "File uploaded successfully as torrent",
		TorrentHash:  torrentInfo.InfoHash,
		MagnetLink:   torrentInfo.Magnet,
		NostrEventID: nostrEventID,
	}

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	json.NewEncoder(w).Encode(response)
}
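
// A successful torrent upload additionally carries the swarm handles
// (values illustrative):
//
//	{
//	  "file_hash": "9f86d081884c7d65...",
//	  "message": "File uploaded successfully as torrent",
//	  "torrent_hash": "c12fe1c06bba254a...",
//	  "magnet_link": "magnet:?xt=urn:btih:c12fe1c06bba254a...",
//	  "nostr_event_id": "..."
//	}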

func (g *Gateway) DownloadHandler(w http.ResponseWriter, r *http.Request) {
	// Validate HTTP method
	if err := g.validateHTTPMethod(r, []string{http.MethodGet, http.MethodHead}); err != nil {
		g.writeErrorResponse(w, ErrMethodNotAllowed, err.Error())
		return
	}

	// Get and validate file hash from URL
	vars := mux.Vars(r)
	fileHash := vars["hash"]

	if err := g.validateFileHash(fileHash); err != nil {
		g.writeErrorResponse(w, ErrInvalidFileHash, err.Error())
		return
	}

	// Check file access permissions
	requestorPubkey := middleware.GetUserFromContext(r.Context())
	canAccess, err := g.storage.CheckFileAccess(fileHash, requestorPubkey)
	if err != nil {
		g.writeError(w, http.StatusInternalServerError, "Access check failed", ErrorTypeInternal,
			fmt.Sprintf("Failed to check file access: %v", err))
		return
	}
	if !canAccess {
		g.writeError(w, http.StatusForbidden, "Access denied", ErrorTypeUnauthorized,
			"You do not have permission to access this file")
		return
	}

	// Get metadata with error handling
	metadata, err := g.getMetadata(fileHash)
	if err != nil {
		g.writeErrorResponse(w, ErrFileNotFound, fmt.Sprintf("No file found with hash: %s", fileHash))
		return
	}

	// Validate metadata
	if metadata == nil {
		g.writeError(w, http.StatusInternalServerError, "Invalid metadata", ErrorTypeInternal,
			"Retrieved metadata is null")
		return
	}

	if metadata.TotalSize <= 0 {
		g.writeError(w, http.StatusInternalServerError, "Invalid file size", ErrorTypeInternal,
			"File metadata indicates invalid size")
		return
	}

	// Route based on storage type; each branch writes the full response,
	// including HEAD handling, so nothing may follow this dispatch.
	if metadata.StorageType == "blob" {
		g.downloadBlob(w, r, metadata)
	} else {
		// Default to torrent/chunk handling for backward compatibility
		g.downloadTorrent(w, r, metadata)
	}
}
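
// A stored file can be fetched directly once its hash is known (route
// illustrative; the hash is the 64-character SHA-256 from the upload
// response):
//
//	curl -OJ http://localhost:8080/download/9f86d081884c7d65...
//
// -OJ saves under the name from the Content-Disposition header set below.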

// downloadBlob handles downloading files stored as single Blossom blobs
func (g *Gateway) downloadBlob(w http.ResponseWriter, r *http.Request, metadata *FileMetadata) {
	// For blobs, the file hash IS the blob hash
	blobHash := metadata.FileHash

	// Handle HEAD request
	if r.Method == http.MethodHead {
		w.Header().Set("Content-Type", "application/octet-stream")
		w.Header().Set("Content-Length", fmt.Sprintf("%d", metadata.TotalSize))
		w.Header().Set("Accept-Ranges", "bytes")
		w.Header().Set("Cache-Control", "public, max-age=3600")
		if metadata.FileName != "" {
			w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", metadata.FileName))
		}
		w.WriteHeader(http.StatusOK)
		return
	}

	// Get blob data from storage backend
	reader, _, err := g.storage.GetBlobData(blobHash)
	if err != nil {
		g.writeError(w, http.StatusInternalServerError, "Failed to retrieve blob", ErrorTypeStorageFailed,
			fmt.Sprintf("Could not get blob from storage: %v", err))
		return
	}
	if reader == nil {
		g.writeError(w, http.StatusNotFound, "Blob not found", ErrorTypeNotFound,
			fmt.Sprintf("Blob with hash %s not found", blobHash))
		return
	}
	defer reader.Close()

	// Read blob data
	blobData, err := io.ReadAll(reader)
	if err != nil {
		g.writeError(w, http.StatusInternalServerError, "Failed to read blob", ErrorTypeStorageFailed,
			fmt.Sprintf("Could not read blob data: %v", err))
		return
	}

	// Set response headers for GET request
	w.Header().Set("Content-Type", "application/octet-stream")
	w.Header().Set("Content-Length", fmt.Sprintf("%d", len(blobData)))
	w.Header().Set("Accept-Ranges", "bytes")
	w.Header().Set("Cache-Control", "public, max-age=3600")

	if metadata.FileName != "" {
		w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", metadata.FileName))
	}

	// Write blob data
	w.WriteHeader(http.StatusOK)
	w.Write(blobData)
}

// downloadTorrent handles downloading files stored as BitTorrent chunks
func (g *Gateway) downloadTorrent(w http.ResponseWriter, r *http.Request, metadata *FileMetadata) {
	if len(metadata.Chunks) == 0 {
		g.writeError(w, http.StatusInternalServerError, "No chunks found", ErrorTypeInternal,
			"Torrent file metadata indicates no chunks available")
		return
	}

	// Handle HEAD request
	if r.Method == http.MethodHead {
		w.Header().Set("Content-Type", "application/octet-stream")
		w.Header().Set("Content-Length", fmt.Sprintf("%d", metadata.TotalSize))
		w.Header().Set("Accept-Ranges", "bytes")
		w.Header().Set("Cache-Control", "public, max-age=3600")
		if metadata.FileName != "" {
			w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", metadata.FileName))
		}
		w.WriteHeader(http.StatusOK)
		return
	}

	// Set response headers for GET request
	w.Header().Set("Content-Type", "application/octet-stream")
	w.Header().Set("Content-Length", fmt.Sprintf("%d", metadata.TotalSize))
	w.Header().Set("Accept-Ranges", "bytes")
	w.Header().Set("Cache-Control", "public, max-age=3600")

	if metadata.FileName != "" {
		w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", metadata.FileName))
	}

	// Retrieve and stream chunks
	bytesWritten := int64(0)
	for i, chunkInfo := range metadata.Chunks {
		// Validate chunk info
		if chunkInfo.Hash == "" {
			g.writeError(w, http.StatusInternalServerError, "Invalid chunk hash", ErrorTypeInternal,
				fmt.Sprintf("Chunk %d has empty hash", i))
			return
		}

		// Get chunk data
		chunkData, err := g.blossomClient.Get(chunkInfo.Hash)
		if err != nil {
			g.writeError(w, http.StatusInternalServerError, "Failed to retrieve chunk", ErrorTypeStorageFailed,
				fmt.Sprintf("Could not get chunk %d: %v", i, err))
			return
		}

		// Write chunk data
		written, err := w.Write(chunkData)
		if err != nil {
			// Client may have disconnected - log but don't send an error response
			log.Printf("Warning: Failed to write chunk %d to client: %v", i, err)
			return
		}

		bytesWritten += int64(written)
	}

	// Final sanity check on total bytes streamed
	if bytesWritten != metadata.TotalSize {
		log.Printf("Warning: Total bytes written (%d) doesn't match expected size (%d)",
			bytesWritten, metadata.TotalSize)
	}
}

// Temporary in-memory storage for metadata
var metadataStore = make(map[string]FileMetadata)

func (g *Gateway) storeMetadata(fileHash string, metadata FileMetadata) error {
	metadataStore[fileHash] = metadata
	return nil
}

func (g *Gateway) getMetadata(fileHash string) (*FileMetadata, error) {
	// Get metadata from storage backend
	dbMetadata, err := g.storage.GetFileMetadata(fileHash)
	if err != nil {
		return nil, fmt.Errorf("failed to get metadata from storage: %w", err)
	}
	if dbMetadata == nil {
		return nil, fmt.Errorf("metadata not found for hash: %s", fileHash)
	}

	// Convert storage metadata to API metadata format
	chunks := []ChunkInfo{}
	if dbMetadata.StorageType == "torrent" {
		// Get chunks for torrent files
		chunkData, err := g.storage.GetFileChunks(fileHash)
		if err != nil {
			return nil, fmt.Errorf("failed to get chunks: %w", err)
		}
		chunks = make([]ChunkInfo, len(chunkData))
		for i, chunk := range chunkData {
			chunks[i] = ChunkInfo{
				Index: chunk.ChunkIndex,
				Hash:  chunk.ChunkHash,
				Size:  int(chunk.Size),
			}
		}
	}

	metadata := &FileMetadata{
		FileHash:    dbMetadata.Hash,
		FileName:    dbMetadata.OriginalName,
		TotalSize:   dbMetadata.Size,
		ChunkCount:  dbMetadata.ChunkCount,
		StorageType: dbMetadata.StorageType,
		Chunks:      chunks,
	}

	return metadata, nil
}

// WebSeed handlers (BEP-19 support) - Enhanced for BitTorrent client compatibility
func (g *Gateway) WebSeedHandler(w http.ResponseWriter, r *http.Request) {
	// Validate HTTP method
	if err := g.validateHTTPMethod(r, []string{http.MethodGet, http.MethodHead}); err != nil {
		g.writeErrorResponse(w, ErrMethodNotAllowed, err.Error())
		return
	}

	// Get and validate parameters
	vars := mux.Vars(r)
	infoHash := vars["hash"]
	requestPath := r.URL.Path

	// Detect BitTorrent client for optimizations
	clientInfo := g.detectBitTorrentClient(r)

	// Track WebSeed statistics
	g.updateWebSeedStats(infoHash, "request", 1)

	// Parse request type (piece or file)
	if strings.Contains(requestPath, "/piece/") {
		g.handleWebSeedPieceRequest(w, r, infoHash, clientInfo)
	} else if strings.Contains(requestPath, "/files/") {
		g.handleWebSeedFileRequest(w, r, infoHash, clientInfo)
	} else {
		// Default: serve entire file (BEP-19 compatibility)
		g.handleWebSeedFileRequest(w, r, infoHash, clientInfo)
	}
}
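
// The three request shapes this dispatch distinguishes (Range header
// optional; paths follow the route comments below):
//
//	GET /webseed/{infohash}/piece/{index}  -> single piece
//	GET /webseed/{infohash}/files/{path}   -> file within the torrent
//	GET /webseed/{infohash}/               -> whole file (BEP-19 default)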

// handleWebSeedPieceRequest handles piece-specific requests (/webseed/{infohash}/piece/{index})
func (g *Gateway) handleWebSeedPieceRequest(w http.ResponseWriter, r *http.Request, infoHash string, clientInfo string) {
	vars := mux.Vars(r)
	pieceStr := vars["piece"]

	// Validate and parse piece index
	pieceIndex, err := strconv.Atoi(pieceStr)
	if err != nil {
		g.writeError(w, http.StatusBadRequest, "Invalid piece index", ErrorTypeValidation,
			fmt.Sprintf("Piece index must be a valid integer: %s", pieceStr))
		return
	}

	// Get piece data
	pieceData, _, err := g.getPieceData(infoHash, pieceIndex)
	if err != nil {
		g.writeError(w, http.StatusNotFound, "Piece not found", ErrorTypeNotFound, err.Error())
		return
	}

	// Set optimal headers for BitTorrent clients
	g.setWebSeedHeaders(w, len(pieceData), clientInfo)

	// Handle HEAD request
	if r.Method == http.MethodHead {
		w.WriteHeader(http.StatusOK)
		return
	}

	// Check for range request
	rangeHeader := r.Header.Get("Range")
	if rangeHeader != "" {
		g.handleRangeRequest(w, r, pieceData, rangeHeader)
		g.updateWebSeedStats(infoHash, "bytes_served", int64(len(pieceData)))
		return
	}

	// Serve full piece
	written, err := w.Write(pieceData)
	if err != nil {
		log.Printf("WebSeed piece %d write error for %s: %v", pieceIndex, clientInfo, err)
		return
	}

	// Update statistics
	g.updateWebSeedStats(infoHash, "pieces_served", 1)
	g.updateWebSeedStats(infoHash, "bytes_served", int64(written))

	log.Printf("WebSeed served piece %d (%d bytes) to %s", pieceIndex, written, clientInfo)
}

// handleWebSeedFileRequest handles file requests (/webseed/{infohash}/files/{path} or /)
func (g *Gateway) handleWebSeedFileRequest(w http.ResponseWriter, r *http.Request, infoHash string, clientInfo string) {
	// For single-file torrents, delegate to download handler with hash lookup
	fileHash, err := g.getFileHashFromInfoHash(infoHash)
	if err != nil {
		g.writeError(w, http.StatusNotFound, "File not found", ErrorTypeNotFound,
			fmt.Sprintf("No file found for info hash: %s", infoHash))
		return
	}

	// Update request path to use file hash
	r = mux.SetURLVars(r, map[string]string{"hash": fileHash})

	// Set WebSeed-specific headers
	g.setWebSeedHeaders(w, 0, clientInfo) // Size will be set by download handler

	// Update statistics
	g.updateWebSeedStats(infoHash, "file_requests", 1)

	// Delegate to optimized download handler
	g.DownloadHandler(w, r)
}

// getPieceData extracts exact piece data from file chunks with concurrent request optimization
func (g *Gateway) getPieceData(infoHash string, pieceIndex int) ([]byte, *FileMetadata, error) {
	// Get file hash from info hash
	fileHash, err := g.getFileHashFromInfoHash(infoHash)
	if err != nil {
		return nil, nil, fmt.Errorf("file not found for info hash: %s", infoHash)
	}

	// Get metadata
	metadata, err := g.getMetadata(fileHash)
	if err != nil {
		return nil, nil, fmt.Errorf("metadata not found: %v", err)
	}

	// Validate piece index
	if pieceIndex < 0 || pieceIndex >= len(metadata.Chunks) {
		return nil, nil, fmt.Errorf("piece index %d out of range (0-%d)", pieceIndex, len(metadata.Chunks)-1)
	}

	// Get torrent info to calculate piece boundaries
	if metadata.TorrentInfo == nil {
		return nil, nil, fmt.Errorf("torrent info not available")
	}

	// Check piece cache first (before acquiring loading mutex)
	if cachedPiece := g.getPieceFromCache(infoHash, pieceIndex); cachedPiece != nil {
		g.updateWebSeedStats(infoHash, "cache_hits", 1)
		return cachedPiece, metadata, nil
	}

	// Prevent concurrent loading of the same piece
	pieceKey := fmt.Sprintf("%s:%d", infoHash, pieceIndex)

	// Get or create mutex for this piece
	pieceLoadMutexLock.Lock()
	pieceMutex, exists := pieceLoadMutex[pieceKey]
	if !exists {
		pieceMutex = &sync.Mutex{}
		pieceLoadMutex[pieceKey] = pieceMutex
	}
	pieceLoadMutexLock.Unlock()

	// Lock this specific piece loading
	pieceMutex.Lock()
	defer func() {
		pieceMutex.Unlock()
		// Clean up mutex map to prevent memory leaks
		pieceLoadMutexLock.Lock()
		delete(pieceLoadMutex, pieceKey)
		pieceLoadMutexLock.Unlock()
	}()

	// Check cache again in case another goroutine loaded it
	if cachedPiece := g.getPieceFromCache(infoHash, pieceIndex); cachedPiece != nil {
		g.updateWebSeedStats(infoHash, "cache_hits", 1)
		return cachedPiece, metadata, nil
	}

	chunkInfo := metadata.Chunks[pieceIndex]

	// Retrieve chunk data from storage
	pieceData, err := g.blossomClient.Get(chunkInfo.Hash)
	if err != nil {
		g.updateWebSeedStats(infoHash, "cache_misses", 1)
		return nil, nil, fmt.Errorf("failed to retrieve piece %d: %v", pieceIndex, err)
	}

	// Validate piece data
	if len(pieceData) != chunkInfo.Size {
		return nil, nil, fmt.Errorf("piece size mismatch: expected %d, got %d", chunkInfo.Size, len(pieceData))
	}

	// Cache the piece for future requests
	g.cachePiece(infoHash, pieceIndex, pieceData)
	g.updateWebSeedStats(infoHash, "cache_misses", 1)

	return pieceData, metadata, nil
}

// detectBitTorrentClient identifies the BitTorrent client and returns optimization info
func (g *Gateway) detectBitTorrentClient(r *http.Request) string {
	userAgent := r.Header.Get("User-Agent")

	// Enhanced client detection with version parsing
	clientPatterns := []struct {
		pattern           string
		name              string
		needsOptimization bool
	}{
		{"qBittorrent", "qBittorrent", true},
		{"Transmission", "Transmission", true},
		{"libtorrent", "libtorrent", true},
		{"Deluge", "Deluge", false},
		{"rtorrent", "rtorrent", false},
		{"uTorrent", "uTorrent", true},
		{"BitTorrent", "BitTorrent", true},
		{"aria2", "aria2", false},
		{"WebTorrent", "WebTorrent", true},
		{"ltorrent", "libtorrent", true}, // Alternative spelling
		{"Azureus", "Azureus", false},
		{"BitComet", "BitComet", false},
	}

	for _, client := range clientPatterns {
		if strings.Contains(userAgent, client.pattern) {
			return fmt.Sprintf("%s (%s)", client.name, userAgent)
		}
	}

	// Check for curl/wget (testing tools)
	if strings.Contains(userAgent, "curl") || strings.Contains(userAgent, "wget") {
		return fmt.Sprintf("HTTP-Tool (%s)", userAgent)
	}

	return fmt.Sprintf("Unknown (%s)", userAgent)
}
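
// For a typical qBittorrent request the returned string looks like
// "qBittorrent (qBittorrent/4.6.2)" (version illustrative); agents that
// match no pattern fall through to the "Unknown (...)" form.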

// getClientOptimizations returns optimization settings based on detected client
func (g *Gateway) getClientOptimizations(clientInfo string) map[string]interface{} {
	opts := make(map[string]interface{})

	// Default optimizations
	opts["keep_alive"] = true
	opts["buffer_size"] = 64 * 1024 // 64KB default buffer
	opts["max_connections"] = 10

	// Client-specific optimizations
	if strings.Contains(clientInfo, "qBittorrent") {
		opts["buffer_size"] = 256 * 1024 // qBittorrent likes larger buffers
		opts["max_connections"] = 20
	} else if strings.Contains(clientInfo, "Transmission") {
		opts["buffer_size"] = 128 * 1024
		opts["prefer_ranges"] = true
	} else if strings.Contains(clientInfo, "libtorrent") {
		opts["buffer_size"] = 512 * 1024 // libtorrent can handle large buffers
		opts["max_connections"] = 30
	} else if strings.Contains(clientInfo, "WebTorrent") {
		opts["buffer_size"] = 32 * 1024 // Web clients prefer smaller buffers
		opts["cors_headers"] = true
	}

	return opts
}

// setWebSeedHeaders sets optimal headers for BitTorrent WebSeed compatibility
func (g *Gateway) setWebSeedHeaders(w http.ResponseWriter, contentLength int, clientInfo string) {
	// Get client-specific optimizations
	opts := g.getClientOptimizations(clientInfo)

	// Standard WebSeed headers
	w.Header().Set("Content-Type", "application/octet-stream")
	w.Header().Set("Accept-Ranges", "bytes")
	w.Header().Set("Cache-Control", "public, max-age=3600")
	w.Header().Set("X-WebSeed-Server", "TorrentGateway/1.0")

	// CORS headers for web clients
	if corsHeaders, ok := opts["cors_headers"].(bool); ok && corsHeaders {
		w.Header().Set("Access-Control-Allow-Origin", "*")
		w.Header().Set("Access-Control-Allow-Methods", "GET, HEAD")
		w.Header().Set("Access-Control-Allow-Headers", "Range")
	}

	if contentLength > 0 {
		w.Header().Set("Content-Length", fmt.Sprintf("%d", contentLength))
	}

	// Client-specific optimizations
	if strings.Contains(clientInfo, "qBittorrent") {
		w.Header().Set("Connection", "keep-alive")
		w.Header().Set("X-Accel-Buffering", "no") // Disable proxy buffering for qBittorrent
	} else if strings.Contains(clientInfo, "Transmission") {
		w.Header().Set("Server", "nginx/1.0") // Transmission likes nginx
		w.Header().Set("Connection", "keep-alive")
	} else if strings.Contains(clientInfo, "libtorrent") {
		w.Header().Set("Connection", "keep-alive")
		w.Header().Set("X-Content-Duration", "0") // Hint for streaming optimizations
	} else if strings.Contains(clientInfo, "WebTorrent") {
		w.Header().Set("Access-Control-Expose-Headers", "Content-Length, Content-Range")
		w.Header().Set("Timing-Allow-Origin", "*")
	}
}

// getFileHashFromInfoHash maps info hash to file hash
func (g *Gateway) getFileHashFromInfoHash(infoHash string) (string, error) {
	// Check memory store first
	for fileHash, metadata := range metadataStore {
		if metadata.TorrentInfo != nil && metadata.TorrentInfo.InfoHash == infoHash {
			return fileHash, nil
		}
	}

	// Check storage backend
	return g.storage.GetFileHashByInfoHash(infoHash)
}

// WebSeedStats tracks detailed statistics for WebSeed usage
type WebSeedStats struct {
	InfoHash     string
	TotalServed  int64
	PiecesServed map[int]int64
	BytesServed  int64
	CacheHits    int64
	CacheMisses  int64
	ActiveConns  int32
	LastAccess   time.Time
	ClientStats  map[string]int64
}

var webSeedStatsMap = make(map[string]*WebSeedStats)
var webSeedStatsMutex sync.RWMutex

// updateWebSeedStats tracks comprehensive WebSeed usage statistics
func (g *Gateway) updateWebSeedStats(infoHash string, statType string, value int64) {
	webSeedStatsMutex.Lock()
	defer webSeedStatsMutex.Unlock()

	stats, exists := webSeedStatsMap[infoHash]
	if !exists {
		stats = &WebSeedStats{
			InfoHash:     infoHash,
			PiecesServed: make(map[int]int64),
			ClientStats:  make(map[string]int64),
			LastAccess:   time.Now(),
		}
		webSeedStatsMap[infoHash] = stats
	}

	stats.LastAccess = time.Now()

	switch statType {
	case "pieces_served":
		stats.TotalServed += value
	case "bytes_served":
		stats.BytesServed += value
	case "cache_hits":
		stats.CacheHits += value
	case "cache_misses":
		stats.CacheMisses += value
	case "file_requests":
		stats.TotalServed += value
	}

	// Log significant events
	if statType == "pieces_served" || statType == "file_requests" || statType == "bytes_served" {
		// Guard the slice so a short (malformed) info hash cannot panic here
		shortHash := infoHash
		if len(shortHash) > 8 {
			shortHash = shortHash[:8]
		}
		log.Printf("WebSeed %s: %s += %d (total: %d)", shortHash, statType, value, stats.TotalServed)
	}
}
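
// With the format above, serving one piece logs a line such as
// (hash and totals illustrative):
//
//	WebSeed c12fe1c0: pieces_served += 1 (total: 42)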

// getWebSeedStats returns statistics for a specific torrent
func (g *Gateway) getWebSeedStats(infoHash string) *WebSeedStats {
	webSeedStatsMutex.RLock()
	defer webSeedStatsMutex.RUnlock()

	if stats, exists := webSeedStatsMap[infoHash]; exists {
		// Return a copy to avoid race conditions
		statsCopy := *stats
		statsCopy.PiecesServed = make(map[int]int64)
		for k, v := range stats.PiecesServed {
			statsCopy.PiecesServed[k] = v
		}
		statsCopy.ClientStats = make(map[string]int64)
		for k, v := range stats.ClientStats {
			statsCopy.ClientStats[k] = v
		}
		return &statsCopy
	}

	return nil
}

// Enhanced piece caching with LRU implementation
type PieceCacheEntry struct {
	data       []byte
	size       int64
	hits       int64
	lastAccess time.Time
}

type PieceCache struct {
	cache     map[string]*PieceCacheEntry
	mutex     sync.RWMutex
	maxSize   int64
	maxPieces int
	totalSize int64
}

var pieceCacheInstance = &PieceCache{
	cache:     make(map[string]*PieceCacheEntry),
	maxSize:   50 * 1024 * 1024, // 50MB max cache size
	maxPieces: 200,              // Max 200 pieces cached
}
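
// Cache entries are keyed "<infohash>:<pieceIndex>", e.g. "c12fe1c0...:17"
// (illustrative), matching the keys built in getPieceFromCache and cachePiece.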

// Concurrent piece loading prevention
var pieceLoadMutex = make(map[string]*sync.Mutex)
var pieceLoadMutexLock sync.Mutex

func (g *Gateway) getPieceFromCache(infoHash string, pieceIndex int) []byte {
	// Take a write lock: a cache hit mutates the entry's hit counter and
	// last-access time, which would race under a plain read lock.
	pieceCacheInstance.mutex.Lock()
	defer pieceCacheInstance.mutex.Unlock()

	key := fmt.Sprintf("%s:%d", infoHash, pieceIndex)
	entry, exists := pieceCacheInstance.cache[key]
	if !exists {
		return nil
	}

	// Update access statistics
	entry.hits++
	entry.lastAccess = time.Now()

	return entry.data
}

func (g *Gateway) cachePiece(infoHash string, pieceIndex int, data []byte) {
	// Only cache pieces smaller than 2MB
	if len(data) > 2*1024*1024 {
		return
	}

	pieceCacheInstance.mutex.Lock()
	defer pieceCacheInstance.mutex.Unlock()

	key := fmt.Sprintf("%s:%d", infoHash, pieceIndex)
	dataSize := int64(len(data))

	// Check if we need to evict entries
	if len(pieceCacheInstance.cache) >= pieceCacheInstance.maxPieces ||
		pieceCacheInstance.totalSize+dataSize > pieceCacheInstance.maxSize {
		g.evictLRUPieces(dataSize)
	}

	// Add new entry
	pieceCacheInstance.cache[key] = &PieceCacheEntry{
		data:       data,
		size:       dataSize,
		hits:       1,
		lastAccess: time.Now(),
	}
	pieceCacheInstance.totalSize += dataSize
}

// evictLRUPieces removes least recently used pieces to make space
func (g *Gateway) evictLRUPieces(neededSpace int64) {
	// Create list of entries sorted by last access time
	type cacheEntry struct {
		key        string
		lastAccess time.Time
		size       int64
	}

	var entries []cacheEntry
	for key, entry := range pieceCacheInstance.cache {
		entries = append(entries, cacheEntry{
			key:        key,
			lastAccess: entry.lastAccess,
			size:       entry.size,
		})
	}

	// Sort by last access (oldest first)
	sort.Slice(entries, func(i, j int) bool {
		return entries[i].lastAccess.Before(entries[j].lastAccess)
	})

	// Remove entries until we have enough space
	spaceFreed := int64(0)
	for _, entry := range entries {
		delete(pieceCacheInstance.cache, entry.key)
		pieceCacheInstance.totalSize -= entry.size
		spaceFreed += entry.size

		// Stop when we have enough space or reduced cache by 25%
		if spaceFreed >= neededSpace || len(pieceCacheInstance.cache) <= pieceCacheInstance.maxPieces*3/4 {
			break
		}
	}
}

// generateWebSeedURL creates and validates WebSeed URL for torrent
func (g *Gateway) generateWebSeedURL(r *http.Request, fileHash string) string {
	// Determine base URL - prefer the public URL from config, and fall back
	// to the request host when the configured URL is only the localhost
	// default set by NewGateway.
	var baseURL string
	if g.publicURL != "" && !strings.HasPrefix(g.publicURL, "http://localhost") {
		baseURL = g.publicURL
	} else {
		// Use HTTPS if request came over TLS
		scheme := "http"
		if r.TLS != nil {
			scheme = "https"
		}
		baseURL = fmt.Sprintf("%s://%s", scheme, r.Host)
	}

	// Ensure trailing slash for directory-style URL (BEP-19 requirement)
	webSeedURL := fmt.Sprintf("%s/webseed/%s/", strings.TrimSuffix(baseURL, "/"), fileHash)

	// Validate URL accessibility (basic check)
	if !g.validateWebSeedURL(webSeedURL) {
		log.Printf("Warning: WebSeed URL may not be accessible: %s", webSeedURL)
	}

	return webSeedURL
}
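
// For a file hash of "9f86d081..." served from example.com over TLS, the
// resulting URL is of the form (illustrative):
//
//	https://example.com/webseed/9f86d081.../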

// validateWebSeedURL performs basic validation of WebSeed URL accessibility
func (g *Gateway) validateWebSeedURL(webSeedURL string) bool {
	// Basic URL format validation
	if !strings.HasSuffix(webSeedURL, "/") {
		log.Printf("WebSeed URL validation failed: missing trailing slash")
		return false
	}

	if !strings.Contains(webSeedURL, "/webseed/") {
		log.Printf("WebSeed URL validation failed: missing /webseed/ path")
		return false
	}

	// In production, you might want to perform an actual HTTP test:
	// resp, err := http.Head(webSeedURL)
	// return err == nil && resp.StatusCode == 200

	return true
}

// WebSeedHealthHandler checks WebSeed service health and accessibility
func (g *Gateway) WebSeedHealthHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")

	health := map[string]interface{}{
		"status":    "healthy",
		"timestamp": time.Now().Format(time.RFC3339),
		"service":   "webseed",
		"version":   "1.0",
	}

	// Test basic WebSeed functionality
	tests := map[string]bool{
		"storage_backend": g.storage != nil,
		"blossom_client":  g.blossomClient != nil,
		"piece_cache":     len(pieceCacheInstance.cache) >= 0, // Cache is available
		"url_validation":  g.validateWebSeedURL("http://example.com/webseed/test/"),
	}

	// Count cached pieces and calculate cache stats
	pieceCacheInstance.mutex.RLock()
	cacheSize := len(pieceCacheInstance.cache)
	totalCacheSize := pieceCacheInstance.totalSize
	pieceCacheInstance.mutex.RUnlock()

	// Get WebSeed statistics
	var totalFiles, totalPieces int64
	if g.storage != nil {
		// Count files with torrent info (WebSeed-enabled files)
		for _, metadata := range metadataStore {
			if metadata.TorrentInfo != nil {
				totalFiles++
				totalPieces += int64(len(metadata.Chunks))
			}
		}
	}

	// Calculate aggregate statistics from all torrents
	webSeedStatsMutex.RLock()
	var totalCacheHits, totalCacheMisses, totalBytesServed int64
	for _, stats := range webSeedStatsMap {
		totalCacheHits += stats.CacheHits
		totalCacheMisses += stats.CacheMisses
		totalBytesServed += stats.BytesServed
	}
	webSeedStatsMutex.RUnlock()

	// Calculate cache hit rate
	var cacheHitRate float64
	if totalCacheHits+totalCacheMisses > 0 {
		cacheHitRate = float64(totalCacheHits) / float64(totalCacheHits+totalCacheMisses) * 100
	}

	health["tests"] = tests
	health["statistics"] = map[string]interface{}{
		"cached_pieces":     cacheSize,
		"cache_size_mb":     float64(totalCacheSize) / (1024 * 1024),
		"cache_max_size_mb": float64(pieceCacheInstance.maxSize) / (1024 * 1024),
		"webseed_files":     totalFiles,
		"total_pieces":      totalPieces,
		"cache_hit_rate":    fmt.Sprintf("%.1f%%", cacheHitRate),
		"cache_hits":        totalCacheHits,
		"cache_misses":      totalCacheMisses,
		"bandwidth_served":  fmt.Sprintf("%.2f MB", float64(totalBytesServed)/(1024*1024)),
		"active_torrents":   len(webSeedStatsMap),
	}

	// Determine overall health
	allTestsPassed := true
	for _, passed := range tests {
		if !passed {
			allTestsPassed = false
			break
		}
	}

	if !allTestsPassed {
		health["status"] = "degraded"
		w.WriteHeader(http.StatusServiceUnavailable)
	} else {
		w.WriteHeader(http.StatusOK)
	}

	// Encode response
	if err := json.NewEncoder(w).Encode(health); err != nil {
		log.Printf("Failed to encode WebSeed health response: %v", err)
	}
}
|
|
|
|

// P2PStatsHandler returns comprehensive P2P statistics across all components
func (g *Gateway) P2PStatsHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")

	stats := make(map[string]interface{})

	// Tracker statistics
	if g.trackerInstance != nil {
		trackerStats := make(map[string]interface{})
		trackerStats["status"] = "active"
		// Gateway uptime is used as an approximation; per-tracker uptime
		// would require support from the tracker itself.
		trackerStats["uptime_seconds"] = time.Since(serverStartTime).Seconds()

		stats["tracker"] = trackerStats
	}

	// DHT statistics
	if g.dhtBootstrap != nil {
		dhtStats := make(map[string]interface{})
		dhtStats["status"] = "active"
		dhtStats["routing_table_size"] = "N/A" // Would need DHT interface methods
		dhtStats["active_searches"] = 0
		dhtStats["stored_values"] = 0

		stats["dht"] = dhtStats
	}

	// WebSeed statistics (from our enhanced implementation)
	webSeedStatsMutex.RLock()
	var totalCacheHits, totalCacheMisses, totalBytesServed int64
	var activeTorrents int
	for _, torrentStats := range webSeedStatsMap {
		totalCacheHits += torrentStats.CacheHits
		totalCacheMisses += torrentStats.CacheMisses
		totalBytesServed += torrentStats.BytesServed
		activeTorrents++
	}
	webSeedStatsMutex.RUnlock()

	pieceCacheInstance.mutex.RLock()
	cacheSize := len(pieceCacheInstance.cache)
	totalCacheSize := pieceCacheInstance.totalSize
	pieceCacheInstance.mutex.RUnlock()

	// Hit rate as a fraction in [0, 1]; cache_efficiency below renders it as a percentage
	var cacheHitRate float64
	if totalCacheHits+totalCacheMisses > 0 {
		cacheHitRate = float64(totalCacheHits) / float64(totalCacheHits+totalCacheMisses)
	}

	stats["webseed"] = map[string]interface{}{
		"active_transfers": activeTorrents,
		"bandwidth_served": fmt.Sprintf("%.2f MB", float64(totalBytesServed)/(1024*1024)),
		"cache_hit_rate":   cacheHitRate,
		"cached_pieces":    cacheSize,
		"cache_size_mb":    float64(totalCacheSize) / (1024 * 1024),
		"cache_efficiency": fmt.Sprintf("%.1f%%", cacheHitRate*100),
	}

	// Overall P2P coordination statistics
	stats["coordination"] = map[string]interface{}{
		"integration_active": g.trackerInstance != nil && g.dhtBootstrap != nil,
		"webseed_enabled":    true,
		"total_components":   3, // Tracker + DHT + WebSeed
		"timestamp":          time.Now().Format(time.RFC3339),
	}

	if err := json.NewEncoder(w).Encode(stats); err != nil {
		log.Printf("Failed to encode P2P stats response: %v", err)
		g.writeError(w, http.StatusInternalServerError, "Internal server error", ErrorTypeInternal, err.Error())
	}
}
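
// Example (assumes the API router is mounted at the gateway root on port
// 9876, as reported by /stats; host and prefix are deployment choices):
//
//	curl http://localhost:9876/p2p/stats
//
// returns a JSON object with "tracker", "dht", "webseed", and
// "coordination" sections.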

// P2PDiagnosticsHandler provides comprehensive P2P diagnostics
func (g *Gateway) P2PDiagnosticsHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")

	diagnostics := map[string]interface{}{
		"timestamp": time.Now().Format(time.RFC3339),
		"version":   "1.0",
	}

	// Test tracker accessibility
	trackerAccessible := g.trackerInstance != nil
	diagnostics["tracker_accessible"] = trackerAccessible

	// Test DHT reachability
	dhtReachable := g.dhtBootstrap != nil
	diagnostics["dht_reachable"] = dhtReachable

	// Test WebSeed functionality
	webseedFunctional := g.testWebSeedFunctionality()
	diagnostics["webseed_functional"] = webseedFunctional

	// Network diagnostics
	publicIP := g.getPublicIP()
	diagnostics["public_ip"] = publicIP

	// Port forwarding detection (simplified)
	portForwarding := g.detectPortForwarding()
	diagnostics["port_forwarding"] = portForwarding

	// NAT type detection (simplified)
	natType := g.detectNATType()
	diagnostics["nat_type"] = natType

	// Calculate connectivity score
	connectivityScore := g.calculateConnectivityScore(trackerAccessible, dhtReachable, webseedFunctional, portForwarding)
	diagnostics["connectivity_score"] = connectivityScore

	// Performance metrics
	diagnostics["performance"] = map[string]interface{}{
		"avg_response_time_ms": g.getAverageResponseTime(),
		"cache_efficiency":     g.getCacheEfficiency(),
		"active_connections":   g.getActiveConnections(),
	}

	// System resource usage
	diagnostics["resources"] = map[string]interface{}{
		"memory_usage_mb": g.getMemoryUsage(),
		"goroutines":      g.getGoroutineCount(),
		"open_files":      g.getOpenFileCount(),
	}

	if err := json.NewEncoder(w).Encode(diagnostics); err != nil {
		log.Printf("Failed to encode P2P diagnostics response: %v", err)
		g.writeError(w, http.StatusInternalServerError, "Internal server error", ErrorTypeInternal, err.Error())
	}
}

// Diagnostic helper methods
func (g *Gateway) testWebSeedFunctionality() bool {
	// Test WebSeed health endpoint
	return true // Simplified for now
}

func (g *Gateway) getPublicIP() string {
	// In production, this would query an external service
	return "Unknown"
}
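
// fetchPublicIP is a hypothetical sketch of what getPublicIP could do in
// production: ask an external echo service for our address. The service
// URL is an assumption (any plain-text "what is my IP" endpoint works),
// and this helper is not wired into the handlers above.
func fetchPublicIP() (string, error) {
	client := &http.Client{Timeout: 5 * time.Second}
	resp, err := client.Get("https://api.ipify.org")
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	// The service replies with the caller's IP as a short plain-text body.
	body, err := io.ReadAll(io.LimitReader(resp.Body, 64))
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(string(body)), nil
}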

func (g *Gateway) detectPortForwarding() string {
	// In production, this would test port reachability
	return "unknown"
}

func (g *Gateway) detectNATType() string {
	// In production, this would use STUN/TURN to detect NAT type
	return "unknown"
}

func (g *Gateway) calculateConnectivityScore(tracker, dht, webseed bool, portForwarding string) int {
	score := 0

	if tracker {
		score += 25
	}
	if dht {
		score += 25
	}
	if webseed {
		score += 30 // WebSeed is most important
	}

	switch portForwarding {
	case "detected":
		score += 20
	case "partial":
		score += 10
	}

	return score
}
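
// Worked example: with the tracker, DHT, and WebSeed all up and port
// forwarding "detected", the score is 25 + 25 + 30 + 20 = 100; with port
// forwarding "unknown" it tops out at 80.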

func (g *Gateway) getAverageResponseTime() float64 {
	// Would track real response times in production
	return 25.5
}

func (g *Gateway) getCacheEfficiency() float64 {
	// Get real cache hit rate
	webSeedStatsMutex.RLock()
	var totalHits, totalMisses int64
	for _, stats := range webSeedStatsMap {
		totalHits += stats.CacheHits
		totalMisses += stats.CacheMisses
	}
	webSeedStatsMutex.RUnlock()

	if totalHits+totalMisses == 0 {
		return 0.0
	}

	return float64(totalHits) / float64(totalHits+totalMisses)
}

func (g *Gateway) getActiveConnections() int {
	// Would track real active connections in production
	return 15
}

func (g *Gateway) getMemoryUsage() int {
	// Would get real memory usage in production
	return 128 // MB
}

func (g *Gateway) getGoroutineCount() int {
	// Would get real goroutine count in production
	return 45
}
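
// A minimal sketch of real values for the resource stubs above, assuming
// "runtime" were added to this file's imports (hypothetical; not wired in):
//
//	var m runtime.MemStats
//	runtime.ReadMemStats(&m)
//	memoryMB := int(m.Alloc / (1024 * 1024)) // live heap bytes, in MB
//	goroutines := runtime.NumGoroutine()     // current goroutine count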

func (g *Gateway) getOpenFileCount() int {
	// Would get real open file count in production
	return 128
}

// handleRangeRequest handles HTTP range requests for WebSeed
func (g *Gateway) handleRangeRequest(w http.ResponseWriter, r *http.Request, data []byte, rangeHeader string) {
	// Parse range header (e.g., "bytes=0-499", "bytes=500-", or "bytes=-500")
	if !strings.HasPrefix(rangeHeader, "bytes=") {
		w.WriteHeader(http.StatusRequestedRangeNotSatisfiable)
		return
	}

	rangeSpec := strings.TrimPrefix(rangeHeader, "bytes=")
	rangeParts := strings.Split(rangeSpec, "-")

	if len(rangeParts) != 2 {
		w.WriteHeader(http.StatusRequestedRangeNotSatisfiable)
		return
	}

	dataLen := int64(len(data))
	var start, end int64
	var err error

	if rangeParts[0] == "" {
		// Suffix range ("bytes=-N"): the last N bytes of the payload
		if rangeParts[1] == "" {
			w.WriteHeader(http.StatusRequestedRangeNotSatisfiable)
			return
		}
		suffixLen, err := strconv.ParseInt(rangeParts[1], 10, 64)
		if err != nil || suffixLen <= 0 {
			w.WriteHeader(http.StatusRequestedRangeNotSatisfiable)
			return
		}
		if suffixLen > dataLen {
			suffixLen = dataLen
		}
		start = dataLen - suffixLen
		end = dataLen - 1
	} else {
		// Parse start
		start, err = strconv.ParseInt(rangeParts[0], 10, 64)
		if err != nil || start < 0 {
			w.WriteHeader(http.StatusRequestedRangeNotSatisfiable)
			return
		}

		// Parse end; a missing end means "through the final byte", and an
		// end beyond the payload is clamped, but a malformed end is rejected
		if rangeParts[1] != "" {
			end, err = strconv.ParseInt(rangeParts[1], 10, 64)
			if err != nil {
				w.WriteHeader(http.StatusRequestedRangeNotSatisfiable)
				return
			}
			if end >= dataLen {
				end = dataLen - 1
			}
		} else {
			end = dataLen - 1
		}
	}

	// Validate range; Content-Range must be set before WriteHeader
	if start > end || start >= dataLen {
		w.Header().Set("Content-Range", fmt.Sprintf("bytes */%d", dataLen))
		w.WriteHeader(http.StatusRequestedRangeNotSatisfiable)
		return
	}

	// Calculate content length
	contentLength := end - start + 1

	// Set range response headers
	w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, end, dataLen))
	w.Header().Set("Content-Length", fmt.Sprintf("%d", contentLength))
	w.Header().Set("Content-Type", "application/octet-stream")
	w.WriteHeader(http.StatusPartialContent)

	// Write the requested range
	_, err = w.Write(data[start : end+1])
	if err != nil {
		log.Printf("WebSeed range request write error: %v", err)
	}
}
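
// Example: for a 1,000-byte payload, "Range: bytes=200-499" yields a 206
// response with "Content-Range: bytes 200-499/1000" and a 300-byte body;
// "bytes=-100" returns the final 100 bytes (offsets 900-999).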

func (g *Gateway) TorrentHandler(w http.ResponseWriter, r *http.Request) {
	// Validate HTTP method
	if err := g.validateHTTPMethod(r, []string{http.MethodGet, http.MethodHead}); err != nil {
		g.writeErrorResponse(w, ErrMethodNotAllowed, err.Error())
		return
	}

	// Get and validate file hash
	vars := mux.Vars(r)
	fileHash := vars["hash"]

	if err := g.validateFileHash(fileHash); err != nil {
		g.writeErrorResponse(w, ErrInvalidFileHash, err.Error())
		return
	}

	// Check file access permissions
	requestorPubkey := middleware.GetUserFromContext(r.Context())
	canAccess, err := g.storage.CheckFileAccess(fileHash, requestorPubkey)
	if err != nil {
		g.writeError(w, http.StatusInternalServerError, "Access check failed", ErrorTypeInternal,
			fmt.Sprintf("Failed to check file access: %v", err))
		return
	}
	if !canAccess {
		g.writeError(w, http.StatusForbidden, "Access denied", ErrorTypeUnauthorized,
			"You do not have permission to access this file")
		return
	}

	// Get metadata
	metadata, err := g.getMetadata(fileHash)
	if err != nil {
		g.writeErrorResponse(w, ErrFileNotFound, fmt.Sprintf("No file found with hash: %s", fileHash))
		return
	}

	// Validate metadata
	if metadata == nil {
		g.writeError(w, http.StatusInternalServerError, "Invalid metadata", ErrorTypeInternal,
			"Retrieved metadata is null")
		return
	}

	// Check if torrent is available
	if metadata.TorrentInfo == nil {
		g.writeError(w, http.StatusNotFound, "Torrent not available", ErrorTypeNotFound,
			"No torrent data found for this file")
		return
	}

	// Validate torrent data
	if len(metadata.TorrentInfo.TorrentData) == 0 {
		g.writeError(w, http.StatusInternalServerError, "Empty torrent data", ErrorTypeInternal,
			"Torrent data is empty or corrupted")
		return
	}

	// Handle HEAD request
	if r.Method == http.MethodHead {
		w.Header().Set("Content-Type", "application/x-bittorrent")
		w.Header().Set("Content-Length", fmt.Sprintf("%d", len(metadata.TorrentInfo.TorrentData)))
		w.WriteHeader(http.StatusOK)
		return
	}

	// Set response headers and serve torrent file
	w.Header().Set("Content-Type", "application/x-bittorrent")
	w.Header().Set("Cache-Control", "public, max-age=3600")

	// Set filename with safe fallback
	filename := "download.torrent"
	if metadata.FileName != "" {
		// Sanitize the filename for safe header usage: normalize spaces,
		// drop path traversal sequences, and strip quotes that would
		// break the quoted Content-Disposition value
		safeName := strings.ReplaceAll(metadata.FileName, " ", "_")
		safeName = strings.ReplaceAll(safeName, "..", "")
		safeName = strings.ReplaceAll(safeName, "\"", "")
		filename = safeName + ".torrent"
	}
	w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename))
	w.Header().Set("Content-Length", fmt.Sprintf("%d", len(metadata.TorrentInfo.TorrentData)))

	// Write torrent data
	written, err := w.Write(metadata.TorrentInfo.TorrentData)
	if err != nil {
		fmt.Printf("Warning: Failed to write torrent data to client: %v\n", err)
		return
	}

	// Validate complete write
	if written != len(metadata.TorrentInfo.TorrentData) {
		fmt.Printf("Warning: Partial torrent write: wrote %d of %d bytes\n",
			written, len(metadata.TorrentInfo.TorrentData))
	}
}

// HLS Streaming handlers
func (g *Gateway) HLSPlaylistHandler(w http.ResponseWriter, r *http.Request) {
	// Validate HTTP method
	if err := g.validateHTTPMethod(r, []string{http.MethodGet, http.MethodHead}); err != nil {
		g.writeErrorResponse(w, ErrMethodNotAllowed, err.Error())
		return
	}

	// Get and validate file hash
	vars := mux.Vars(r)
	fileHash := vars["hash"]

	if err := g.validateFileHash(fileHash); err != nil {
		g.writeErrorResponse(w, ErrInvalidFileHash, err.Error())
		return
	}

	// Check file access permissions
	requestorPubkey := middleware.GetUserFromContext(r.Context())
	canAccess, err := g.storage.CheckFileAccess(fileHash, requestorPubkey)
	if err != nil {
		g.writeError(w, http.StatusInternalServerError, "Access check failed", ErrorTypeInternal,
			fmt.Sprintf("Failed to check file access: %v", err))
		return
	}
	if !canAccess {
		g.writeError(w, http.StatusForbidden, "Access denied", ErrorTypeUnauthorized,
			"You do not have permission to access this file")
		return
	}

	// Get metadata
	metadata, err := g.getMetadata(fileHash)
	if err != nil {
		g.writeErrorResponse(w, ErrFileNotFound, fmt.Sprintf("No file found with hash: %s", fileHash))
		return
	}

	// Validate metadata
	if metadata == nil {
		g.writeError(w, http.StatusInternalServerError, "Invalid metadata", ErrorTypeInternal,
			"Retrieved metadata is null")
		return
	}

	// Check if file has streaming info
	if metadata.StreamingInfo == nil {
		g.writeError(w, http.StatusBadRequest, "File not suitable for streaming", ErrorTypeUnsupported,
			"File does not have streaming metadata")
		return
	}

	if !metadata.StreamingInfo.IsVideo {
		g.writeError(w, http.StatusBadRequest, "File is not a video", ErrorTypeUnsupported,
			fmt.Sprintf("File type '%s' is not supported for HLS streaming", metadata.StreamingInfo.MimeType))
		return
	}

	// Check if HLS playlist is available
	if metadata.HLSPlaylist == nil {
		g.writeError(w, http.StatusInternalServerError, "HLS playlist not available", ErrorTypeInternal,
			"HLS playlist generation failed or not completed")
		return
	}

	// Validate playlist segments
	if len(metadata.HLSPlaylist.Segments) == 0 {
		g.writeError(w, http.StatusInternalServerError, "Empty HLS playlist", ErrorTypeInternal,
			"HLS playlist contains no segments")
		return
	}

	// Handle HEAD request
	if r.Method == http.MethodHead {
		w.Header().Set("Content-Type", "application/vnd.apple.mpegurl")
		w.Header().Set("Cache-Control", "public, max-age=300") // 5 minutes for playlists
		w.WriteHeader(http.StatusOK)
		return
	}

	// Validate host header for base URL generation
	host := r.Host
	if host == "" {
		g.writeError(w, http.StatusBadRequest, "Missing host header", ErrorTypeValidation,
			"Host header is required for HLS manifest generation")
		return
	}

	// Generate manifest with a proper base URL; honor TLS and proxies that
	// set X-Forwarded-Proto instead of hardcoding the http scheme
	scheme := "http"
	if r.TLS != nil || strings.EqualFold(r.Header.Get("X-Forwarded-Proto"), "https") {
		scheme = "https"
	}
	baseURL := fmt.Sprintf("%s://%s/api/stream/%s/segment", scheme, host, fileHash)
	manifest := metadata.HLSPlaylist.GenerateM3U8Manifest(baseURL)

	// Validate generated manifest
	if manifest == "" {
		g.writeError(w, http.StatusInternalServerError, "Empty manifest generated", ErrorTypeInternal,
			"HLS manifest generation produced empty result")
		return
	}

	if !strings.Contains(manifest, "#EXTM3U") {
		g.writeError(w, http.StatusInternalServerError, "Invalid manifest format", ErrorTypeInternal,
			"Generated manifest is not valid M3U8 format")
		return
	}

	// Set response headers and serve manifest
	w.Header().Set("Content-Type", "application/vnd.apple.mpegurl")
	w.Header().Set("Access-Control-Allow-Origin", "*")
	w.Header().Set("Access-Control-Allow-Methods", "GET, HEAD")
	w.Header().Set("Access-Control-Allow-Headers", "Range")
	w.Header().Set("Cache-Control", "public, max-age=300") // 5 minutes cache
	w.Header().Set("Content-Length", fmt.Sprintf("%d", len(manifest)))

	written, err := w.Write([]byte(manifest))
	if err != nil {
		fmt.Printf("Warning: Failed to write HLS manifest to client: %v\n", err)
		return
	}

	if written != len(manifest) {
		fmt.Printf("Warning: Partial manifest write: wrote %d of %d bytes\n", written, len(manifest))
	}
}

func (g *Gateway) HLSSegmentHandler(w http.ResponseWriter, r *http.Request) {
	// Validate HTTP method
	if err := g.validateHTTPMethod(r, []string{http.MethodGet, http.MethodHead}); err != nil {
		g.writeErrorResponse(w, ErrMethodNotAllowed, err.Error())
		return
	}

	// Get and validate parameters
	vars := mux.Vars(r)
	fileHash := vars["hash"]
	segmentURI := vars["segment"]

	if err := g.validateFileHash(fileHash); err != nil {
		g.writeErrorResponse(w, ErrInvalidFileHash, err.Error())
		return
	}

	// Check file access permissions
	requestorPubkey := middleware.GetUserFromContext(r.Context())
	canAccess, err := g.storage.CheckFileAccess(fileHash, requestorPubkey)
	if err != nil {
		g.writeError(w, http.StatusInternalServerError, "Access check failed", ErrorTypeInternal,
			fmt.Sprintf("Failed to check file access: %v", err))
		return
	}
	if !canAccess {
		g.writeError(w, http.StatusForbidden, "Access denied", ErrorTypeUnauthorized,
			"You do not have permission to access this file")
		return
	}

	if segmentURI == "" {
		g.writeError(w, http.StatusBadRequest, "Missing segment identifier", ErrorTypeValidation,
			"Segment URI is required")
		return
	}

	// Validate segment URI format
	if !strings.HasPrefix(segmentURI, "segment_") || !strings.HasSuffix(segmentURI, ".ts") {
		g.writeError(w, http.StatusBadRequest, "Invalid segment format", ErrorTypeValidation,
			"Segment URI must match format: segment_N.ts")
		return
	}

	// Get metadata
	metadata, err := g.getMetadata(fileHash)
	if err != nil {
		g.writeErrorResponse(w, ErrFileNotFound, fmt.Sprintf("No file found with hash: %s", fileHash))
		return
	}

	// Validate metadata
	if metadata == nil {
		g.writeError(w, http.StatusInternalServerError, "Invalid metadata", ErrorTypeInternal,
			"Retrieved metadata is null")
		return
	}

	// Check if file is a video (required for HLS segments)
	isVideo, _ := streaming.DetectMediaType(metadata.FileName)
	if !isVideo {
		g.writeError(w, http.StatusBadRequest, "Not a video file", ErrorTypeUnsupported,
			"HLS segments are only available for video files")
		return
	}

	// Check if HLS playlist is available
	if metadata.HLSPlaylist == nil {
		g.writeError(w, http.StatusNotFound, "HLS playlist not available", ErrorTypeNotFound,
			"No HLS streaming data found for this file")
		return
	}

	// Get segment info
	segment, err := metadata.HLSPlaylist.GetSegmentByURI(segmentURI)
	if err != nil {
		g.writeError(w, http.StatusNotFound, "Segment not found", ErrorTypeNotFound,
			fmt.Sprintf("HLS segment '%s' not found: %v", segmentURI, err))
		return
	}

	// Validate segment
	if segment == nil {
		g.writeError(w, http.StatusInternalServerError, "Invalid segment", ErrorTypeInternal,
			"Segment lookup returned null result")
		return
	}

	if len(segment.ChunkIndexes) == 0 {
		g.writeError(w, http.StatusInternalServerError, "Empty segment", ErrorTypeInternal,
			"Segment contains no chunk indexes")
		return
	}

	if segment.Size <= 0 {
		g.writeError(w, http.StatusInternalServerError, "Invalid segment size", ErrorTypeInternal,
			fmt.Sprintf("Segment has invalid size: %d", segment.Size))
		return
	}

	// Handle HEAD request
	if r.Method == http.MethodHead {
		w.Header().Set("Content-Type", "video/mp2t")
		w.Header().Set("Content-Length", fmt.Sprintf("%d", segment.Size))
		w.Header().Set("Cache-Control", "public, max-age=3600")
		w.WriteHeader(http.StatusOK)
		return
	}

	// Set response headers
	w.Header().Set("Content-Type", "video/mp2t")
	w.Header().Set("Access-Control-Allow-Origin", "*")
	w.Header().Set("Access-Control-Allow-Methods", "GET, HEAD")
	w.Header().Set("Access-Control-Allow-Headers", "Range")
	w.Header().Set("Content-Length", fmt.Sprintf("%d", segment.Size))
	w.Header().Set("Cache-Control", "public, max-age=3600")

	// Write segment data by concatenating the relevant chunks
	bytesWritten := int64(0)
	for _, chunkIndex := range segment.ChunkIndexes {
		// Validate chunk index
		if chunkIndex < 0 {
			g.writeError(w, http.StatusInternalServerError, "Invalid chunk index", ErrorTypeInternal,
				fmt.Sprintf("Negative chunk index %d in segment", chunkIndex))
			return
		}

		if chunkIndex >= len(metadata.Chunks) {
			g.writeError(w, http.StatusInternalServerError, "Chunk index out of range", ErrorTypeInternal,
				fmt.Sprintf("Chunk index %d out of range (0-%d)", chunkIndex, len(metadata.Chunks)-1))
			return
		}

		// Get chunk info
		chunkInfo := metadata.Chunks[chunkIndex]

		// Validate chunk info
		if chunkInfo.Hash == "" {
			g.writeError(w, http.StatusInternalServerError, "Invalid chunk hash", ErrorTypeInternal,
				fmt.Sprintf("Chunk %d has empty hash", chunkIndex))
			return
		}

		// Retrieve chunk data
		chunkData, err := g.blossomClient.Get(chunkInfo.Hash)
		if err != nil {
			g.writeError(w, http.StatusInternalServerError, "Chunk retrieval failed", ErrorTypeStorageFailed,
				fmt.Sprintf("Failed to retrieve chunk %d for segment: %v", chunkIndex, err))
			return
		}

		// Validate chunk data
		if len(chunkData) == 0 {
			g.writeError(w, http.StatusInternalServerError, "Empty chunk data", ErrorTypeStorageFailed,
				fmt.Sprintf("Chunk %d returned empty data", chunkIndex))
			return
		}

		// Write chunk data
		written, err := w.Write(chunkData)
		if err != nil {
			fmt.Printf("Warning: Failed to write chunk %d for segment %s: %v\n", chunkIndex, segmentURI, err)
			return
		}

		bytesWritten += int64(written)

		// Validate write; log the global chunk index for consistency with
		// the other messages in this loop
		if written != len(chunkData) {
			fmt.Printf("Warning: Partial chunk write for segment %s: wrote %d of %d bytes for chunk %d\n",
				segmentURI, written, len(chunkData), chunkIndex)
			return
		}
	}

	// Final validation
	if bytesWritten != segment.Size {
		fmt.Printf("Warning: Segment %s size mismatch: wrote %d, expected %d\n",
			segmentURI, bytesWritten, segment.Size)
	}
}

func (g *Gateway) StreamingHandler(w http.ResponseWriter, r *http.Request) {
	// Handle CORS preflight for Firefox
	if r.Method == http.MethodOptions {
		w.Header().Set("Access-Control-Allow-Origin", "*")
		w.Header().Set("Access-Control-Allow-Methods", "GET, HEAD, OPTIONS")
		w.Header().Set("Access-Control-Allow-Headers", "Range, Content-Type, Authorization")
		w.Header().Set("Access-Control-Max-Age", "86400")
		w.WriteHeader(http.StatusOK)
		return
	}

	// Validate HTTP method
	if err := g.validateHTTPMethod(r, []string{http.MethodGet, http.MethodHead}); err != nil {
		g.writeErrorResponse(w, ErrMethodNotAllowed, err.Error())
		return
	}

	// Get and validate file hash
	vars := mux.Vars(r)
	fileHash := vars["hash"]

	if err := g.validateFileHash(fileHash); err != nil {
		g.writeErrorResponse(w, ErrInvalidFileHash, err.Error())
		return
	}

	// Check file access permissions
	requestorPubkey := middleware.GetUserFromContext(r.Context())
	canAccess, err := g.storage.CheckFileAccess(fileHash, requestorPubkey)
	if err != nil {
		g.writeError(w, http.StatusInternalServerError, "Access check failed", ErrorTypeInternal,
			fmt.Sprintf("Failed to check file access: %v", err))
		return
	}
	if !canAccess {
		g.writeError(w, http.StatusForbidden, "Access denied", ErrorTypeUnauthorized,
			"You do not have permission to access this file")
		return
	}

	// Get metadata
	metadata, err := g.getMetadata(fileHash)
	if err != nil {
		g.writeErrorResponse(w, ErrFileNotFound, fmt.Sprintf("No file found with hash: %s", fileHash))
		return
	}

	// Validate metadata
	if metadata == nil {
		g.writeError(w, http.StatusInternalServerError, "Invalid metadata", ErrorTypeInternal,
			"Retrieved metadata is null")
		return
	}

	if len(metadata.Chunks) == 0 {
		g.writeError(w, http.StatusInternalServerError, "No chunks found", ErrorTypeInternal,
			"File metadata indicates no chunks available")
		return
	}

	if metadata.TotalSize <= 0 {
		g.writeError(w, http.StatusInternalServerError, "Invalid file size", ErrorTypeInternal,
			"File metadata indicates invalid size")
		return
	}

	// Get range header for partial content support
	rangeHeader := r.Header.Get("Range")

	// Set appropriate headers
	w.Header().Set("Accept-Ranges", "bytes")
	w.Header().Set("Access-Control-Allow-Origin", "*")
	w.Header().Set("Access-Control-Allow-Methods", "GET, HEAD, OPTIONS")
	w.Header().Set("Access-Control-Allow-Headers", "Range, Content-Type, Authorization")
	w.Header().Set("Access-Control-Expose-Headers", "Content-Length, Content-Range, Accept-Ranges")
	w.Header().Set("Cache-Control", "public, max-age=3600")
	w.Header().Set("ETag", fmt.Sprintf("\"%s\"", fileHash))
	// Static placeholder date; ideally this would be derived from the upload time
	w.Header().Set("Last-Modified", "Wed, 21 Oct 2015 07:28:00 GMT")
	w.Header().Set("X-Content-Type-Options", "nosniff")

	// Set content type based on file
	contentType := "application/octet-stream"
	if metadata.StreamingInfo != nil && metadata.StreamingInfo.MimeType != "" {
		contentType = metadata.StreamingInfo.MimeType

		// Keep original video MIME types for better browser compatibility.
		// The JavaScript player handles unsupported formats gracefully;
		// this lets Chrome show video controls and Firefox show proper errors.
	}

	w.Header().Set("Content-Type", contentType)

	// Set filename if available
	if metadata.FileName != "" {
		w.Header().Set("Content-Disposition", fmt.Sprintf("inline; filename=\"%s\"", metadata.FileName))
	}

	// Handle HEAD request
	if r.Method == http.MethodHead {
		w.Header().Set("Content-Length", fmt.Sprintf("%d", metadata.TotalSize))
		w.WriteHeader(http.StatusOK)
		return
	}

	// Handle range request
	if rangeHeader != "" {
		rangeReq, err := streaming.ParseRangeHeader(rangeHeader, metadata.TotalSize)
		if err != nil {
			g.writeErrorResponse(w, ErrInvalidRange, fmt.Sprintf("Invalid range header: %v", err))
			return
		}

		if rangeReq != nil {
			// Validate range request
			if rangeReq.Start < 0 || rangeReq.End >= metadata.TotalSize || rangeReq.Start > rangeReq.End {
				g.writeError(w, http.StatusRequestedRangeNotSatisfiable, "Range not satisfiable", ErrorTypeInvalidRange,
					fmt.Sprintf("Range %d-%d is not satisfiable for file size %d", rangeReq.Start, rangeReq.End, metadata.TotalSize))
				return
			}

			// Calculate which chunks we need
			chunkRange := streaming.CalculateChunkRange(rangeReq, int(g.config.GetChunkSize()))

			// Validate chunk range
			if chunkRange.StartChunk < 0 || chunkRange.EndChunk >= len(metadata.Chunks) {
				g.writeError(w, http.StatusInternalServerError, "Invalid chunk range", ErrorTypeInternal,
					fmt.Sprintf("Calculated chunk range %d-%d invalid for %d chunks",
						chunkRange.StartChunk, chunkRange.EndChunk, len(metadata.Chunks)))
				return
			}

			// Set partial content headers
			w.Header().Set("Content-Range", rangeReq.FormatContentRange(metadata.TotalSize))
			w.Header().Set("Content-Length", fmt.Sprintf("%d", rangeReq.Size))
			w.WriteHeader(http.StatusPartialContent)

			// Write the requested byte range
			bytesWritten := int64(0)
			targetBytes := rangeReq.Size

			for chunkIdx := chunkRange.StartChunk; chunkIdx <= chunkRange.EndChunk && chunkIdx < len(metadata.Chunks); chunkIdx++ {
				chunkInfo := metadata.Chunks[chunkIdx]

				// Validate chunk info
				if chunkInfo.Hash == "" {
					g.writeError(w, http.StatusInternalServerError, "Invalid chunk hash", ErrorTypeInternal,
						fmt.Sprintf("Chunk %d has empty hash", chunkIdx))
					return
				}

				// Retrieve chunk data
				chunkData, err := g.blossomClient.Get(chunkInfo.Hash)
				if err != nil {
					g.writeError(w, http.StatusInternalServerError, "Chunk retrieval failed", ErrorTypeStorageFailed,
						fmt.Sprintf("Failed to retrieve chunk %d: %v", chunkIdx, err))
					return
				}

				// Validate chunk data
				if len(chunkData) == 0 {
					g.writeError(w, http.StatusInternalServerError, "Empty chunk data", ErrorTypeStorageFailed,
						fmt.Sprintf("Chunk %d returned empty data", chunkIdx))
					return
				}

				// Calculate start and end positions within this chunk
				var startPos, endPos int64

				if chunkIdx == chunkRange.StartChunk {
					startPos = chunkRange.StartOffset
				} else {
					startPos = 0
				}

				if chunkIdx == chunkRange.EndChunk {
					endPos = chunkRange.EndOffset + 1
				} else {
					endPos = int64(len(chunkData))
				}

				// Ensure we don't exceed chunk boundaries
				if endPos > int64(len(chunkData)) {
					endPos = int64(len(chunkData))
				}

				if startPos >= int64(len(chunkData)) {
					continue // Skip this chunk
				}

				// Write the relevant portion of this chunk
				if startPos < endPos {
					chunkPortion := chunkData[startPos:endPos]
					written, err := w.Write(chunkPortion)
					if err != nil {
						fmt.Printf("Warning: Client disconnected during range request: %v\n", err)
						return
					}
					bytesWritten += int64(written)

					// Stop if we've written enough bytes
					if bytesWritten >= targetBytes {
						break
					}
				}
			}

			// Validate we wrote the expected amount
			if bytesWritten != targetBytes {
				fmt.Printf("Warning: Range request wrote %d bytes, expected %d\n", bytesWritten, targetBytes)
			}
			return
		}
	}

	// Serve entire file (no range request)
	w.Header().Set("Content-Length", fmt.Sprintf("%d", metadata.TotalSize))

	bytesWritten := int64(0)
	for i, chunkInfo := range metadata.Chunks {
		// Validate chunk info
		if chunkInfo.Hash == "" {
			g.writeError(w, http.StatusInternalServerError, "Invalid chunk hash", ErrorTypeInternal,
				fmt.Sprintf("Chunk %d has empty hash", i))
			return
		}

		// Retrieve chunk data
		chunkData, err := g.blossomClient.Get(chunkInfo.Hash)
		if err != nil {
			g.writeError(w, http.StatusInternalServerError, "Chunk retrieval failed", ErrorTypeStorageFailed,
				fmt.Sprintf("Failed to retrieve chunk %d: %v", i, err))
			return
		}

		// Validate chunk data
		if len(chunkData) == 0 {
			g.writeError(w, http.StatusInternalServerError, "Empty chunk data", ErrorTypeStorageFailed,
				fmt.Sprintf("Chunk %d returned empty data", i))
			return
		}

		// Write chunk data
		written, err := w.Write(chunkData)
		if err != nil {
			fmt.Printf("Warning: Client disconnected during streaming: %v\n", err)
			return
		}

		bytesWritten += int64(written)

		// Validate write
		if written != len(chunkData) {
			fmt.Printf("Warning: Partial chunk write: wrote %d of %d bytes for chunk %d\n",
				written, len(chunkData), i)
			return
		}
	}

	// Final validation
	if bytesWritten != metadata.TotalSize {
		fmt.Printf("Warning: Streaming wrote %d bytes, expected %d\n", bytesWritten, metadata.TotalSize)
	}
}
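
// Example: a player can probe seek support with a bounded request (the
// host, port, and /api prefix are deployment assumptions):
//
//	curl -s -D - -o /dev/null -H "Range: bytes=0-1023" \
//	    http://localhost:9876/api/stream/<hash>
//
// A 206 response carrying Content-Range confirms partial content works.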

// DHTStatsHandler returns DHT node statistics
func (g *Gateway) DHTStatsHandler(w http.ResponseWriter, r *http.Request) {
	if !g.config.IsServiceEnabled("dht") {
		g.writeError(w, http.StatusServiceUnavailable, "DHT service not enabled", ErrorTypeServiceUnavailable, "DHT service is not enabled on this gateway")
		return
	}

	if g.dhtBootstrap == nil {
		g.writeError(w, http.StatusServiceUnavailable, "DHT bootstrap not available", ErrorTypeServiceUnavailable, "DHT bootstrap functionality is not available")
		return
	}

	stats := g.dhtBootstrap.GetDHTStats()

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)

	response := map[string]interface{}{
		"success": true,
		"data":    stats,
	}

	if err := json.NewEncoder(w).Encode(response); err != nil {
		log.Printf("Failed to encode DHT stats response: %v", err)
	}
}
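
// The success envelope has the shape {"success": true, "data": {...}},
// where the fields inside "data" depend on what GetDHTStats reports.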

func RegisterRoutes(r *mux.Router, cfg *config.Config, storage *storage.Backend) *Gateway {
	gateway := NewGateway(cfg, storage)

	// Initialize tracker if enabled
	var trackerInstance *tracker.Tracker
	var announceHandler *tracker.AnnounceHandler
	var scrapeHandler *tracker.ScrapeHandler
	if cfg.IsServiceEnabled("tracker") {
		trackerInstance = tracker.NewTracker(&cfg.Tracker, gateway)
		announceHandler = tracker.NewAnnounceHandler(trackerInstance)
		scrapeHandler = tracker.NewScrapeHandler(trackerInstance)
		log.Printf("BitTorrent tracker enabled")
	}

	// Store tracker instance in gateway for stats
	gateway.trackerInstance = trackerInstance

	// Initialize authentication
	nostrAuth := auth.NewNostrAuth(storage.GetDB())
	authMiddleware := middleware.NewAuthMiddleware(nostrAuth)
	authHandlers := NewAuthHandlers(nostrAuth, gateway)

	// Initialize rate limiter with config values
	uploadRate, uploadBurst, downloadRate, downloadBurst, streamRate, streamBurst := cfg.GetRateLimitValues()
	rateLimiterConfig := &middleware.RateLimitConfig{
		UploadRatePerIP:    uploadRate,
		UploadBurstPerIP:   uploadBurst,
		DownloadRate:       downloadRate,
		DownloadBurst:      downloadBurst,
		StreamRatePerFile:  streamRate,
		StreamBurstPerFile: streamBurst,
		CleanupInterval:    5 * time.Minute,
		LimiterTTL:         15 * time.Minute,
	}
	rateLimiter := middleware.NewRateLimiter(rateLimiterConfig)

	// Initialize admin authentication if enabled
	var adminHandlers *admin.AdminHandlers
	if cfg.Admin.Enabled {
		adminAuth := admin.NewAdminAuth(cfg.Admin.Pubkeys, nostrAuth, storage.GetDB())
		adminHandlers = admin.NewAdminHandlers(adminAuth, gateway, cfg.Nostr.Relays)
	}

	// Security middleware is now applied at the main router level

	// BitTorrent tracker endpoints (public, no auth required)
	if announceHandler != nil {
		r.Handle("/announce", announceHandler).Methods("GET")
	}
	if scrapeHandler != nil {
		r.Handle("/scrape", scrapeHandler).Methods("GET")
	}

	// Authentication endpoints (no auth required)
	r.HandleFunc("/auth/challenge", authHandlers.ChallengeHandler).Methods("GET")
	r.HandleFunc("/auth/login", authHandlers.LoginHandler).Methods("POST")
	r.HandleFunc("/auth/logout", authHandlers.LogoutHandler).Methods("POST")

	// Public endpoints (optional auth for ownership tracking)
	publicRoutes := r.PathPrefix("").Subrouter()
	publicRoutes.Use(authMiddleware.OptionalAuth)

	// Download endpoints with rate limiting
	publicRoutes.HandleFunc("/download/{hash}", rateLimiter.DownloadMiddleware(gateway.DownloadHandler)).Methods("GET")
	publicRoutes.HandleFunc("/torrent/{hash}", rateLimiter.DownloadMiddleware(gateway.TorrentHandler)).Methods("GET")

	// WebSeed health endpoint (must come before other webseed routes)
	publicRoutes.HandleFunc("/webseed/health", gateway.WebSeedHealthHandler).Methods("GET")

	// Enhanced WebSeed endpoints with piece and file support
	publicRoutes.HandleFunc("/webseed/{hash}/piece/{piece}", rateLimiter.DownloadMiddleware(gateway.WebSeedHandler)).Methods("GET", "HEAD")
	publicRoutes.HandleFunc("/webseed/{hash}/files/{path:.*}", rateLimiter.DownloadMiddleware(gateway.WebSeedHandler)).Methods("GET", "HEAD")
	publicRoutes.HandleFunc("/webseed/{hash}/", rateLimiter.DownloadMiddleware(gateway.WebSeedHandler)).Methods("GET", "HEAD")
	publicRoutes.HandleFunc("/webseed/{hash}", rateLimiter.DownloadMiddleware(gateway.WebSeedHandler)).Methods("GET", "HEAD")

	// Streaming endpoints with specific rate limiting
	publicRoutes.HandleFunc("/stream/{hash}", rateLimiter.StreamMiddleware(gateway.StreamingHandler)).Methods("GET", "HEAD", "OPTIONS")
	publicRoutes.HandleFunc("/stream/{hash}/playlist.m3u8", rateLimiter.StreamMiddleware(gateway.HLSPlaylistHandler)).Methods("GET")
	publicRoutes.HandleFunc("/stream/{hash}/segment/{segment}", rateLimiter.StreamMiddleware(gateway.HLSSegmentHandler)).Methods("GET")
	publicRoutes.HandleFunc("/info/{hash}", gateway.InfoHandler).Methods("GET")
	publicRoutes.HandleFunc("/files", gateway.ListFilesHandler).Methods("GET")
	publicRoutes.HandleFunc("/profile/{pubkey}", gateway.ProfileHandler).Methods("GET")

	// System stats endpoint (public)
	r.HandleFunc("/stats", systemStatsHandler(storage, trackerInstance)).Methods("GET")

	// DHT stats endpoint (public)
	r.HandleFunc("/dht/stats", gateway.DHTStatsHandler).Methods("GET")

	// Integrated P2P stats endpoint (public)
	r.HandleFunc("/p2p/stats", gateway.P2PStatsHandler).Methods("GET")

	// P2P diagnostics endpoint (public)
	r.HandleFunc("/p2p/diagnostics", gateway.P2PDiagnosticsHandler).Methods("GET")

	// Protected user endpoints (auth required)
	userRoutes := r.PathPrefix("/users/me").Subrouter()
	userRoutes.Use(authMiddleware.RequireAuth)
	userRoutes.HandleFunc("/stats", authHandlers.UserStatsHandler).Methods("GET")
	userRoutes.HandleFunc("/files", authHandlers.UserFilesHandler).Methods("GET")
	userRoutes.HandleFunc("/files/{hash}", authHandlers.DeleteFileHandler).Methods("DELETE")
	userRoutes.HandleFunc("/files/{hash}/access", authHandlers.UpdateFileAccessHandler).Methods("PUT")
	userRoutes.HandleFunc("/admin-status", authHandlers.AdminStatusHandler).Methods("GET")

	// Upload endpoint now requires authentication
	r.HandleFunc("/upload", rateLimiter.UploadMiddleware(
		authMiddleware.RequireAuth(http.HandlerFunc(gateway.UploadHandler)).ServeHTTP,
	)).Methods("POST")

	// Admin endpoints (if enabled)
	if adminHandlers != nil {
		adminRoutes := r.PathPrefix("/admin").Subrouter()
		adminRoutes.HandleFunc("/stats", adminHandlers.AdminStatsHandler).Methods("GET")
		adminRoutes.HandleFunc("/users", adminHandlers.AdminUsersHandler).Methods("GET")
		adminRoutes.HandleFunc("/files", adminHandlers.AdminFilesHandler).Methods("GET")
		adminRoutes.HandleFunc("/files/{hash}", adminHandlers.AdminDeleteFileHandler).Methods("DELETE")
		adminRoutes.HandleFunc("/users/{pubkey}/ban", adminHandlers.AdminBanUserHandler).Methods("POST")
		adminRoutes.HandleFunc("/users/{pubkey}/unban", adminHandlers.AdminUnbanUserHandler).Methods("POST")
		adminRoutes.HandleFunc("/reports", adminHandlers.AdminReportsHandler).Methods("GET")
		adminRoutes.HandleFunc("/cleanup", adminHandlers.AdminCleanupHandler).Methods("POST")
		adminRoutes.HandleFunc("/logs", adminHandlers.AdminLogsHandler).Methods("GET")
	}

	r.HandleFunc("/health", healthHandler).Methods("GET")

	// Catch-all handler for unmatched API routes
	r.PathPrefix("/").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		gateway.writeError(w, http.StatusNotFound, "API endpoint not found", ErrorTypeNotFound,
			fmt.Sprintf("The requested API endpoint %s was not found", r.URL.Path))
	})

	return gateway
}
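
// A minimal wiring sketch from the caller's side (assumptions: the router is
// mounted under an /api prefix, matching the HLS base URL above, and served
// on :9876 as reported by /stats; both are deployment choices, not fixed here):
//
//	root := mux.NewRouter()
//	api.RegisterRoutes(root.PathPrefix("/api").Subrouter(), cfg, backend)
//	log.Fatal(http.ListenAndServe(":9876", root))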

// InfoHandler returns file metadata for the web UI
func (g *Gateway) InfoHandler(w http.ResponseWriter, r *http.Request) {
	// Validate HTTP method
	if err := g.validateHTTPMethod(r, []string{http.MethodGet}); err != nil {
		g.writeErrorResponse(w, ErrMethodNotAllowed, err.Error())
		return
	}

	// Get and validate file hash
	vars := mux.Vars(r)
	fileHash := vars["hash"]

	if err := g.validateFileHash(fileHash); err != nil {
		g.writeErrorResponse(w, ErrInvalidFileHash, err.Error())
		return
	}

	// Check file access permissions
	requestorPubkey := middleware.GetUserFromContext(r.Context())
	canAccess, err := g.storage.CheckFileAccess(fileHash, requestorPubkey)
	if err != nil {
		g.writeError(w, http.StatusInternalServerError, "Access check failed", ErrorTypeInternal,
			fmt.Sprintf("Failed to check file access: %v", err))
		return
	}
	if !canAccess {
		g.writeError(w, http.StatusForbidden, "Access denied", ErrorTypeUnauthorized,
			"You do not have permission to access this file")
		return
	}

	// Get metadata
	metadata, err := g.getMetadata(fileHash)
	if err != nil {
		g.writeErrorResponse(w, ErrFileNotFound, fmt.Sprintf("No file found with hash: %s", fileHash))
		return
	}

	// Validate metadata
	if metadata == nil {
		g.writeError(w, http.StatusInternalServerError, "Invalid metadata", ErrorTypeInternal,
			"Retrieved metadata is null")
		return
	}

	// Create response with file info
	response := map[string]interface{}{
		"file_hash": metadata.FileHash,
		"name":      metadata.FileName,
		"size":      metadata.TotalSize,
		"chunks":    len(metadata.Chunks),
	}

	// Add torrent info if available
	if metadata.TorrentInfo != nil {
		response["magnet_link"] = metadata.TorrentInfo.Magnet
		response["torrent_hash"] = metadata.TorrentInfo.InfoHash
	}

	// Add streaming info if available
	if metadata.StreamingInfo != nil {
		response["is_video"] = metadata.StreamingInfo.IsVideo
		response["mime_type"] = metadata.StreamingInfo.MimeType
		response["duration"] = metadata.StreamingInfo.Duration
	}

	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Access-Control-Allow-Origin", "*")
	if err := json.NewEncoder(w).Encode(response); err != nil {
		fmt.Printf("Error: Failed to encode info response: %v\n", err)
	}
}

// ListFilesHandler returns a list of all uploaded files
func (g *Gateway) ListFilesHandler(w http.ResponseWriter, r *http.Request) {
	// Validate HTTP method
	if err := g.validateHTTPMethod(r, []string{http.MethodGet}); err != nil {
		g.writeErrorResponse(w, ErrMethodNotAllowed, err.Error())
		return
	}

	// Get all files from metadata store
	files := []map[string]interface{}{}
	for hash, metadata := range metadataStore {
		fileInfo := map[string]interface{}{
			"file_hash": hash,
			"name":      metadata.FileName,
			"size":      metadata.TotalSize,
			"chunks":    len(metadata.Chunks),
		}

		// Add torrent info if available
		if metadata.TorrentInfo != nil {
			fileInfo["magnet_link"] = metadata.TorrentInfo.Magnet
			fileInfo["torrent_hash"] = metadata.TorrentInfo.InfoHash
		}

		// Add streaming info if available
		if metadata.StreamingInfo != nil {
			fileInfo["is_video"] = metadata.StreamingInfo.IsVideo
			fileInfo["mime_type"] = metadata.StreamingInfo.MimeType
			fileInfo["duration"] = metadata.StreamingInfo.Duration
		}

		files = append(files, fileInfo)
	}

	response := map[string]interface{}{
		"files": files,
		"count": len(files),
	}

	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Access-Control-Allow-Origin", "*")
	if err := json.NewEncoder(w).Encode(response); err != nil {
		fmt.Printf("Error: Failed to encode files list response: %v\n", err)
	}
}

// DeleteFileHandler removes a file and its metadata
func (g *Gateway) DeleteFileHandler(w http.ResponseWriter, r *http.Request) {
	// Validate HTTP method
	if err := g.validateHTTPMethod(r, []string{http.MethodDelete}); err != nil {
		g.writeErrorResponse(w, ErrMethodNotAllowed, err.Error())
		return
	}

	// Get and validate file hash
	vars := mux.Vars(r)
	fileHash := vars["hash"]

	if err := g.validateFileHash(fileHash); err != nil {
		g.writeErrorResponse(w, ErrInvalidFileHash, err.Error())
		return
	}

	// Check if file exists
	_, exists := metadataStore[fileHash]
	if !exists {
		g.writeErrorResponse(w, ErrFileNotFound, fmt.Sprintf("No file found with hash: %s", fileHash))
		return
	}

	// Delete from metadata store
	delete(metadataStore, fileHash)

	// TODO: In a real implementation, we would also:
	// - Delete chunks from Blossom storage
	// - Clean up any cached files
	// - Remove from torrent tracker
	// For now, we just remove from metadata store

	response := map[string]interface{}{
		"success":   true,
		"message":   "File deleted successfully",
		"file_hash": fileHash,
	}

	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Access-Control-Allow-Origin", "*")
	if err := json.NewEncoder(w).Encode(response); err != nil {
		fmt.Printf("Error: Failed to encode delete response: %v\n", err)
	}
}

// Gateway utility methods for admin functionality
func (g *Gateway) GetDB() *sql.DB {
	return g.storage.GetDB()
}

func (g *Gateway) GetStorage() *storage.Backend {
	return g.storage
}

// CleanupOldFiles removes files older than the specified duration
func (g *Gateway) CleanupOldFiles(olderThan time.Duration) (map[string]interface{}, error) {
	cutoffTime := time.Now().Add(-olderThan)

	// Get files to delete
	rows, err := g.storage.GetDB().Query(`
		SELECT hash, original_name, size FROM files
		WHERE created_at < ?
		ORDER BY created_at ASC
	`, cutoffTime)
	if err != nil {
		return nil, fmt.Errorf("failed to query old files: %w", err)
	}
	defer rows.Close()

	var deletedFiles []string
	var totalSize int64
	count := 0

	for rows.Next() {
		var hash, name string
		var size int64
		if err := rows.Scan(&hash, &name, &size); err != nil {
			continue
		}

		// Delete the file (admin delete)
		if err := g.storage.AdminDeleteFile(hash); err == nil {
			deletedFiles = append(deletedFiles, fmt.Sprintf("%s (%s)", name, hash[:8]))
			totalSize += size
			count++
		}
	}

	return map[string]interface{}{
		"deleted_count": count,
		"total_size":    totalSize,
		"files":         deletedFiles,
	}, nil
}
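
// Example: a 30-day retention sweep (the duration is the caller's policy):
//
//	result, err := gateway.CleanupOldFiles(30 * 24 * time.Hour)
//
// result["deleted_count"] and result["total_size"] summarize what was removed.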

// CleanupOrphanedChunks removes chunk records whose parent file no longer exists
func (g *Gateway) CleanupOrphanedChunks() (map[string]interface{}, error) {
	// Find chunks in the database whose file record is gone
	rows, err := g.storage.GetDB().Query(`
		SELECT DISTINCT c.chunk_hash
		FROM chunks c
		LEFT JOIN files f ON c.file_hash = f.hash
		WHERE f.hash IS NULL
	`)
	if err != nil {
		return nil, fmt.Errorf("failed to query orphaned chunks: %w", err)
	}
	defer rows.Close()

	var orphanedChunks []string
	count := 0

	for rows.Next() {
		var chunkHash string
		if err := rows.Scan(&chunkHash); err != nil {
			continue
		}

		// Delete chunk metadata
		_, err := g.storage.GetDB().Exec("DELETE FROM chunks WHERE chunk_hash = ?", chunkHash)
		if err == nil {
			orphanedChunks = append(orphanedChunks, chunkHash[:8])
			count++
		}
	}

	return map[string]interface{}{
		"deleted_count": count,
		"chunks":        orphanedChunks,
	}, nil
}

// CleanupInactiveUsers removes users who haven't logged in for the specified number of days
func (g *Gateway) CleanupInactiveUsers(days int) (map[string]interface{}, error) {
	cutoffTime := time.Now().AddDate(0, 0, -days)

	// Get inactive users: no files, no recent login, and not banned
	rows, err := g.storage.GetDB().Query(`
		SELECT u.pubkey, u.display_name
		FROM users u
		WHERE u.last_login < ?
		AND u.file_count = 0
		AND NOT EXISTS(SELECT 1 FROM banned_users WHERE pubkey = u.pubkey)
	`, cutoffTime)
	if err != nil {
		return nil, fmt.Errorf("failed to query inactive users: %w", err)
	}
	defer rows.Close()

	var deletedUsers []string
	count := 0

	for rows.Next() {
		var pubkey, displayName string
		if err := rows.Scan(&pubkey, &displayName); err != nil {
			continue
		}

		// Delete the user's sessions first, then the user record
		_, err := g.storage.GetDB().Exec("DELETE FROM sessions WHERE pubkey = ?", pubkey)
		if err != nil {
			continue
		}

		_, err = g.storage.GetDB().Exec("DELETE FROM users WHERE pubkey = ?", pubkey)
		if err == nil {
			name := displayName
			if name == "" {
				name = pubkey[:8] + "..."
			}
			deletedUsers = append(deletedUsers, name)
			count++
		}
	}

	return map[string]interface{}{
		"deleted_count": count,
		"users":         deletedUsers,
	}, nil
}

// ProfileHandler fetches user profile metadata from their relay set
func (g *Gateway) ProfileHandler(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	pubkey := vars["pubkey"]

	if pubkey == "" {
		g.writeError(w, http.StatusBadRequest, "Missing pubkey", ErrorTypeValidation, "Pubkey parameter is required")
		return
	}

	// Validate pubkey format (64-character hex string); check the
	// characters as well as the length, as the comment promises
	validPubkey := len(pubkey) == 64
	if validPubkey {
		for _, c := range pubkey {
			if !((c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F')) {
				validPubkey = false
				break
			}
		}
	}
	if !validPubkey {
		g.writeError(w, http.StatusBadRequest, "Invalid pubkey format", ErrorTypeValidation, "Pubkey must be a 64-character hex string")
		return
	}

	profile, err := g.profileFetcher.GetUserProfile(pubkey)
	if err != nil {
		g.writeError(w, http.StatusNotFound, "Profile not found", ErrorTypeNotFound, fmt.Sprintf("Could not fetch profile for user: %v", err))
		return
	}

	w.Header().Set("Content-Type", "application/json")
	if err := json.NewEncoder(w).Encode(map[string]interface{}{
		"success": true,
		"pubkey":  pubkey,
		"profile": profile,
	}); err != nil {
		log.Printf("Failed to encode profile response: %v", err)
	}
}

// formatUptime converts a duration to a human-readable uptime string
func formatUptime(duration time.Duration) string {
	days := int(duration.Hours()) / 24
	hours := int(duration.Hours()) % 24
	minutes := int(duration.Minutes()) % 60

	if days > 0 {
		return fmt.Sprintf("%dd %dh %dm", days, hours, minutes)
	} else if hours > 0 {
		return fmt.Sprintf("%dh %dm", hours, minutes)
	}
	return fmt.Sprintf("%dm", minutes)
}
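
// Example: formatUptime(26*time.Hour + 31*time.Minute) returns "1d 2h 31m",
// and formatUptime(45 * time.Minute) returns "45m".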

func healthHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Access-Control-Allow-Origin", "*")
	json.NewEncoder(w).Encode(map[string]string{"status": "ok"})
}

func systemStatsHandler(storage *storage.Backend, trackerInstance *tracker.Tracker) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		w.Header().Set("Access-Control-Allow-Origin", "*")

		// Get database for queries
		db := storage.GetDB()

		// Get total file count and storage
		var totalFiles int
		var totalStorage int64
		err := db.QueryRow(`
			SELECT COUNT(*), COALESCE(SUM(size), 0)
			FROM files
		`).Scan(&totalFiles, &totalStorage)
		if err != nil {
			http.Error(w, "Database error", http.StatusInternalServerError)
			return
		}

		// Get blob count and storage
		var blobFiles int
		var blobStorage int64
		err = db.QueryRow(`
			SELECT COUNT(*), COALESCE(SUM(size), 0)
			FROM files WHERE storage_type = 'blob'
		`).Scan(&blobFiles, &blobStorage)
		if err != nil {
			blobFiles = 0
			blobStorage = 0
		}

		// Get torrent count and storage
		var torrentFiles int
		var torrentStorage int64
		err = db.QueryRow(`
			SELECT COUNT(*), COALESCE(SUM(size), 0)
			FROM files WHERE storage_type = 'torrent'
		`).Scan(&torrentFiles, &torrentStorage)
		if err != nil {
			torrentFiles = 0
			torrentStorage = 0
		}

		// Get user count
		var userCount int
		err = db.QueryRow(`SELECT COUNT(*) FROM users`).Scan(&userCount)
		if err != nil {
			userCount = 0
		}

		// Get chunk count
		var chunkCount int
		err = db.QueryRow(`SELECT COUNT(*) FROM chunks`).Scan(&chunkCount)
		if err != nil {
			chunkCount = 0
		}

		stats := map[string]interface{}{
			"gateway": map[string]interface{}{
				"status":  "healthy",
				"port":    9876,
				"uploads": torrentFiles, // Gateway handles torrent uploads
				"storage": torrentStorage,
				"users":   userCount,
			},
			"blossom": map[string]interface{}{
				"status":  "healthy",
				"port":    8081,
				"blobs":   blobFiles,
				"storage": blobStorage,
			},
			"dht": map[string]interface{}{
				"status":   "healthy",
				"port":     6882,
				"peers":    0, // Would need DHT integration
				"torrents": torrentFiles,
			},
			"system": map[string]interface{}{
				"mode":        "unified",
				"uptime":      formatUptime(time.Since(serverStartTime)),
				"storage":     totalStorage,
				"connections": 0, // Would need connection tracking
				"chunks":      chunkCount,
				"total_files": totalFiles,
			},
		}

		// Add tracker stats if enabled
		if trackerInstance != nil {
			trackerStats := trackerInstance.GetStats()
			stats["tracker"] = map[string]interface{}{
				"status":   "healthy",
				"torrents": trackerStats["torrents"],
				"peers":    trackerStats["peers"],
				"seeders":  trackerStats["seeders"],
				"leechers": trackerStats["leechers"],
			}
		}

		json.NewEncoder(w).Encode(stats)
	}
}

// RegisterTrackerRoutes registers tracker endpoints on the main router
func RegisterTrackerRoutes(r *mux.Router, cfg *config.Config, storage *storage.Backend) {
	if !cfg.IsServiceEnabled("tracker") {
		return
	}

	gateway := NewGateway(cfg, storage)
	trackerInstance := tracker.NewTracker(&cfg.Tracker, gateway)
	announceHandler := tracker.NewAnnounceHandler(trackerInstance)
	scrapeHandler := tracker.NewScrapeHandler(trackerInstance)

	// BitTorrent tracker endpoints (public, no auth required)
	r.Handle("/announce", announceHandler).Methods("GET")
	r.Handle("/scrape", scrapeHandler).Methods("GET")

	log.Printf("Registered BitTorrent tracker endpoints")
}

// GetGatewayFromRoutes constructs a fresh Gateway instance for DHT integration
func GetGatewayFromRoutes(cfg *config.Config, storage *storage.Backend) *Gateway {
	return NewGateway(cfg, storage)
}