package tracker
import (
"fmt"
"log"
"net"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/anacrolix/torrent/bencode"
)
// AnnounceHandler handles BitTorrent announce requests
type AnnounceHandler struct {
tracker *Tracker
encoder *BencodeEncoder
}
// NewAnnounceHandler creates a new announce handler
func NewAnnounceHandler(tracker *Tracker) *AnnounceHandler {
return &AnnounceHandler{
tracker: tracker,
encoder: NewBencodeEncoder(),
}
}
// ServeHTTP implements http.Handler for the /announce endpoint
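// For reference, a typical client request looks roughly like (all values below are placeholders):
//   GET /announce?info_hash=%AA%BB...&peer_id=-XX0001-...&port=6881&uploaded=0&downloaded=0&left=1048576&compact=1&event=started
// info_hash and peer_id are 20 raw bytes, sent percent-encoded.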
func (h *AnnounceHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
h.writeError(w, "Method not allowed")
return
}
// Parse and validate announce request
req, err := h.parseAnnounceRequest(r)
if err != nil {
log.Printf("Invalid announce request: %v", err)
h.writeError(w, fmt.Sprintf("Invalid request: %v", err))
return
}
// Validate info_hash with gateway
if !h.tracker.gateway.IsValidInfoHash(req.InfoHash) {
log.Printf("Unknown info_hash: %s", req.InfoHash)
h.writeError(w, "Unknown info_hash")
return
}
// Process the announce
resp := h.processAnnounce(req)
// Write successful response
h.writeResponse(w, resp)
}
// parseAnnounceRequest parses HTTP parameters into AnnounceRequest
func (h *AnnounceHandler) parseAnnounceRequest(r *http.Request) (*AnnounceRequest, error) {
query := r.URL.Query()
// Extract and validate required parameters
infoHashRaw := query.Get("info_hash")
if infoHashRaw == "" {
return nil, fmt.Errorf("missing required parameter: info_hash")
}
// r.URL.Query() has already URL-decoded the value, so infoHashRaw holds the raw
// 20-byte hash; unescaping it a second time would corrupt hashes containing '%' or '+'.
if len(infoHashRaw) != 20 {
return nil, fmt.Errorf("info_hash must be 20 bytes, got %d", len(infoHashRaw))
}
// Store the hash as a lowercase hex string for map keys and logging
infoHash := fmt.Sprintf("%x", infoHashRaw)
peerID := query.Get("peer_id")
if peerID == "" {
return nil, fmt.Errorf("missing required parameter: peer_id")
}
if len(peerID) != 20 {
return nil, fmt.Errorf("peer_id must be 20 bytes, got %d", len(peerID))
}
portStr := query.Get("port")
if portStr == "" {
return nil, fmt.Errorf("missing required parameter: port")
}
port, err := strconv.Atoi(portStr)
if err != nil || port <= 0 || port > 65535 {
return nil, fmt.Errorf("invalid port: %s", portStr)
}
// Parse optional numeric parameters
uploaded := parseIntParam(query, "uploaded", 0)
downloaded := parseIntParam(query, "downloaded", 0)
left := parseIntParam(query, "left", 0)
// Parse optional parameters
event := query.Get("event")
if event != "" && event != "started" && event != "completed" && event != "stopped" {
return nil, fmt.Errorf("invalid event: %s", event)
}
numWant := parseIntParam(query, "numwant", int64(h.tracker.config.DefaultNumWant))
if numWant > int64(h.tracker.config.MaxNumWant) {
numWant = int64(h.tracker.config.MaxNumWant)
}
if numWant < 0 {
numWant = 0
}
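// compact=1 requests the BEP 23 binary peer list (6 bytes per IPv4 peer) instead of the dictionary model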
compact := query.Get("compact") == "1"
key := query.Get("key")
// Extract client IP
ip := h.getClientIP(r)
return &AnnounceRequest{
InfoHash: infoHash,
PeerID: peerID,
Port: port,
Uploaded: uploaded,
Downloaded: downloaded,
Left: left,
Event: event,
IP: ip,
NumWant: int(numWant),
Key: key,
Compact: compact,
}, nil
}
// processAnnounce handles the announce business logic
func (h *AnnounceHandler) processAnnounce(req *AnnounceRequest) *AnnounceResponse {
h.tracker.mutex.Lock()
defer h.tracker.mutex.Unlock()
// Initialize torrent if not exists
if h.tracker.peers[req.InfoHash] == nil {
h.tracker.peers[req.InfoHash] = make(map[string]*PeerInfo)
}
torrentPeers := h.tracker.peers[req.InfoHash]
// Handle peer lifecycle events
switch req.Event {
case "stopped":
// Remove peer
delete(torrentPeers, req.PeerID)
log.Printf("Peer %s stopped for torrent %s", req.PeerID[:8], req.InfoHash[:8])
case "completed":
// Mark as seeder and update
peer := h.updateOrCreatePeer(req, torrentPeers)
peer.Left = 0 // Completed download
log.Printf("Peer %s completed torrent %s", req.PeerID[:8], req.InfoHash[:8])
case "started":
// Add new peer
h.updateOrCreatePeer(req, torrentPeers)
log.Printf("Peer %s started torrent %s", req.PeerID[:8], req.InfoHash[:8])
default:
// Regular update
h.updateOrCreatePeer(req, torrentPeers)
}
// Count seeders and leechers
complete, incomplete := h.countPeers(torrentPeers)
// Build peer list for response
peers := h.buildPeerList(req, torrentPeers)
log.Printf("Announce for %s: %d seeders, %d leechers, returning %d peers",
req.InfoHash[:8], complete, incomplete, h.countResponsePeers(peers))
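// interval/min interval tell the client how long to wait between announces;
// complete/incomplete report the swarm's current seeder and leecher counts.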
return &AnnounceResponse{
Interval: h.tracker.config.AnnounceInterval,
MinInterval: h.tracker.config.MinInterval,
Complete: complete,
Incomplete: incomplete,
Peers: peers,
}
}
// updateOrCreatePeer updates existing peer or creates new one
func (h *AnnounceHandler) updateOrCreatePeer(req *AnnounceRequest, torrentPeers map[string]*PeerInfo) *PeerInfo {
peer, exists := torrentPeers[req.PeerID]
if !exists {
peer = &PeerInfo{}
torrentPeers[req.PeerID] = peer
}
// Update peer information
peer.PeerID = req.PeerID
peer.IP = req.IP
peer.Port = req.Port
peer.Uploaded = req.Uploaded
peer.Downloaded = req.Downloaded
peer.Left = req.Left
peer.LastSeen = time.Now()
peer.Event = req.Event
peer.Key = req.Key
peer.Compact = req.Compact
return peer
}
// buildPeerList creates the peer list for the response
func (h *AnnounceHandler) buildPeerList(req *AnnounceRequest, torrentPeers map[string]*PeerInfo) interface{} {
var selectedPeers []*PeerInfo
// Always include gateway WebSeed if available
webSeedURL := h.tracker.gateway.GetWebSeedURL(req.InfoHash)
if webSeedURL != "" {
if gatewayPeer := h.createGatewayPeer(webSeedURL); gatewayPeer != nil {
selectedPeers = append(selectedPeers, gatewayPeer)
}
}
// Add other peers (excluding the requesting peer)
count := 0
maxPeers := req.NumWant
if len(selectedPeers) > 0 {
maxPeers-- // Account for gateway peer
}
for peerID, peer := range torrentPeers {
if peerID != req.PeerID && count < maxPeers {
selectedPeers = append(selectedPeers, peer)
count++
}
}
// Return in requested format
if req.Compact {
return h.createCompactPeerList(selectedPeers)
}
return h.createDictPeerList(selectedPeers)
}
// createGatewayPeer creates a peer entry for the gateway WebSeed
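// The gateway is advertised as an ordinary seeding peer (Left == 0) rather than as a
// BEP 19 web seed; the webSeedURL argument serves only as an availability signal, and
// the advertised address comes from the gateway's public URL.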
func (h *AnnounceHandler) createGatewayPeer(webSeedURL string) *PeerInfo {
gatewayURL := h.tracker.gateway.GetPublicURL()
if gatewayURL == "" {
return nil
}
u, err := url.Parse(gatewayURL)
if err != nil {
log.Printf("Invalid gateway URL: %v", err)
return nil
}
host := u.Hostname()
portStr := u.Port()
if portStr == "" {
if u.Scheme == "https" {
portStr = "443"
} else {
portStr = "80"
}
}
port, err := strconv.Atoi(portStr)
if err != nil {
log.Printf("Invalid gateway port: %v", err)
return nil
}
return &PeerInfo{
PeerID: generateWebSeedPeerID(),
IP: host,
Port: port,
Uploaded: 0,
Downloaded: 0,
Left: 0, // Gateway is always a complete seeder
LastSeen: time.Now(),
Event: "completed",
}
}
// createCompactPeerList converts peers to compact binary format
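// Per BEP 23, each IPv4 peer is packed as 4 address bytes followed by a 2-byte big-endian port.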
func (h *AnnounceHandler) createCompactPeerList(peers []*PeerInfo) []byte {
var compactPeers []byte
for _, peer := range peers {
peerBytes := h.peerToCompactBytes(peer)
if peerBytes != nil {
compactPeers = append(compactPeers, peerBytes...)
}
}
return compactPeers
}
// createDictPeerList converts peers to dictionary format
func (h *AnnounceHandler) createDictPeerList(peers []*PeerInfo) []DictPeer {
var dictPeers []DictPeer
for _, peer := range peers {
dictPeers = append(dictPeers, DictPeer{
PeerID: peer.PeerID,
IP: peer.IP,
Port: peer.Port,
})
}
return dictPeers
}
// peerToCompactBytes converts a peer to compact 6-byte format
func (h *AnnounceHandler) peerToCompactBytes(peer *PeerInfo) []byte {
// Parse IP address
ip := parseIPv4(peer.IP)
if ip == nil {
return nil
}
// 6 bytes: 4 for IP, 2 for port (big-endian)
compactPeer := make([]byte, 6)
copy(compactPeer[0:4], ip)
compactPeer[4] = byte(peer.Port >> 8) // High byte
compactPeer[5] = byte(peer.Port & 0xFF) // Low byte
return compactPeer
}
// countPeers counts complete and incomplete peers
func (h *AnnounceHandler) countPeers(torrentPeers map[string]*PeerInfo) (complete, incomplete int) {
for _, peer := range torrentPeers {
if peer.Left == 0 {
complete++
} else {
incomplete++
}
}
return
}
// countResponsePeers counts peers in response (for logging)
func (h *AnnounceHandler) countResponsePeers(peers interface{}) int {
switch p := peers.(type) {
case []byte:
return len(p) / 6 // Compact format: 6 bytes per peer
case []DictPeer:
return len(p)
default:
return 0
}
}
// getClientIP extracts the real client IP from request headers
func (h *AnnounceHandler) getClientIP(r *http.Request) string {
// Check X-Forwarded-For header (proxy/load balancer)
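// Note: forwarded headers are client-controllable; trust them only when the tracker
// sits behind a trusted reverse proxy that sets them.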
if xff := r.Header.Get("X-Forwarded-For"); xff != "" {
// Take the first IP (client)
if ip := extractFirstIP(xff); ip != "" {
return ip
}
}
// Check X-Real-IP header
if xri := r.Header.Get("X-Real-IP"); xri != "" {
if parseIPv4(xri) != nil {
return xri
}
}
// Fall back to connection remote address
if host, _, err := net.SplitHostPort(r.RemoteAddr); err == nil {
return host
}
return r.RemoteAddr
}
// writeResponse writes a successful announce response
func (h *AnnounceHandler) writeResponse(w http.ResponseWriter, resp *AnnounceResponse) {
w.Header().Set("Content-Type", "text/plain")
w.Header().Set("Cache-Control", "no-cache")
data, err := bencode.Marshal(resp)
if err != nil {
log.Printf("Error encoding response: %v", err)
h.writeError(w, "Internal server error")
return
}
w.WriteHeader(http.StatusOK)
w.Write(data)
}
// writeError writes an error response in bencode format
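// Per BEP 3, failures are reported as a bencoded dict containing a "failure reason" string.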
func (h *AnnounceHandler) writeError(w http.ResponseWriter, message string) {
w.Header().Set("Content-Type", "text/plain")
w.Header().Set("Cache-Control", "no-cache")
resp := map[string]interface{}{
"failure reason": message,
}
data, err := bencode.Marshal(resp)
if err != nil {
// Fall back to a hand-built bencoded failure dict if Marshal fails
w.WriteHeader(http.StatusBadRequest)
w.Write([]byte("d14:failure reason" + strconv.Itoa(len(message)) + ":" + message + "e"))
return
}
w.WriteHeader(http.StatusBadRequest)
w.Write(data)
}
// Helper functions
// parseIntParam safely parses integer parameters with default fallback
func parseIntParam(query url.Values, param string, defaultValue int64) int64 {
valueStr := query.Get(param)
if valueStr == "" {
return defaultValue
}
value, err := strconv.ParseInt(valueStr, 10, 64)
if err != nil {
return defaultValue
}
return value
}
// parseIPv4 parses an IPv4 address string to 4-byte representation
func parseIPv4(ipStr string) []byte {
parts := strings.Split(ipStr, ".")
if len(parts) != 4 {
return nil
}
ip := make([]byte, 4)
for i, part := range parts {
val, err := strconv.Atoi(part)
if err != nil || val < 0 || val > 255 {
return nil
}
ip[i] = byte(val)
}
return ip
}
// extractFirstIP extracts the first valid IP from X-Forwarded-For header
func extractFirstIP(xff string) string {
parts := strings.Split(xff, ",")
for _, part := range parts {
ip := strings.TrimSpace(part)
if parseIPv4(ip) != nil {
return ip
}
}
return ""
}
// ScrapeHandler handles scrape requests (optional BitTorrent feature)
type ScrapeHandler struct {
tracker *Tracker
}
// NewScrapeHandler creates a new scrape handler
func NewScrapeHandler(tracker *Tracker) *ScrapeHandler {
return &ScrapeHandler{tracker: tracker}
}
// ServeHTTP implements http.Handler for the /scrape endpoint
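// The response maps each requested info_hash to its "complete" (seeders), "downloaded"
// (approximated here by the seeder count), and "incomplete" (leechers) counts.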
func (h *ScrapeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
h.writeError(w, "Method not allowed")
return
}
query := r.URL.Query()
infoHashes := query["info_hash"]
if len(infoHashes) == 0 {
h.writeError(w, "Missing info_hash parameter")
return
}
h.tracker.mutex.RLock()
defer h.tracker.mutex.RUnlock()
// Build scrape response
files := make(map[string]interface{})
for _, infoHashRaw := range infoHashes {
// Values from r.URL.Query() are already URL-decoded; skip anything that is not a raw 20-byte hash
if len(infoHashRaw) != 20 {
continue
}
infoHash := fmt.Sprintf("%x", infoHashRaw)
// Check if torrent exists
if torrentPeers, exists := h.tracker.peers[infoHash]; exists {
complete, incomplete := h.countPeers(torrentPeers)
downloaded := complete // Approximate downloads as seeders
files[infoHash] = map[string]interface{}{
"complete": complete,
"incomplete": incomplete,
"downloaded": downloaded,
}
} else {
// Unknown torrent
files[infoHash] = map[string]interface{}{
"complete": 0,
"incomplete": 0,
"downloaded": 0,
}
}
}
response := map[string]interface{}{
"files": files,
}
w.Header().Set("Content-Type", "text/plain")
data, err := bencode.Marshal(response)
if err != nil {
h.writeError(w, "Internal server error")
return
}
w.WriteHeader(http.StatusOK)
w.Write(data)
}
// countPeers counts complete and incomplete peers for scrape
func (h *ScrapeHandler) countPeers(torrentPeers map[string]*PeerInfo) (complete, incomplete int) {
for _, peer := range torrentPeers {
if peer.Left == 0 {
complete++
} else {
incomplete++
}
}
return
}
// writeError writes a scrape error response
func (h *ScrapeHandler) writeError(w http.ResponseWriter, message string) {
w.Header().Set("Content-Type", "text/plain")
resp := map[string]interface{}{
"failure reason": message,
}
data, err := bencode.Marshal(resp)
if err != nil {
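// Fall back to a hand-built bencoded failure dict if Marshal fails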
w.WriteHeader(http.StatusBadRequest)
w.Write([]byte("d14:failure reason" + strconv.Itoa(len(message)) + ":" + message + "e"))
return
}
w.WriteHeader(http.StatusBadRequest)
w.Write(data)
}