package tracker

import (
	"crypto/rand"
	"database/sql"
	"encoding/hex"
	"fmt"
	"log"
	"net"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/anacrolix/torrent/bencode"
	"torrentGateway/internal/config"
)

// Database interface for tracker operations
type Database interface {
	Exec(query string, args ...interface{}) (sql.Result, error)
	Query(query string, args ...interface{}) (*sql.Rows, error)
	QueryRow(query string, args ...interface{}) *sql.Row
}

// Tracker represents a BitTorrent tracker instance with database backing
type Tracker struct {
	peers       map[string]map[string]*PeerInfo // infoHash -> peerID -> peer (memory cache)
	mutex       sync.RWMutex
	config      *config.TrackerConfig
	gateway     Gateway        // Interface to gateway for WebSeed functionality
	coordinator P2PCoordinator // Interface to P2P coordinator
	db          Database       // Database interface
	startTime   time.Time
}

// P2PCoordinator interface for tracker integration
type P2PCoordinator interface {
	GetPeers(infoHash string) []CoordinatorPeerInfo
	OnPeerConnect(infoHash string, peer CoordinatorPeerInfo)
	AnnounceToExternalServices(infoHash string, port int) error
}

// CoordinatorPeerInfo represents peer info for coordination
type CoordinatorPeerInfo struct {
	IP       string
	Port     int
	PeerID   string
	Source   string
	Quality  int
	LastSeen time.Time
}

// Gateway interface for accessing gateway functionality
type Gateway interface {
	GetPublicURL() string
	IsValidInfoHash(infoHash string) bool
	GetWebSeedURL(infoHash string) string
}

// PeerInfo represents a peer in the tracker with enhanced state tracking
type PeerInfo struct {
	PeerID     string    `json:"peer_id"`
	InfoHash   string    `json:"info_hash"`
	IP         string    `json:"ip"`
	IPv6       string    `json:"ipv6,omitempty"` // IPv6 address if available
	Port       int       `json:"port"`
	Uploaded   int64     `json:"uploaded"`
	Downloaded int64     `json:"downloaded"`
	Left       int64     `json:"left"`
	LastSeen   time.Time `json:"last_seen"`
	FirstSeen  time.Time `json:"first_seen"`
	Event      string    `json:"event"`
	Key        string    `json:"key"`
	Compact    bool      `json:"compact"`
	UserAgent  string    `json:"user_agent"`
	IsSeeder   bool      `json:"is_seeder"`  // Cached seeder status
	IsWebSeed  bool      `json:"is_webseed"` // True if this is a WebSeed
	Priority   int       `json:"priority"`   // Peer priority (higher = better)
}

// TorrentStats represents statistics for a torrent
type TorrentStats struct {
	InfoHash   string    `json:"info_hash"`
	Seeders    int       `json:"seeders"`
	Leechers   int       `json:"leechers"`
	Completed  int       `json:"completed"`
	LastUpdate time.Time `json:"last_update"`
}

// CompactPeerIPv6 represents a peer in compact IPv6 format (18 bytes: 16 for IP, 2 for port)
type CompactPeerIPv6 struct {
	IP   [16]byte
	Port uint16
}

// AnnounceRequest represents an announce request from a peer
type AnnounceRequest struct {
	InfoHash   string `json:"info_hash"`
	PeerID     string `json:"peer_id"`
	Port       int    `json:"port"`
	Uploaded   int64  `json:"uploaded"`
	Downloaded int64  `json:"downloaded"`
	Left       int64  `json:"left"`
	Event      string `json:"event"`
	IP         string `json:"ip"`
	NumWant    int    `json:"numwant"`
	Key        string `json:"key"`
	Compact    bool   `json:"compact"`
}

// AnnounceResponse represents the tracker's response to an announce
type AnnounceResponse struct {
	FailureReason  string      `bencode:"failure reason,omitempty"`
	WarningMessage string      `bencode:"warning message,omitempty"`
	Interval       int         `bencode:"interval"`
	MinInterval    int         `bencode:"min interval,omitempty"`
	TrackerID      string      `bencode:"tracker id,omitempty"`
	Complete       int         `bencode:"complete"`
	Incomplete     int         `bencode:"incomplete"`
	Peers          interface{} `bencode:"peers"`
}

// CompactPeer represents a peer in compact format (6 bytes: 4 for IP, 2 for port)
type CompactPeer struct {
	IP   [4]byte
	Port uint16
}

// DictPeer represents a peer in dictionary format
type DictPeer struct {
	PeerID string `bencode:"peer id"`
	IP     string `bencode:"ip"`
	Port   int    `bencode:"port"`
}

// NewTracker creates a new tracker instance with database backing
func NewTracker(config *config.TrackerConfig, gateway Gateway, db Database) *Tracker {
	t := &Tracker{
		peers:     make(map[string]map[string]*PeerInfo),
		config:    config,
		gateway:   gateway,
		db:        db,
		startTime: time.Now(),
	}

	// Initialize database tables
	if err := t.initializeDatabase(); err != nil {
		log.Printf("Warning: Failed to initialize tracker database: %v", err)
	}

	// Start cleanup routine
	go t.cleanupRoutine()

	return t
}

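// Example wiring (illustrative sketch only): how a caller might construct the
// tracker and register its announce endpoint. The concrete Gateway and Database
// implementations, the config values, and the "/announce" route are assumptions,
// not part of this package.
//
//	cfg := &config.TrackerConfig{ /* AnnounceInterval, MinInterval, ... */ }
//	tr := NewTracker(cfg, myGateway, myDB)
//	http.HandleFunc("/announce", tr.HandleAnnounce)
//	log.Fatal(http.ListenAndServe(":8080", nil))
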
// SetCoordinator sets the P2P coordinator for integration
func (t *Tracker) SetCoordinator(coordinator P2PCoordinator) {
	t.coordinator = coordinator
}

// detectAbuse checks for suspicious announce patterns
func (t *Tracker) detectAbuse(req *AnnounceRequest, clientIP string) bool {
	// Check for too frequent announces from same IP
	if t.isAnnounceSpam(clientIP, req.InfoHash) {
		log.Printf("Abuse detected: Too frequent announces from IP %s", clientIP)
		return true
	}

	// Check for invalid peer_id patterns
	if t.isInvalidPeerID(req.PeerID) {
		log.Printf("Abuse detected: Invalid peer_id pattern from IP %s", clientIP)
		return true
	}

	// Check for suspicious port numbers
	if t.isSuspiciousPort(req.Port) {
		log.Printf("Abuse detected: Suspicious port %d from IP %s", req.Port, clientIP)
		return true
	}

	// Check for known bad actors (would be a database in production)
	if t.isKnownBadActor(clientIP) {
		log.Printf("Abuse detected: Known bad actor IP %s", clientIP)
		return true
	}

	return false
}

// Abuse detection helper methods
func (t *Tracker) isAnnounceSpam(clientIP, infoHash string) bool {
	// In production, this would check a time-windowed database
	// For now, use simple in-memory tracking
	_ = clientIP + ":" + infoHash // Would be used for tracking

	// Simple spam detection: more than 10 announces per minute
	// This would be more sophisticated in production
	return false // Placeholder
}

func (t *Tracker) isInvalidPeerID(peerID string) bool {
	// Check for invalid peer_id patterns
	if len(peerID) != 20 {
		return true
	}

	// Check for all zeros or all same character (suspicious)
	allSame := true
	firstChar := peerID[0]
	for i := 1; i < len(peerID); i++ {
		if peerID[i] != firstChar {
			allSame = false
			break
		}
	}

	return allSame
}

func (t *Tracker) isSuspiciousPort(port int) bool {
	// Flag potentially suspicious ports
	suspiciousPorts := map[int]bool{
		22:   true, // SSH
		23:   true, // Telnet
		25:   true, // SMTP
		53:   true, // DNS
		80:   true, // HTTP (web servers shouldn't be P2P clients)
		135:  true, // Windows RPC
		139:  true, // NetBIOS
		443:  true, // HTTPS (web servers shouldn't be P2P clients)
		445:  true, // SMB
		993:  true, // IMAPS
		995:  true, // POP3S
		1433: true, // SQL Server
		3389: true, // RDP
		5432: true, // PostgreSQL
	}

	// Ports < 1024 are privileged and suspicious for P2P
	// Ports > 65535 are invalid
	return suspiciousPorts[port] || port < 1024 || port > 65535
}

func (t *Tracker) isKnownBadActor(clientIP string) bool {
	// In production, this would check against:
	// - Blocklists from organizations like Bluetack
	// - Local abuse database
	// - Cloud provider IP ranges (if configured to block)

	// For now, just block obvious local/private ranges if configured
	privateRanges := []string{
		"192.168.", "10.", "172.16.", "172.17.", "172.18.", "172.19.",
		"172.20.", "172.21.", "172.22.", "172.23.", "172.24.", "172.25.",
		"172.26.", "172.27.", "172.28.", "172.29.", "172.30.", "172.31.",
	}

	// Only block private IPs if we're in a production environment
	// (you wouldn't want to block private IPs in development)
	for _, prefix := range privateRanges {
		if strings.HasPrefix(clientIP, prefix) {
			// In development, allow private IPs
			return false
		}
	}

	return false
}

// applyClientCompatibility adjusts response for specific BitTorrent clients
func (t *Tracker) applyClientCompatibility(userAgent string, response *AnnounceResponse) {
	client := t.detectClient(userAgent)

	switch client {
	case "qBittorrent":
		// qBittorrent works well with default settings
		// No special adjustments needed

	case "Transmission":
		// Transmission prefers shorter intervals
		if response.Interval > 1800 {
			response.Interval = 1800 // Max 30 minutes
		}

	case "WebTorrent":
		// WebTorrent needs specific adjustments for web compatibility
		// Ensure reasonable intervals for web clients
		if response.Interval > 300 {
			response.Interval = 300 // Max 5 minutes for web clients
		}
		if response.MinInterval > 60 {
			response.MinInterval = 60 // Cap min interval at 1 minute for web clients
		}

	case "Deluge":
		// Deluge can handle longer intervals
		// No special adjustments needed

	case "uTorrent":
		// uTorrent specific compatibility
		// Some versions have issues with very short intervals
		if response.MinInterval < 60 {
			response.MinInterval = 60
		}
	}
}

// detectClient identifies BitTorrent client from User-Agent
func (t *Tracker) detectClient(userAgent string) string {
	if userAgent == "" {
		return "Unknown"
	}

	userAgent = strings.ToLower(userAgent)

	if strings.Contains(userAgent, "qbittorrent") {
		return "qBittorrent"
	}
	if strings.Contains(userAgent, "transmission") {
		return "Transmission"
	}
	if strings.Contains(userAgent, "webtorrent") {
		return "WebTorrent"
	}
	if strings.Contains(userAgent, "deluge") {
		return "Deluge"
	}
	if strings.Contains(userAgent, "utorrent") || strings.Contains(userAgent, "µtorrent") {
		return "uTorrent"
	}
	if strings.Contains(userAgent, "libtorrent") {
		return "libtorrent"
	}
	if strings.Contains(userAgent, "azureus") || strings.Contains(userAgent, "vuze") {
		return "Azureus"
	}
	if strings.Contains(userAgent, "bitcomet") {
		return "BitComet"
	}

	return "Unknown"
}

// getClientIP extracts the real client IP address
func getClientIP(r *http.Request) string {
	// Check X-Forwarded-For header first (proxy/load balancer)
	if xff := r.Header.Get("X-Forwarded-For"); xff != "" {
		// Take the first IP in the chain
		if ips := strings.Split(xff, ","); len(ips) > 0 {
			return strings.TrimSpace(ips[0])
		}
	}

	// Check X-Real-IP header (nginx proxy)
	if xri := r.Header.Get("X-Real-IP"); xri != "" {
		return strings.TrimSpace(xri)
	}

	// Fall back to RemoteAddr
	ip, _, err := net.SplitHostPort(r.RemoteAddr)
	if err != nil {
		return r.RemoteAddr // Return as-is if can't parse
	}

	return ip
}

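// Note (advisory, not enforced here): X-Forwarded-For and X-Real-IP are
// client-controlled unless this service runs behind a trusted reverse proxy
// that overwrites them. Deployments exposed directly to the internet should
// rely on RemoteAddr instead, otherwise the abuse checks that key on client
// IP can be bypassed with spoofed headers.
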
// HandleAnnounce processes announce requests from peers
func (t *Tracker) HandleAnnounce(w http.ResponseWriter, r *http.Request) {
	// Get client IP for abuse detection
	clientIP := getClientIP(r)

	// Parse announce request
	req, err := t.parseAnnounceRequest(r)
	if err != nil {
		t.writeErrorResponse(w, fmt.Sprintf("Invalid announce request: %v", err))
		return
	}

	// Detect and prevent abuse
	if t.detectAbuse(req, clientIP) {
		t.writeErrorResponse(w, "Request rejected due to abuse detection")
		return
	}

	// Validate info hash with gateway
	if !t.gateway.IsValidInfoHash(req.InfoHash) {
		t.writeErrorResponse(w, "Invalid info_hash")
		return
	}

	// Process the announce with client compatibility
	resp := t.processAnnounce(req, r)
	t.applyClientCompatibility(r.Header.Get("User-Agent"), resp)

	// Write response
	w.Header().Set("Content-Type", "text/plain")
	data, err := bencode.Marshal(resp)
	if err != nil {
		t.writeErrorResponse(w, "Internal server error")
		return
	}

	w.Write(data)
}

// parseAnnounceRequest extracts announce parameters from HTTP request
func (t *Tracker) parseAnnounceRequest(r *http.Request) (*AnnounceRequest, error) {
	query := r.URL.Query()

	// Required parameters. Note: r.URL.Query() already percent-decodes values,
	// so info_hash arrives here as the raw 20-byte digest; unescaping it a
	// second time would corrupt or reject valid hashes.
	infoHash := query.Get("info_hash")
	if infoHash == "" {
		return nil, fmt.Errorf("missing info_hash")
	}
	if len(infoHash) != 20 {
		return nil, fmt.Errorf("invalid info_hash length")
	}
	infoHashStr := hex.EncodeToString([]byte(infoHash))

	peerID := query.Get("peer_id")
	if peerID == "" {
		return nil, fmt.Errorf("missing peer_id")
	}

	portStr := query.Get("port")
	if portStr == "" {
		return nil, fmt.Errorf("missing port")
	}
	port, err := strconv.Atoi(portStr)
	if err != nil || port <= 0 || port > 65535 {
		return nil, fmt.Errorf("invalid port")
	}

	// Parse numeric parameters
	uploaded, _ := strconv.ParseInt(query.Get("uploaded"), 10, 64)
	downloaded, _ := strconv.ParseInt(query.Get("downloaded"), 10, 64)
	left, _ := strconv.ParseInt(query.Get("left"), 10, 64)

	// Optional parameters
	event := query.Get("event")
	numWantStr := query.Get("numwant")
	numWant := t.config.DefaultNumWant
	if numWantStr != "" {
		if nw, err := strconv.Atoi(numWantStr); err == nil && nw > 0 {
			numWant = nw
			if numWant > t.config.MaxNumWant {
				numWant = t.config.MaxNumWant
			}
		}
	}

	compact := query.Get("compact") == "1"
	key := query.Get("key")

	// Get client IP
	ip := t.getClientIP(r)

	return &AnnounceRequest{
		InfoHash:   infoHashStr,
		PeerID:     peerID,
		Port:       port,
		Uploaded:   uploaded,
		Downloaded: downloaded,
		Left:       left,
		Event:      event,
		IP:         ip,
		NumWant:    numWant,
		Key:        key,
		Compact:    compact,
	}, nil
}

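// Example announce request (illustrative only; the route and all values are
// placeholders, not part of this package):
//
//	GET /announce?info_hash=%12%34...&peer_id=-qB4650-abcdefghijkl&port=6881
//	    &uploaded=0&downloaded=0&left=1048576&event=started&numwant=50&compact=1
//
// info_hash and peer_id are sent percent-encoded on the wire and arrive in
// parseAnnounceRequest already decoded by r.URL.Query().
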
// processAnnounce handles the announce logic and returns a response
func (t *Tracker) processAnnounce(req *AnnounceRequest, r *http.Request) *AnnounceResponse {
	t.mutex.Lock()
	defer t.mutex.Unlock()

	// Initialize torrent peer map if not exists
	if t.peers[req.InfoHash] == nil {
		t.peers[req.InfoHash] = make(map[string]*PeerInfo)
	}

	torrentPeers := t.peers[req.InfoHash]

	// Handle peer events
	switch req.Event {
	case "stopped":
		delete(torrentPeers, req.PeerID)
		// Remove from database
		if err := t.removePeerFromDatabase(req.PeerID, req.InfoHash); err != nil {
			log.Printf("Failed to remove peer from database: %v", err)
		}
	default:
		// Update or add peer
		now := time.Now()
		peer := &PeerInfo{
			PeerID:     req.PeerID,
			InfoHash:   req.InfoHash,
			IP:         req.IP,
			Port:       req.Port,
			Uploaded:   req.Uploaded,
			Downloaded: req.Downloaded,
			Left:       req.Left,
			LastSeen:   now,
			FirstSeen:  now, // Will be preserved by database if peer already exists
			Event:      req.Event,
			Key:        req.Key,
			Compact:    req.Compact,
			UserAgent:  r.Header.Get("User-Agent"),
			IsSeeder:   req.Left == 0,
			IsWebSeed:  false,
			Priority:   50, // Default priority
		}
		torrentPeers[req.PeerID] = peer

		// Store in database
		if err := t.storePeerInDatabase(peer); err != nil {
			log.Printf("Failed to store peer in database: %v", err)
		}

		// Notify coordinator of new peer connection
		if t.coordinator != nil {
			coordPeer := CoordinatorPeerInfo{
				IP:       peer.IP,
				Port:     peer.Port,
				PeerID:   peer.PeerID,
				Source:   "tracker",
				Quality:  70, // Tracker peers have good quality
				LastSeen: peer.LastSeen,
			}
			t.coordinator.OnPeerConnect(req.InfoHash, coordPeer)

			// Announce to external services (DHT, etc.) for new torrents
			if req.Event == "started" {
				go func() {
					if err := t.coordinator.AnnounceToExternalServices(req.InfoHash, req.Port); err != nil {
						log.Printf("Failed to announce to external services: %v", err)
					}
				}()
			}
		}
	}

	// Count seeders and leechers
	complete, incomplete := t.countPeers(torrentPeers)

	// Get peer list for response
	peers := t.getPeerList(req, torrentPeers)

	return &AnnounceResponse{
		Interval:    t.config.AnnounceInterval,
		MinInterval: t.config.MinInterval,
		Complete:    complete,
		Incomplete:  incomplete,
		Peers:       peers,
	}
}

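// Note on events: the BitTorrent announce protocol defines "started",
// "stopped", and "completed" events plus the empty regular announce.
// processAnnounce only special-cases "stopped"; "completed" and regular
// announces both fall through to the default branch, which simply refreshes
// the peer record.
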
// getPeerList returns a list of peers with WebSeed injection and priority handling
func (t *Tracker) getPeerList(req *AnnounceRequest, torrentPeers map[string]*PeerInfo) interface{} {
	var selectedPeers []*PeerInfo

	// Get peers from database (includes both tracker and coordinator peers)
	dbPeers, err := t.GetPeersForTorrent(req.InfoHash)
	if err != nil {
		log.Printf("Failed to get peers from database: %v", err)
		// Fall back to memory peers
		for peerID, peer := range torrentPeers {
			if peerID != req.PeerID {
				dbPeers = append(dbPeers, peer)
			}
		}
	}

	// Always inject WebSeed as highest priority peer if available
	webSeedURL := t.gateway.GetWebSeedURL(req.InfoHash)
	if webSeedURL != "" {
		if webSeedPeer := t.createWebSeedPeer(req.InfoHash); webSeedPeer != nil {
			// Store WebSeed peer in database for consistency
			if err := t.storePeerInDatabase(webSeedPeer); err != nil {
				log.Printf("Failed to store WebSeed peer: %v", err)
			}
			// Add to front of list (highest priority)
			selectedPeers = append([]*PeerInfo{webSeedPeer}, selectedPeers...)
		}
	}

	// Filter out the requesting peer and add others by priority
	for _, peer := range dbPeers {
		if peer.PeerID != req.PeerID && len(selectedPeers) < req.NumWant {
			// Skip if we already added this peer (avoid duplicates)
			duplicate := false
			for _, existing := range selectedPeers {
				if existing.PeerID == peer.PeerID {
					duplicate = true
					break
				}
			}
			if !duplicate {
				selectedPeers = append(selectedPeers, peer)
			}
		}
	}

	// Use coordinator for additional peers if available and we need more
	if t.coordinator != nil && len(selectedPeers) < req.NumWant {
		coordinatorPeers := t.coordinator.GetPeers(req.InfoHash)

		for _, coordPeer := range coordinatorPeers {
			if coordPeer.PeerID == req.PeerID || len(selectedPeers) >= req.NumWant {
				continue
			}

			// Check for duplicates
			duplicate := false
			for _, existing := range selectedPeers {
				if existing.IP == coordPeer.IP && existing.Port == coordPeer.Port {
					duplicate = true
					break
				}
			}

			if !duplicate {
				trackerPeer := &PeerInfo{
					PeerID:    coordPeer.PeerID,
					InfoHash:  req.InfoHash,
					IP:        coordPeer.IP,
					Port:      coordPeer.Port,
					Left:      0, // Assume seeder from coordinator
					LastSeen:  coordPeer.LastSeen,
					IsSeeder:  true,
					IsWebSeed: false,
					Priority:  coordPeer.Quality,
				}
				selectedPeers = append(selectedPeers, trackerPeer)
			}
		}
	}

	// Return in requested format
	if req.Compact {
		return t.createCompactPeerList(selectedPeers)
	}
	return t.createDictPeerList(selectedPeers)
}

// createWebSeedPeer creates a WebSeed peer for the gateway
func (t *Tracker) createWebSeedPeer(infoHash string) *PeerInfo {
	webSeedURL := t.gateway.GetWebSeedURL(infoHash)
	if webSeedURL == "" {
		return nil
	}

	// Parse gateway URL to get IP and port
	u, err := url.Parse(t.gateway.GetPublicURL())
	if err != nil {
		return nil
	}

	host := u.Hostname()
	portStr := u.Port()
	if portStr == "" {
		portStr = "80"
		if u.Scheme == "https" {
			portStr = "443"
		}
	}

	port, err := strconv.Atoi(portStr)
	if err != nil {
		return nil
	}

	return &PeerInfo{
		PeerID:     generateWebSeedPeerID(),
		InfoHash:   infoHash,
		IP:         host,
		Port:       port,
		Uploaded:   0,
		Downloaded: 0,
		Left:       0, // WebSeed is always a seeder
		FirstSeen:  time.Now(),
		LastSeen:   time.Now(),
		Event:      "started",
		IsSeeder:   true,
		IsWebSeed:  true,
		Priority:   100, // Highest priority for WebSeed
		UserAgent:  "TorrentGateway-WebSeed/1.0",
	}
}

// createCompactPeerList creates compact peer list supporting both IPv4 and IPv6
func (t *Tracker) createCompactPeerList(peers []*PeerInfo) interface{} {
	// Create separate lists for IPv4 and IPv6
	var ipv4Peers []byte
	var ipv6Peers []byte

	for _, peer := range peers {
		// Try IPv4 first
		ip := net.ParseIP(peer.IP)
		if ip != nil {
			if ipv4 := ip.To4(); ipv4 != nil {
				// 6 bytes: 4 for IPv4, 2 for port
				peerBytes := make([]byte, 6)
				copy(peerBytes[0:4], ipv4)
				peerBytes[4] = byte(peer.Port >> 8)
				peerBytes[5] = byte(peer.Port & 0xFF)
				ipv4Peers = append(ipv4Peers, peerBytes...)
			}
		}

		// Try IPv6 if available
		if peer.IPv6 != "" {
			if ipv6 := net.ParseIP(peer.IPv6); ipv6 != nil && ipv6.To4() == nil {
				// 18 bytes: 16 for IPv6, 2 for port
				peerBytes := make([]byte, 18)
				copy(peerBytes[0:16], ipv6)
				peerBytes[16] = byte(peer.Port >> 8)
				peerBytes[17] = byte(peer.Port & 0xFF)
				ipv6Peers = append(ipv6Peers, peerBytes...)
			}
		}
	}

	// Return format depends on what peers we have
	if len(ipv6Peers) > 0 && len(ipv4Peers) > 0 {
		// Return both IPv4 and IPv6 peers
		return map[string]interface{}{
			"peers":  ipv4Peers,
			"peers6": ipv6Peers,
		}
	} else if len(ipv6Peers) > 0 {
		// Return only IPv6 peers
		return map[string]interface{}{
			"peers6": ipv6Peers,
		}
	} else {
		// Return only IPv4 peers (traditional format)
		return ipv4Peers
	}
}

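// decodeCompactIPv4Peers is an illustrative sketch (not used by the tracker
// itself) of how a client would unpack the IPv4 compact format produced above:
// 6 bytes per peer, 4 for the address and 2 for the port in network byte order
// (BEP 23). It assumes well-formed input and is included only as documentation.
// Note also that when both address families are present, createCompactPeerList
// returns a map that ends up nested under the response's single "peers" key,
// whereas BEP 7 expects "peers6" as a separate top-level key; mixed responses
// may therefore not be understood by every client.
func decodeCompactIPv4Peers(data []byte) []string {
	var peers []string
	for i := 0; i+6 <= len(data); i += 6 {
		ip := net.IPv4(data[i], data[i+1], data[i+2], data[i+3])
		port := int(data[i+4])<<8 | int(data[i+5])
		peers = append(peers, fmt.Sprintf("%s:%d", ip, port))
	}
	return peers
}
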
// createDictPeerList creates dictionary peer list
func (t *Tracker) createDictPeerList(peers []*PeerInfo) []DictPeer {
	var dictPeers []DictPeer

	for _, peer := range peers {
		dictPeers = append(dictPeers, DictPeer{
			PeerID: peer.PeerID,
			IP:     peer.IP,
			Port:   peer.Port,
		})
	}

	return dictPeers
}

// countPeers counts seeders and leechers
func (t *Tracker) countPeers(torrentPeers map[string]*PeerInfo) (complete, incomplete int) {
	for _, peer := range torrentPeers {
		if peer.Left == 0 {
			complete++
		} else {
			incomplete++
		}
	}
	return
}

// getClientIP extracts the client IP from the request
func (t *Tracker) getClientIP(r *http.Request) string {
	// Check X-Forwarded-For header first
	xff := r.Header.Get("X-Forwarded-For")
	if xff != "" {
		// Take the first IP in the chain
		parts := strings.Split(xff, ",")
		ip := strings.TrimSpace(parts[0])
		if net.ParseIP(ip) != nil {
			return ip
		}
	}

	// Check X-Real-IP header
	xri := r.Header.Get("X-Real-IP")
	if xri != "" && net.ParseIP(xri) != nil {
		return xri
	}

	// Fall back to RemoteAddr
	host, _, err := net.SplitHostPort(r.RemoteAddr)
	if err != nil {
		return r.RemoteAddr
	}
	return host
}

// writeErrorResponse writes an error response in bencode format
func (t *Tracker) writeErrorResponse(w http.ResponseWriter, message string) {
	resp := map[string]interface{}{
		"failure reason": message,
	}

	w.Header().Set("Content-Type", "text/plain")
	data, _ := bencode.Marshal(resp)
	w.Write(data)
}

// cleanupRoutine periodically removes expired peers
func (t *Tracker) cleanupRoutine() {
	ticker := time.NewTicker(t.config.CleanupInterval)
	defer ticker.Stop()

	for range ticker.C {
		t.cleanupExpiredPeers()
	}
}

// cleanupExpiredPeers removes peers that haven't announced recently (45 minutes)
func (t *Tracker) cleanupExpiredPeers() {
	t.mutex.Lock()
	defer t.mutex.Unlock()

	// Clean up memory cache
	now := time.Now()
	expiry := now.Add(-45 * time.Minute) // 45-minute expiration

	for infoHash, torrentPeers := range t.peers {
		for peerID, peer := range torrentPeers {
			if peer.LastSeen.Before(expiry) {
				delete(torrentPeers, peerID)
			}
		}

		// Remove empty torrent entries
		if len(torrentPeers) == 0 {
			delete(t.peers, infoHash)
		}
	}

	// Clean up database - remove peers older than 45 minutes
	dbCleanupQuery := `DELETE FROM tracker_peers WHERE last_seen < datetime('now', '-45 minutes')`
	result, err := t.db.Exec(dbCleanupQuery)
	if err != nil {
		log.Printf("Failed to cleanup expired peers from database: %v", err)
	} else {
		if rowsAffected, _ := result.RowsAffected(); rowsAffected > 0 {
			log.Printf("Cleaned up %d expired peers from database", rowsAffected)
		}
	}

	// Clean up old torrent stats (older than 24 hours)
	statsCleanupQuery := `DELETE FROM torrent_stats WHERE last_update < datetime('now', '-24 hours')`
	if _, err := t.db.Exec(statsCleanupQuery); err != nil {
		log.Printf("Failed to cleanup old torrent stats: %v", err)
	}
}

// ============ DATABASE OPERATIONS ============

// initializeDatabase creates the necessary database tables
func (t *Tracker) initializeDatabase() error {
	tables := []string{
		`CREATE TABLE IF NOT EXISTS tracker_peers (
			peer_id TEXT NOT NULL,
			info_hash TEXT NOT NULL,
			ip TEXT NOT NULL,
			ipv6 TEXT,
			port INTEGER NOT NULL,
			uploaded INTEGER DEFAULT 0,
			downloaded INTEGER DEFAULT 0,
			left_bytes INTEGER DEFAULT 0,
			first_seen TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
			last_seen TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
			event TEXT,
			key_value TEXT,
			user_agent TEXT,
			is_seeder BOOLEAN DEFAULT FALSE,
			is_webseed BOOLEAN DEFAULT FALSE,
			priority INTEGER DEFAULT 50,
			PRIMARY KEY (peer_id, info_hash)
		)`,
		`CREATE TABLE IF NOT EXISTS torrent_stats (
			info_hash TEXT PRIMARY KEY,
			seeders INTEGER DEFAULT 0,
			leechers INTEGER DEFAULT 0,
			completed INTEGER DEFAULT 0,
			last_update TIMESTAMP DEFAULT CURRENT_TIMESTAMP
		)`,
	}

	// Create indexes for performance
	indexes := []string{
		`CREATE INDEX IF NOT EXISTS idx_tracker_peers_info_hash ON tracker_peers(info_hash)`,
		`CREATE INDEX IF NOT EXISTS idx_tracker_peers_last_seen ON tracker_peers(last_seen)`,
		`CREATE INDEX IF NOT EXISTS idx_tracker_peers_is_seeder ON tracker_peers(is_seeder)`,
		`CREATE INDEX IF NOT EXISTS idx_tracker_peers_priority ON tracker_peers(priority DESC)`,
	}

	for _, query := range tables {
		if _, err := t.db.Exec(query); err != nil {
			return fmt.Errorf("failed to create table: %w", err)
		}
	}

	for _, query := range indexes {
		if _, err := t.db.Exec(query); err != nil {
			return fmt.Errorf("failed to create index: %w", err)
		}
	}

	log.Printf("Tracker database tables initialized successfully")
	return nil
}

// storePeerInDatabase stores or updates a peer in the database
func (t *Tracker) storePeerInDatabase(peer *PeerInfo) error {
	query := `
		INSERT OR REPLACE INTO tracker_peers (
			peer_id, info_hash, ip, ipv6, port, uploaded, downloaded, left_bytes,
			first_seen, last_seen, event, key_value, user_agent, is_seeder, is_webseed, priority
		) VALUES (
			?, ?, ?, ?, ?, ?, ?, ?,
			COALESCE((SELECT first_seen FROM tracker_peers WHERE peer_id = ? AND info_hash = ?), ?),
			?, ?, ?, ?, ?, ?, ?
		)`

	_, err := t.db.Exec(query,
		peer.PeerID, peer.InfoHash, peer.IP, peer.IPv6, peer.Port,
		peer.Uploaded, peer.Downloaded, peer.Left,
		peer.PeerID, peer.InfoHash, peer.FirstSeen, // For COALESCE
		peer.LastSeen, peer.Event, peer.Key, peer.UserAgent,
		peer.IsSeeder, peer.IsWebSeed, peer.Priority)

	if err != nil {
		return fmt.Errorf("failed to store peer: %w", err)
	}

	// Update torrent stats
	go t.updateTorrentStats(peer.InfoHash)

	return nil
}

// removePeerFromDatabase removes a peer from the database
func (t *Tracker) removePeerFromDatabase(peerID, infoHash string) error {
	query := `DELETE FROM tracker_peers WHERE peer_id = ? AND info_hash = ?`
	_, err := t.db.Exec(query, peerID, infoHash)
	if err != nil {
		return fmt.Errorf("failed to remove peer: %w", err)
	}

	// Update torrent stats
	go t.updateTorrentStats(infoHash)

	return nil
}

// updateTorrentStats updates the cached statistics for a torrent
func (t *Tracker) updateTorrentStats(infoHash string) {
	query := `
		SELECT
			COUNT(CASE WHEN is_seeder = 1 THEN 1 END) as seeders,
			COUNT(CASE WHEN is_seeder = 0 THEN 1 END) as leechers,
			COUNT(CASE WHEN left_bytes = 0 THEN 1 END) as completed
		FROM tracker_peers
		WHERE info_hash = ? AND last_seen > datetime('now', '-45 minutes')`

	row := t.db.QueryRow(query, infoHash)
	var seeders, leechers, completed int
	if err := row.Scan(&seeders, &leechers, &completed); err != nil {
		log.Printf("Failed to update torrent stats for %s: %v", infoHash, err)
		return
	}

	updateQuery := `
		INSERT OR REPLACE INTO torrent_stats (info_hash, seeders, leechers, completed, last_update)
		VALUES (?, ?, ?, ?, CURRENT_TIMESTAMP)`

	if _, err := t.db.Exec(updateQuery, infoHash, seeders, leechers, completed); err != nil {
		log.Printf("Failed to store torrent stats for %s: %v", infoHash, err)
	}
}

// ============ PUBLIC API METHODS ============

// GetPeersForTorrent returns the list of peers for a specific torrent
func (t *Tracker) GetPeersForTorrent(infoHash string) ([]*PeerInfo, error) {
	query := `
		SELECT peer_id, info_hash, ip, COALESCE(ipv6, '') as ipv6, port, uploaded, downloaded,
		       left_bytes, first_seen, last_seen, COALESCE(event, '') as event,
		       COALESCE(key_value, '') as key_value, COALESCE(user_agent, '') as user_agent,
		       is_seeder, is_webseed, priority
		FROM tracker_peers
		WHERE info_hash = ? AND last_seen > datetime('now', '-45 minutes')
		ORDER BY priority DESC, is_webseed DESC, is_seeder DESC, last_seen DESC`

	rows, err := t.db.Query(query, infoHash)
	if err != nil {
		return nil, fmt.Errorf("failed to query peers: %w", err)
	}
	defer rows.Close()

	var peers []*PeerInfo
	for rows.Next() {
		peer := &PeerInfo{}
		err := rows.Scan(
			&peer.PeerID, &peer.InfoHash, &peer.IP, &peer.IPv6, &peer.Port,
			&peer.Uploaded, &peer.Downloaded, &peer.Left,
			&peer.FirstSeen, &peer.LastSeen, &peer.Event, &peer.Key, &peer.UserAgent,
			&peer.IsSeeder, &peer.IsWebSeed, &peer.Priority,
		)
		if err != nil {
			log.Printf("Failed to scan peer: %v", err)
			continue
		}
		peers = append(peers, peer)
	}

	if err = rows.Err(); err != nil {
		return nil, fmt.Errorf("error iterating peers: %w", err)
	}

	return peers, nil
}

// GetTorrentStats returns statistics for a specific torrent
func (t *Tracker) GetTorrentStats(infoHash string) (*TorrentStats, error) {
	// Try to get cached stats first
	query := `SELECT seeders, leechers, completed, last_update FROM torrent_stats WHERE info_hash = ?`
	row := t.db.QueryRow(query, infoHash)

	stats := &TorrentStats{InfoHash: infoHash}
	err := row.Scan(&stats.Seeders, &stats.Leechers, &stats.Completed, &stats.LastUpdate)

	// If no cached stats or they're old, recalculate
	if err != nil || time.Since(stats.LastUpdate) > 5*time.Minute {
		// Calculate real-time stats
		realTimeQuery := `
			SELECT
				COUNT(CASE WHEN is_seeder = 1 OR left_bytes = 0 THEN 1 END) as seeders,
				COUNT(CASE WHEN is_seeder = 0 AND left_bytes > 0 THEN 1 END) as leechers,
				COUNT(CASE WHEN left_bytes = 0 THEN 1 END) as completed
			FROM tracker_peers
			WHERE info_hash = ? AND last_seen > datetime('now', '-45 minutes')`

		realTimeRow := t.db.QueryRow(realTimeQuery, infoHash)
		if err := realTimeRow.Scan(&stats.Seeders, &stats.Leechers, &stats.Completed); err != nil {
			return nil, fmt.Errorf("failed to calculate torrent stats: %w", err)
		}

		stats.LastUpdate = time.Now()
		// Update cache asynchronously
		go t.updateTorrentStats(infoHash)
	}

	return stats, nil
}

// GetAllTorrents returns a list of all active torrents with their stats
func (t *Tracker) GetAllTorrents() (map[string]*TorrentStats, error) {
	query := `
		SELECT DISTINCT p.info_hash,
		       COALESCE(s.seeders, 0) as seeders,
		       COALESCE(s.leechers, 0) as leechers,
		       COALESCE(s.completed, 0) as completed,
		       COALESCE(s.last_update, p.last_seen) as last_update
		FROM tracker_peers p
		LEFT JOIN torrent_stats s ON p.info_hash = s.info_hash
		WHERE p.last_seen > datetime('now', '-45 minutes')
		ORDER BY last_update DESC`

	rows, err := t.db.Query(query)
	if err != nil {
		return nil, fmt.Errorf("failed to query torrents: %w", err)
	}
	defer rows.Close()

	torrents := make(map[string]*TorrentStats)
	for rows.Next() {
		stats := &TorrentStats{}
		err := rows.Scan(&stats.InfoHash, &stats.Seeders, &stats.Leechers,
			&stats.Completed, &stats.LastUpdate)
		if err != nil {
			log.Printf("Failed to scan torrent stats: %v", err)
			continue
		}
		torrents[stats.InfoHash] = stats
	}

	if err = rows.Err(); err != nil {
		return nil, fmt.Errorf("error iterating torrents: %w", err)
	}

	return torrents, nil
}

// generateWebSeedPeerID generates a consistent peer ID for the gateway WebSeed
func generateWebSeedPeerID() string {
	// Use a predictable prefix for WebSeed peers
	prefix := "-GT0001-" // Gateway Tracker v0.0.1

	// Generate random suffix
	suffix := make([]byte, 6)
	rand.Read(suffix)

	return prefix + hex.EncodeToString(suffix)
}

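// Note: the "-GT0001-" prefix plus a 12-character hex suffix yields a 20-byte
// peer ID following the Azureus-style "-XXNNNN-" convention used by most clients.
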
// GetStats returns comprehensive tracker statistics
func (t *Tracker) GetStats() map[string]interface{} {
	// Get stats from database for accurate counts
	statsQuery := `
		SELECT
			COUNT(DISTINCT info_hash) as total_torrents,
			COUNT(*) as total_peers,
			COUNT(CASE WHEN is_seeder = 1 OR left_bytes = 0 THEN 1 END) as total_seeders,
			COUNT(CASE WHEN is_seeder = 0 AND left_bytes > 0 THEN 1 END) as total_leechers,
			COUNT(CASE WHEN is_webseed = 1 THEN 1 END) as webseeds
		FROM tracker_peers
		WHERE last_seen > datetime('now', '-45 minutes')`

	row := t.db.QueryRow(statsQuery)
	var totalTorrents, totalPeers, totalSeeders, totalLeechers, webseeds int
	err := row.Scan(&totalTorrents, &totalPeers, &totalSeeders, &totalLeechers, &webseeds)

	stats := map[string]interface{}{
		"uptime_seconds": int(time.Since(t.startTime).Seconds()),
		"torrents":       totalTorrents,
		"peers":          totalPeers,
		"seeders":        totalSeeders,
		"leechers":       totalLeechers,
		"webseeds":       webseeds,
	}

	if err != nil {
		log.Printf("Failed to get database stats, using memory stats: %v", err)
		// Fallback to memory stats
		t.mutex.RLock()
		memoryTorrents := len(t.peers)
		memoryPeers := 0
		memorySeeders := 0
		memoryLeechers := 0

		for _, torrentPeers := range t.peers {
			memoryPeers += len(torrentPeers)
			for _, peer := range torrentPeers {
				if peer.Left == 0 {
					memorySeeders++
				} else {
					memoryLeechers++
				}
			}
		}
		t.mutex.RUnlock()

		stats["torrents"] = memoryTorrents
		stats["peers"] = memoryPeers
		stats["seeders"] = memorySeeders
		stats["leechers"] = memoryLeechers
		stats["webseeds"] = 0
	}

	return stats
}
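
// Example (illustrative sketch, not wired up by this package): exposing
// GetStats over HTTP. The route name and the use of encoding/json are
// assumptions, not part of the tracker itself.
//
//	http.HandleFunc("/tracker/stats", func(w http.ResponseWriter, r *http.Request) {
//		w.Header().Set("Content-Type", "application/json")
//		_ = json.NewEncoder(w).Encode(tr.GetStats())
//	})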