package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"mime/multipart"
	"net/http"
	"net/http/httptest"
	"os"
	"path/filepath"
	"sync"
	"testing"
	"time"

	"git.sovbit.dev/enki/torrentGateway/internal/api"
	"git.sovbit.dev/enki/torrentGateway/internal/config"
	"git.sovbit.dev/enki/torrentGateway/internal/storage"
	"github.com/gorilla/mux"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestEnvironment bundles the server, storage backend, and config shared by the integration tests
type TestEnvironment struct {
	server     *httptest.Server
	storage    *storage.Backend
	config     *config.Config
	tempDir    string
	t          *testing.T
	testPubkey string
}

// NewTestEnvironment creates a new test environment
func NewTestEnvironment(t *testing.T) *TestEnvironment {
	// Create temporary directory
	tempDir, err := os.MkdirTemp("", "gateway_test_*")
	require.NoError(t, err)

	// Create test config
	cfg := &config.Config{
		Mode: "unified",
		Gateway: config.GatewayConfig{
			Enabled:       true,
			Port:          0, // Will be set by httptest
			MaxUploadSize: "10GB",
		},
		Storage: config.StorageConfig{
			MetadataDB:   filepath.Join(tempDir, "test.db"),
			BlobStorage:  filepath.Join(tempDir, "blobs"),
			ChunkStorage: filepath.Join(tempDir, "chunks"),
			ChunkSize:    2 * 1024 * 1024, // 2MB
		},
		Admin: config.AdminConfig{
			Enabled: true,
			Pubkeys: []string{"test_admin_pubkey"},
		},
	}

	// Create storage backend
	storageBackend, err := storage.NewBackend(
		cfg.Storage.MetadataDB,
		cfg.Storage.ChunkStorage,
		cfg.Storage.BlobStorage,
		int64(cfg.Storage.ChunkSize),
		cfg,
	)
	require.NoError(t, err)

	// Create test pubkey and session
	testPubkey := "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"

	// Create router and register routes
	router := mux.NewRouter()
	api.RegisterRoutes(router.PathPrefix("/api").Subrouter(), cfg, storageBackend)

	// Create test user and session in database
	db := storageBackend.GetDB()
	_, err = db.Exec(`
		INSERT OR IGNORE INTO users (pubkey, storage_used, file_count, last_login, created_at)
		VALUES (?, 0, 0, ?, ?)
	`, testPubkey, time.Now(), time.Now())
	require.NoError(t, err)

	// Create a test session
	sessionToken := "test_session_token_" + testPubkey
	_, err = db.Exec(`
		INSERT OR IGNORE INTO sessions (token, pubkey, created_at, expires_at)
		VALUES (?, ?, ?, ?)
	`, sessionToken, testPubkey, time.Now(), time.Now().Add(24*time.Hour))
	require.NoError(t, err)

	// Create test server
	server := httptest.NewServer(router)

	return &TestEnvironment{
		server:     server,
		storage:    storageBackend,
		config:     cfg,
		tempDir:    tempDir,
		t:          t,
		testPubkey: testPubkey,
	}
}

// Cleanup cleans up test resources
func (te *TestEnvironment) Cleanup() {
	te.server.Close()
	te.storage.Close()
	os.RemoveAll(te.tempDir)
}

// TestFullUploadDownloadCycle tests the complete upload->store->download flow
func TestFullUploadDownloadCycle(t *testing.T) {
	env := NewTestEnvironment(t)
	defer env.Cleanup()

	// Test data
	testContent := []byte("This is test file content for integration testing")
	filename := "test.txt"

	// Test small file (blob storage)
	t.Run("SmallFileBlob", func(t *testing.T) {
		// Upload file
		uploadResp := uploadTestFile(t, env, testContent, filename)
		assert.NotEmpty(t, uploadResp.FileHash)
		assert.Equal(t, "blob", uploadResp.StorageType)

		// Download file
		downloadedContent := downloadTestFile(t, env, uploadResp.FileHash)
		assert.Equal(t, testContent, downloadedContent)
	})

	// Test large file (torrent storage)
	t.Run("LargeFileTorrent", func(t *testing.T) {
		// Create large test content (>100MB)
		largeContent := make([]byte, 110*1024*1024) // 110MB
		for i := range largeContent {
			largeContent[i] = byte(i % 256)
		}

		// Upload large file
		uploadResp := uploadTestFile(t, env, largeContent, "large_test.bin")
		assert.NotEmpty(t, uploadResp.FileHash)
		assert.Equal(t, "torrent", uploadResp.StorageType)

		// Download large file
		downloadedContent := downloadTestFile(t, env, uploadResp.FileHash)
		assert.Equal(t, largeContent, downloadedContent)
	})
}

// TestAuthenticationFlow tests the complete authentication flow
func TestAuthenticationFlow(t *testing.T) {
	env := NewTestEnvironment(t)
	defer env.Cleanup()

	t.Run("ChallengeGeneration", func(t *testing.T) {
		resp, err := http.Get(env.server.URL + "/api/auth/challenge")
		require.NoError(t, err)
		defer resp.Body.Close()

		assert.Equal(t, http.StatusOK, resp.StatusCode)

		var challengeResp map[string]string
		err = json.NewDecoder(resp.Body).Decode(&challengeResp)
		require.NoError(t, err)

		assert.NotEmpty(t, challengeResp["challenge"])
	})

	t.Run("ProtectedEndpointWithoutAuth", func(t *testing.T) {
		resp, err := http.Get(env.server.URL + "/api/users/me/files")
		require.NoError(t, err)
		defer resp.Body.Close()

		assert.Equal(t, http.StatusUnauthorized, resp.StatusCode)
	})
}

// TestAdminOperations tests admin functionality
func TestAdminOperations(t *testing.T) {
	env := NewTestEnvironment(t)
	defer env.Cleanup()

	t.Run("AdminStats", func(t *testing.T) {
		// Note: This would need mock admin authentication.
		// For now, test that the endpoint exists.
		resp, err := http.Get(env.server.URL + "/api/admin/stats")
		require.NoError(t, err)
		defer resp.Body.Close()

		// Should return 401 without auth
		assert.Equal(t, http.StatusUnauthorized, resp.StatusCode)
	})
}

// TestConcurrentUploads tests concurrent upload handling
func TestConcurrentUploads(t *testing.T) {
	env := NewTestEnvironment(t)
	defer env.Cleanup()

	concurrency := 10
	var wg sync.WaitGroup
	results := make(chan UploadResponse, concurrency)
	errors := make(chan error, concurrency)

	// Launch concurrent uploads
	for i := 0; i < concurrency; i++ {
		wg.Add(1)
		go func(index int) {
			defer wg.Done()

			content := []byte(fmt.Sprintf("Test content for file %d", index))
			filename := fmt.Sprintf("test_%d.txt", index)

			resp := uploadTestFile(t, env, content, filename)
			if resp.FileHash != "" {
				results <- resp
			} else {
				errors <- fmt.Errorf("upload %d failed", index)
			}
		}(i)
	}

	// Wait for all uploads to complete
	wg.Wait()
	close(results)
	close(errors)

	// Check results
	successCount := len(results)
	errorCount := len(errors)

	assert.Equal(t, concurrency, successCount+errorCount)
	assert.Greater(t, successCount, errorCount, "More uploads should succeed than fail")

	// Verify each uploaded file can be downloaded
	for result := range results {
		content := downloadTestFile(t, env, result.FileHash)
		assert.NotEmpty(t, content)
	}
}

// TestStorageTypeRouting tests that files are routed to the correct storage backend based on size
func TestStorageTypeRouting(t *testing.T) {
	env := NewTestEnvironment(t)
	defer env.Cleanup()

	testCases := []struct {
		name         string
		contentSize  int
		expectedType string
	}{
		{"SmallFile", 1024, "blob"},                 // 1KB -> blob
		{"MediumFile", 50 * 1024 * 1024, "blob"},    // 50MB -> blob
		{"LargeFile", 150 * 1024 * 1024, "torrent"}, // 150MB -> torrent
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			content := make([]byte, tc.contentSize)
			for i := range content {
				content[i] = byte(i % 256)
			}

			resp := uploadTestFile(t, env, content, tc.name+".bin")
			assert.Equal(t, tc.expectedType, resp.StorageType)
		})
	}
}

// TestSystemStats tests the system statistics endpoint
func TestSystemStats(t *testing.T) {
	env := NewTestEnvironment(t)
	defer env.Cleanup()

	// Upload some test files first
	uploadTestFile(t, env, []byte("blob content"), "blob.txt")
	uploadTestFile(t, env, make([]byte, 150*1024*1024), "torrent.bin") // 150MB

	// Get system stats
	resp, err := http.Get(env.server.URL + "/api/stats")
	require.NoError(t, err)
	defer resp.Body.Close()

	assert.Equal(t, http.StatusOK, resp.StatusCode)

	var stats map[string]interface{}
	err = json.NewDecoder(resp.Body).Decode(&stats)
	require.NoError(t, err)

	// Verify stats structure
	assert.Contains(t, stats, "gateway")
	assert.Contains(t, stats, "blossom")
	assert.Contains(t, stats, "dht")
	assert.Contains(t, stats, "system")

	// Verify some values
	gateway := stats["gateway"].(map[string]interface{})
	assert.Equal(t, "healthy", gateway["status"])
	assert.Equal(t, float64(9876), gateway["port"])
}

// TestLoadTesting performs basic load testing
func TestLoadTesting(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping load test in short mode")
	}

	env := NewTestEnvironment(t)
	defer env.Cleanup()

	// Test parameters
	numUsers := 50
	uploadsPerUser := 2
	concurrency := make(chan struct{}, 10) // Limit to 10 concurrent operations

	var wg sync.WaitGroup
	successCount := int64(0)
	errorCount := int64(0)
	var mu sync.Mutex

	startTime := time.Now()

	// Simulate multiple users uploading files
	for user := 0; user < numUsers; user++ {
		for upload := 0; upload < uploadsPerUser; upload++ {
			wg.Add(1)
			go func(userID, uploadID int) {
				defer wg.Done()
				concurrency <- struct{}{}        // Acquire slot
				defer func() { <-concurrency }() // Release slot

				content := []byte(fmt.Sprintf("User %d upload %d content", userID, uploadID))
				filename := fmt.Sprintf("user_%d_file_%d.txt", userID, uploadID)

				resp := uploadTestFile(t, env, content, filename)

				mu.Lock()
				if resp.FileHash != "" {
					successCount++
				} else {
					errorCount++
				}
				mu.Unlock()
			}(user, upload)
		}
	}

	wg.Wait()
	duration := time.Since(startTime)

	t.Logf("Load test completed in %v", duration)
	t.Logf("Successful uploads: %d", successCount)
	t.Logf("Failed uploads: %d", errorCount)
	t.Logf("Throughput: %.2f uploads/second", float64(successCount)/duration.Seconds())

	// Assertions
	assert.Greater(t, successCount, int64(0), "Should have some successful uploads")
	assert.Less(t, errorCount, successCount, "Error rate should be less than success rate")
}

// Helper functions

// UploadResponse mirrors the JSON body returned by the upload endpoint.
type UploadResponse struct {
	Success     bool   `json:"success"`
	FileHash    string `json:"file_hash"`
	StorageType string `json:"storage_type"`
	Message     string `json:"message"`
}

// uploadTestFile uploads content through the gateway upload endpoint as the
// test user and returns the decoded response.
func uploadTestFile(t *testing.T, env *TestEnvironment, content []byte, filename string) UploadResponse {
	// Create multipart form
	var buf bytes.Buffer
	writer := multipart.NewWriter(&buf)

	part, err := writer.CreateFormFile("file", filename)
	require.NoError(t, err)

	_, err = part.Write(content)
	require.NoError(t, err)

	err = writer.Close()
	require.NoError(t, err)

	// Create request
	req, err := http.NewRequest("POST", env.server.URL+"/api/upload", &buf)
	require.NoError(t, err)

	req.Header.Set("Content-Type", writer.FormDataContentType())

	// Add authentication header
	sessionToken := "test_session_token_" + env.testPubkey
	req.Header.Set("Authorization", "Bearer "+sessionToken)

	// Send request
	client := &http.Client{Timeout: 30 * time.Second}
	resp, err := client.Do(req)
	require.NoError(t, err)
	defer resp.Body.Close()

	// Read the body once so it can be logged if JSON parsing fails
	bodyBytes, err := io.ReadAll(resp.Body)
	require.NoError(t, err)

	var uploadResp UploadResponse
	if err := json.Unmarshal(bodyBytes, &uploadResp); err != nil {
		// Log the raw body for debugging before failing
		t.Logf("Upload response body: %s", string(bodyBytes))
		require.NoError(t, err)
	}

	if !uploadResp.Success {
		t.Logf("Upload failed: %s", uploadResp.Message)
	}

	return uploadResp
}

// downloadTestFile fetches a file by hash from the gateway download endpoint
// and returns its contents.
func downloadTestFile(t *testing.T, env *TestEnvironment, fileHash string) []byte {
	resp, err := http.Get(env.server.URL + "/api/download/" + fileHash)
	require.NoError(t, err)
	defer resp.Body.Close()

	assert.Equal(t, http.StatusOK, resp.StatusCode)

	content, err := io.ReadAll(resp.Body)
	require.NoError(t, err)

	return content
}

// TestDatabaseIntegrity tests database consistency
func TestDatabaseIntegrity(t *testing.T) {
	env := NewTestEnvironment(t)
	defer env.Cleanup()

	// Upload test files
	blobContent := []byte("blob test content")
	torrentContent := make([]byte, 150*1024*1024) // 150MB

	blobResp := uploadTestFile(t, env, blobContent, "blob.txt")
	torrentResp := uploadTestFile(t, env, torrentContent, "torrent.bin")

	require.True(t, blobResp.Success)
	require.True(t, torrentResp.Success)

	// Test database queries directly
	db := env.storage.GetDB()

	// Check files table
	var fileCount int
	err := db.QueryRow("SELECT COUNT(*) FROM files").Scan(&fileCount)
	require.NoError(t, err)
	assert.Equal(t, 2, fileCount)

	// Check blobs table
	var blobCount int
	err = db.QueryRow("SELECT COUNT(*) FROM blobs").Scan(&blobCount)
	require.NoError(t, err)
	assert.Equal(t, 1, blobCount) // Only blob file should be in blobs table

	// Check chunks table
	var chunkCount int
	err = db.QueryRow("SELECT COUNT(*) FROM chunks").Scan(&chunkCount)
	require.NoError(t, err)
	assert.Greater(t, chunkCount, 0) // Torrent file should have chunks

	// Verify file metadata consistency
	blobMeta, err := env.storage.GetFileMetadata(blobResp.FileHash)
	require.NoError(t, err)
	require.NotNil(t, blobMeta)
	assert.Equal(t, "blob", blobMeta.StorageType)
	assert.Equal(t, int64(len(blobContent)), blobMeta.Size)

	torrentMeta, err := env.storage.GetFileMetadata(torrentResp.FileHash)
	require.NoError(t, err)
	require.NotNil(t, torrentMeta)
	assert.Equal(t, "torrent", torrentMeta.StorageType)
	assert.Equal(t, int64(len(torrentContent)), torrentMeta.Size)
}

// TestCacheIntegration tests caching functionality
func TestCacheIntegration(t *testing.T) {
	env := NewTestEnvironment(t)
	defer env.Cleanup()

	// Upload a file
	content := []byte("cache test content")
	uploadResp := uploadTestFile(t, env, content, "cache_test.txt")
	require.True(t, uploadResp.Success)

	// Download twice to test caching
	start1 := time.Now()
	content1 := downloadTestFile(t, env, uploadResp.FileHash)
	duration1 := time.Since(start1)

	start2 := time.Now()
	content2 := downloadTestFile(t, env, uploadResp.FileHash)
	duration2 := time.Since(start2)

	// Verify content is identical
	assert.Equal(t, content1, content2)
	assert.Equal(t, content, content1)

	// Second request should be faster (cached)
	// Note: In a test environment the difference may not be significant
	t.Logf("First download: %v, Second download: %v", duration1, duration2)
}

// TestStreamingEndpoint tests basic streaming of an uploaded file
func TestStreamingEndpoint(t *testing.T) {
	env := NewTestEnvironment(t)
	defer env.Cleanup()

	// Upload a video file
	videoContent := make([]byte, 10*1024*1024) // 10MB simulated video
	uploadResp := uploadTestFile(t, env, videoContent, "test_video.mp4")
	require.True(t, uploadResp.Success)

	// Test streaming endpoint
	resp, err := http.Get(env.server.URL + "/api/stream/" + uploadResp.FileHash)
	require.NoError(t, err)
	defer resp.Body.Close()

	assert.Equal(t, http.StatusOK, resp.StatusCode)
	assert.Equal(t, "application/octet-stream", resp.Header.Get("Content-Type"))
}

// TestErrorHandling tests various error conditions
func TestErrorHandling(t *testing.T) {
	env := NewTestEnvironment(t)
	defer env.Cleanup()

	testCases := []struct {
		name           string
		endpoint       string
		method         string
		expectedStatus int
	}{
		{"InvalidFileHash", "/api/download/invalid_hash", "GET", http.StatusBadRequest},
		{"NonexistentFile", "/api/download/0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", "GET", http.StatusNotFound},
		{"InvalidMethod", "/api/upload", "GET", http.StatusMethodNotAllowed},
		{"NonexistentEndpoint", "/api/nonexistent", "GET", http.StatusNotFound},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			req, err := http.NewRequest(tc.method, env.server.URL+tc.endpoint, nil)
			require.NoError(t, err)

			resp, err := http.DefaultClient.Do(req)
			require.NoError(t, err)
			defer resp.Body.Close()

			assert.Equal(t, tc.expectedStatus, resp.StatusCode)
		})
	}
}

// TestPerformanceBenchmarks runs performance benchmarks
func TestPerformanceBenchmarks(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping performance benchmarks in short mode")
	}

	env := NewTestEnvironment(t)
	defer env.Cleanup()

	// Benchmark small file uploads
	t.Run("BenchmarkSmallUploads", func(t *testing.T) {
		content := make([]byte, 1024) // 1KB
		iterations := 100

		start := time.Now()
		for i := 0; i < iterations; i++ {
			filename := fmt.Sprintf("bench_small_%d.bin", i)
			resp := uploadTestFile(t, env, content, filename)
			assert.True(t, resp.Success)
		}
		duration := time.Since(start)

		throughput := float64(iterations) / duration.Seconds()
		t.Logf("Small file upload throughput: %.2f files/second", throughput)
		assert.Greater(t, throughput, 10.0, "Should achieve >10 small uploads/second")
	})

	// Benchmark downloads
	t.Run("BenchmarkDownloads", func(t *testing.T) {
		// Upload a test file first
		content := make([]byte, 1024*1024) // 1MB
		uploadResp := uploadTestFile(t, env, content, "download_bench.bin")
		require.True(t, uploadResp.Success)

		iterations := 50
		start := time.Now()
		for i := 0; i < iterations; i++ {
			downloadedContent := downloadTestFile(t, env, uploadResp.FileHash)
			assert.Equal(t, len(content), len(downloadedContent))
		}
		duration := time.Since(start)

		throughput := float64(iterations) / duration.Seconds()
		t.Logf("Download throughput: %.2f downloads/second", throughput)
		assert.Greater(t, throughput, 20.0, "Should achieve >20 downloads/second")
	})
}

// TestDatabaseMigrations tests database schema migrations
func TestDatabaseMigrations(t *testing.T) {
	env := NewTestEnvironment(t)
	defer env.Cleanup()

	db := env.storage.GetDB()

	// Test that all required tables exist
	tables := []string{"files", "chunks", "blobs", "users", "sessions", "admin_actions", "banned_users", "content_reports"}

	for _, table := range tables {
		var count int
		err := db.QueryRow(fmt.Sprintf("SELECT COUNT(*) FROM %s", table)).Scan(&count)
		assert.NoError(t, err, "Table %s should exist and be queryable", table)
	}

	// Test that all required indexes exist
	var indexCount int
	err := db.QueryRow(`
		SELECT COUNT(*) FROM sqlite_master
		WHERE type = 'index' AND name LIKE 'idx_%'
	`).Scan(&indexCount)
	require.NoError(t, err)
	assert.Greater(t, indexCount, 10, "Should have multiple indexes for performance")
}