enki b3204ea07a
Some checks are pending
CI Pipeline / Run Tests (push) Waiting to run
CI Pipeline / Lint Code (push) Waiting to run
CI Pipeline / Security Scan (push) Waiting to run
CI Pipeline / Build Docker Images (push) Blocked by required conditions
CI Pipeline / E2E Tests (push) Blocked by required conditions
first commit
2025-08-18 00:40:15 -07:00

367 lines
9.1 KiB
Go

package memory
import (
"log"
"runtime"
"sync"
"time"
)
// BufferPool manages reusable byte buffers to reduce garbage-collection
// pressure. It keeps one sync.Pool per predefined size class (see
// NewBufferPool); Get/Put route each request to the smallest class that fits.
type BufferPool struct {
	pools map[int]*sync.Pool // one pool per size class, keyed by buffer capacity
	sizes []int              // size classes in ascending order; findPoolSize relies on the ordering
	stats BufferStats        // usage counters; guarded by mutex
	mutex sync.RWMutex       // protects stats only (sync.Pool is internally synchronized)
}
// BufferStats tracks buffer pool usage statistics.
//
// NOTE(review): PoolHits is incremented for every Get served from a size-class
// pool, even when sync.Pool had to allocate via its New func (the API gives no
// signal), so hits are over-reported and TotalNews only counts oversized
// requests that bypass the pools entirely.
type BufferStats struct {
	TotalGets     int64 // total Get calls
	TotalPuts     int64 // total Put calls
	TotalNews     int64 // buffers allocated outside any pool (oversized requests)
	ActiveBuffers int64 // Gets minus Puts currently outstanding
	PoolHits      int64 // Gets routed to a size-class pool (see note above)
	PoolMisses    int64 // Gets with no suitable size class
}
// NewBufferPool creates a new buffer pool with predefined size classes:
// 4KB, 32KB, 256KB, 2MB and 16MB.
func NewBufferPool() *BufferPool {
	classSizes := []int{4 << 10, 32 << 10, 256 << 10, 2 << 20, 16 << 20}
	byClass := make(map[int]*sync.Pool, len(classSizes))
	for _, n := range classSizes {
		n := n // capture a per-iteration copy for the closure (pre-Go 1.22 loop semantics)
		byClass[n] = &sync.Pool{
			New: func() interface{} { return make([]byte, n) },
		}
	}
	return &BufferPool{pools: byClass, sizes: classSizes}
}
// Get retrieves a buffer of at least the requested size. The returned slice
// has length exactly size; requests larger than every size class fall back to
// a plain allocation that will not be pooled on Put.
func (bp *BufferPool) Get(size int) []byte {
	bp.mutex.Lock()
	bp.stats.TotalGets++
	bp.stats.ActiveBuffers++
	bp.mutex.Unlock()

	// Route to the smallest size class that fits; 0 means nothing fits.
	class := bp.findPoolSize(size)
	if class == 0 {
		bp.mutex.Lock()
		bp.stats.PoolMisses++
		bp.stats.TotalNews++
		bp.mutex.Unlock()
		return make([]byte, size)
	}

	// NOTE(review): counted as a hit even when sync.Pool allocated via New —
	// the API cannot distinguish reuse from fresh allocation.
	buf := bp.pools[class].Get().([]byte)
	bp.mutex.Lock()
	bp.stats.PoolHits++
	bp.mutex.Unlock()

	// Hand back exactly the requested length; capacity stays at the class size
	// so Put can recognize the buffer later.
	return buf[:size]
}
// Put returns a buffer to the pool for reuse. Nil buffers and buffers whose
// capacity does not match a known size class (including oversized ones handed
// out by Get) are dropped for the garbage collector to reclaim.
func (bp *BufferPool) Put(buffer []byte) {
	if buffer == nil {
		return
	}

	bp.mutex.Lock()
	bp.stats.TotalPuts++
	bp.stats.ActiveBuffers--
	bp.mutex.Unlock()

	class := bp.findExactPoolSize(cap(buffer))
	if class == 0 {
		return // not one of ours; let GC handle it
	}

	// Restore full length, then zero the contents so stale data can never leak
	// to the next caller of Get.
	full := buffer[:cap(buffer)]
	for i := range full {
		full[i] = 0
	}
	bp.pools[class].Put(full)
}
// findPoolSize returns the smallest size class that can hold requestedSize,
// or 0 when the request exceeds every class. Relies on bp.sizes being sorted
// in ascending order.
func (bp *BufferPool) findPoolSize(requestedSize int) int {
	for i := 0; i < len(bp.sizes); i++ {
		if s := bp.sizes[i]; s >= requestedSize {
			return s
		}
	}
	return 0
}
// findExactPoolSize returns capacity if it exactly matches one of the
// configured size classes, and 0 otherwise. Used by Put to decide whether a
// buffer originated from a pool.
func (bp *BufferPool) findExactPoolSize(capacity int) int {
	for i := range bp.sizes {
		if bp.sizes[i] == capacity {
			return capacity
		}
	}
	return 0
}
// GetStats returns a consistent snapshot copy of the current pool statistics.
func (bp *BufferPool) GetStats() BufferStats {
	bp.mutex.RLock()
	snapshot := bp.stats
	bp.mutex.RUnlock()
	return snapshot
}
// ChunkBufferManager manages buffers specifically for chunk operations.
// Unlike BufferPool it holds a single sync.Pool of fixed-size buffers, all of
// capacity chunkSize.
type ChunkBufferManager struct {
	chunkPool *sync.Pool       // pool of chunkSize-byte buffers
	chunkSize int64            // fixed capacity of every pooled buffer
	stats     ChunkBufferStats // usage counters; guarded by mutex
	mutex     sync.RWMutex     // protects stats only
}
// ChunkBufferStats tracks chunk buffer usage.
//
// NOTE(review): ChunkNews and ChunkPoolMiss are never incremented anywhere in
// this file, and ChunkPoolHits is bumped on every get (sync.Pool gives no
// signal when New allocated), so ChunkPoolHits always equals ChunkGets.
type ChunkBufferStats struct {
	ChunkGets     int64 // total GetChunkBuffer calls
	ChunkPuts     int64 // total accepted PutChunkBuffer calls
	ChunkNews     int64 // unused in current code (see note)
	ActiveChunks  int64 // gets minus accepted puts
	ChunkPoolHits int64 // incremented on every get (see note)
	ChunkPoolMiss int64 // unused in current code (see note)
}
// NewChunkBufferManager creates a manager whose pool hands out buffers of
// exactly chunkSize bytes.
func NewChunkBufferManager(chunkSize int64) *ChunkBufferManager {
	pool := &sync.Pool{
		New: func() interface{} { return make([]byte, chunkSize) },
	}
	return &ChunkBufferManager{chunkSize: chunkSize, chunkPool: pool}
}
// GetChunkBuffer returns a buffer sized for chunk operations; hand it back
// with PutChunkBuffer when finished.
func (cbm *ChunkBufferManager) GetChunkBuffer() []byte {
	cbm.mutex.Lock()
	cbm.stats.ChunkGets++
	cbm.stats.ActiveChunks++
	// NOTE(review): recorded as a hit even when sync.Pool allocates via New.
	cbm.stats.ChunkPoolHits++
	cbm.mutex.Unlock()

	return cbm.chunkPool.Get().([]byte)
}
// PutChunkBuffer returns a chunk buffer to the pool. Nil buffers and buffers
// whose capacity differs from the configured chunk size are silently dropped.
func (cbm *ChunkBufferManager) PutChunkBuffer(buffer []byte) {
	if buffer == nil {
		return
	}
	if int64(cap(buffer)) != cbm.chunkSize {
		return
	}

	cbm.mutex.Lock()
	cbm.stats.ChunkPuts++
	cbm.stats.ActiveChunks--
	cbm.mutex.Unlock()

	// Zero the visible contents so stale data cannot leak to the next consumer.
	for i := range buffer {
		buffer[i] = 0
	}
	cbm.chunkPool.Put(buffer)
}
// GetChunkStats returns a consistent snapshot copy of the chunk statistics.
func (cbm *ChunkBufferManager) GetChunkStats() ChunkBufferStats {
	cbm.mutex.RLock()
	snapshot := cbm.stats
	cbm.mutex.RUnlock()
	return snapshot
}
// MemoryOptimizer provides overall memory optimization features: pooled
// buffers, GC tuning via runtime.SetGCPercent, and a background monitor
// goroutine that reacts to memory thresholds.
//
// NOTE(review): memStats and lastGCTime are written by the monitor goroutine
// and also read/written by GetMemoryStats and emergencyCleanup without any
// locking — a data race under -race. The monitor goroutine started by
// NewMemoryOptimizer also has no stop mechanism (ctx/quit channel).
type MemoryOptimizer struct {
	bufferPool   *BufferPool         // general-purpose size-class pool
	chunkManager *ChunkBufferManager // fixed-size chunk pool
	// GC optimization
	gcTarget   float64       // GC percent handed to runtime.SetGCPercent (stored as float)
	gcInterval time.Duration // monitor tick / threshold-check period
	// Memory monitoring
	memStats   runtime.MemStats // last sampled runtime stats (unsynchronized; see note)
	lastGCTime time.Time        // last time stats were logged (despite the name)
	config     *MemoryConfig    // thresholds and tuning knobs
}
// MemoryConfig configures memory optimization behavior.
type MemoryConfig struct {
	GCTargetPercent int           // value passed to runtime.SetGCPercent
	GCInterval      time.Duration // how often the monitor samples memory
	ChunkSize       int64         // buffer size used by ChunkBufferManager
	// Memory thresholds, compared against runtime.MemStats.Alloc (bytes)
	MemoryWarnThreshold  int64 // above this: trigger a gentle runtime.GC
	MemoryLimitThreshold int64 // above this: emergencyCleanup (double GC + pool reset)
}
// NewMemoryOptimizer creates a new memory optimizer. A nil config selects
// aggressive defaults: GC at 50%, 30s monitor interval, 2MB chunks, and
// 1GB warn / 2GB limit thresholds.
//
// NOTE(review): this starts a monitor goroutine with no stop channel or
// context, so each optimizer leaks one goroutine for the process lifetime.
// runtime.SetGCPercent is also a process-global side effect.
func NewMemoryOptimizer(config *MemoryConfig) *MemoryOptimizer {
	if config == nil {
		config = &MemoryConfig{
			GCTargetPercent:      50,                 // more aggressive than Go's default 100
			GCInterval:           30 * time.Second,
			ChunkSize:            2 * 1024 * 1024,    // 2MB
			MemoryWarnThreshold:  1024 * 1024 * 1024, // 1GB
			MemoryLimitThreshold: 2048 * 1024 * 1024, // 2GB
		}
	}

	mo := &MemoryOptimizer{
		bufferPool:   NewBufferPool(),
		chunkManager: NewChunkBufferManager(config.ChunkSize),
		gcTarget:     float64(config.GCTargetPercent),
		gcInterval:   config.GCInterval,
		config:       config,
	}

	runtime.SetGCPercent(config.GCTargetPercent)
	go mo.memoryMonitorRoutine()
	return mo
}
// GetBuffer hands out a pooled buffer of at least size bytes; return it via
// PutBuffer when done.
func (mo *MemoryOptimizer) GetBuffer(size int) []byte {
	return mo.bufferPool.Get(size)
}
// PutBuffer hands a buffer obtained from GetBuffer back to the pool.
func (mo *MemoryOptimizer) PutBuffer(buffer []byte) {
	mo.bufferPool.Put(buffer)
}
// GetChunkBuffer hands out a chunk-sized buffer from the chunk manager;
// return it via PutChunkBuffer when done.
func (mo *MemoryOptimizer) GetChunkBuffer() []byte {
	return mo.chunkManager.GetChunkBuffer()
}
// PutChunkBuffer hands a chunk buffer back to the chunk manager's pool.
func (mo *MemoryOptimizer) PutChunkBuffer(buffer []byte) {
	mo.chunkManager.PutChunkBuffer(buffer)
}
// memoryMonitorRoutine monitors memory usage and triggers optimizations.
// It runs forever on its own goroutine (started by NewMemoryOptimizer);
// NOTE(review): there is no way to stop it — consider a context/quit channel.
func (mo *MemoryOptimizer) memoryMonitorRoutine() {
	ticker := time.NewTicker(mo.gcInterval)
	defer ticker.Stop()
	for range ticker.C {
		// NOTE(review): writing mo.memStats here races with GetMemoryStats,
		// which calls ReadMemStats on the same field from other goroutines.
		runtime.ReadMemStats(&mo.memStats)
		currentMemory := int64(mo.memStats.Alloc)
		// Check memory thresholds
		if currentMemory > mo.config.MemoryLimitThreshold {
			// Emergency GC and buffer pool cleanup
			mo.emergencyCleanup()
		} else if currentMemory > mo.config.MemoryWarnThreshold {
			// Gentle GC
			runtime.GC()
		}
		// Log memory stats at most every 5 minutes. lastGCTime starts at its
		// zero value, so the very first tick always logs.
		if time.Since(mo.lastGCTime) > 5*time.Minute {
			mo.logMemoryStats()
			mo.lastGCTime = time.Now()
		}
	}
}
// emergencyCleanup performs aggressive memory cleanup when heap Alloc exceeds
// MemoryLimitThreshold: two forced GC passes, then wholesale replacement of
// both buffer pools so pooled memory becomes collectable.
//
// NOTE(review): swapping mo.bufferPool / mo.chunkManager is not synchronized
// with concurrent GetBuffer/PutBuffer callers, which read those pointer
// fields — a data race; callers may also Put into the discarded pools.
func (mo *MemoryOptimizer) emergencyCleanup() {
	// Force GC
	runtime.GC()
	runtime.GC() // second pass sweeps objects released by the first
	// Drop the pools wholesale; fresh empty ones repopulate on demand
	mo.bufferPool = NewBufferPool()
	mo.chunkManager = NewChunkBufferManager(mo.config.ChunkSize)
	runtime.ReadMemStats(&mo.memStats)
}
// logMemoryStats writes a one-line summary of heap usage and pool hit ratios
// to the standard logger. Reads mo.memStats as last sampled by the monitor.
func (mo *MemoryOptimizer) logMemoryStats() {
	const mib = 1024 * 1024
	bufferStats := mo.bufferPool.GetStats()
	chunkStats := mo.chunkManager.GetChunkStats()
	log.Printf("Memory Stats - Alloc: %d MB, Sys: %d MB, NumGC: %d, Buffer Pool Hits: %d/%d, Chunk Pool Hits: %d/%d",
		mo.memStats.Alloc/mib, mo.memStats.Sys/mib, mo.memStats.NumGC,
		bufferStats.PoolHits, bufferStats.TotalGets,
		chunkStats.ChunkPoolHits, chunkStats.ChunkGets)
}
// GetMemoryStats samples the runtime and returns detailed memory statistics
// as a generic map (suitable for JSON encoding or debug endpoints).
func (mo *MemoryOptimizer) GetMemoryStats() map[string]interface{} {
	runtime.ReadMemStats(&mo.memStats)

	stats := map[string]interface{}{
		"allocated_bytes": mo.memStats.Alloc,
		"total_allocated": mo.memStats.TotalAlloc,
		"system_memory":   mo.memStats.Sys,
		"gc_runs":         mo.memStats.NumGC,
		"gc_pause_ns":     mo.memStats.PauseTotalNs,
		"heap_objects":    mo.memStats.HeapObjects,
		"stack_bytes":     mo.memStats.StackSys,
		"goroutine_count": runtime.NumGoroutine(),
	}
	stats["buffer_pool_stats"] = mo.bufferPool.GetStats()
	stats["chunk_pool_stats"] = mo.chunkManager.GetChunkStats()
	return stats
}
// OptimizeForHighLoad switches to aggressive GC settings (25%) for high-load
// scenarios and forces an immediate collection. Process-global side effect.
func (mo *MemoryOptimizer) OptimizeForHighLoad() {
	runtime.SetGCPercent(25)
	runtime.GC()
}
// OptimizeForLowLoad restores the Go default GC target (100%) for low-load
// scenarios. Process-global side effect.
func (mo *MemoryOptimizer) OptimizeForLowLoad() {
	runtime.SetGCPercent(100)
}