enki b3204ea07a
Some checks are pending
CI Pipeline / Run Tests (push) Waiting to run
CI Pipeline / Lint Code (push) Waiting to run
CI Pipeline / Security Scan (push) Waiting to run
CI Pipeline / Build Docker Images (push) Blocked by required conditions
CI Pipeline / E2E Tests (push) Blocked by required conditions
first commit
2025-08-18 00:40:15 -07:00

419 lines
8.8 KiB
Go

package cache
import (
"container/list"
"context"
"encoding/json"
"fmt"
"log"
"sync"
"time"
"github.com/go-redis/redis/v8"
)
// CacheInterface defines the cache operations implemented by every
// cache in this package (in-memory LRU, Redis-backed, and tiered).
type CacheInterface interface {
	// Get returns the value stored under key and whether it was found.
	Get(key string) ([]byte, bool)
	// Set stores value under key. In the LRU implementation a
	// non-positive ttl means "no expiry"; Redis-backed implementations
	// pass ttl through to the server.
	Set(key string, value []byte, ttl time.Duration) error
	// Delete removes key; deleting an absent key is not an error.
	Delete(key string) error
	// Clear removes every entry owned by this cache.
	Clear() error
	// Stats reports hit/miss counters and size information.
	Stats() CacheStats
}
// CacheStats provides cache statistics as a JSON-serializable snapshot.
// Implementations that cannot measure a field leave it at its zero
// value (e.g. the Redis cache does not report Size or MemoryUsage).
type CacheStats struct {
	Hits        int64   `json:"hits"`               // successful lookups
	Misses      int64   `json:"misses"`             // failed or expired lookups
	Size        int     `json:"size"`               // current number of entries
	MaxSize     int     `json:"max_size"`           // configured entry limit
	HitRate     float64 `json:"hit_rate"`           // Hits / (Hits + Misses)
	MemoryUsage int64   `json:"memory_usage_bytes"` // sum of stored value sizes
}
// LRUCache implements an in-memory LRU cache. Eviction is count-based
// (at most maxSize entries); memoryUsed is bookkeeping for statistics
// only and does not trigger eviction. All fields are guarded by mutex.
type LRUCache struct {
	maxSize    int                      // maximum entry count before eviction
	items      map[string]*list.Element // key -> node in evictList
	evictList  *list.List               // front = most recently used
	hits       int64                    // lookup hit counter
	misses     int64                    // lookup miss counter (incl. expired)
	memoryUsed int64                    // total bytes of stored values
	mutex      sync.RWMutex             // guards every field above
}
// cacheItem represents an item in the cache; it is stored as the
// Value of a list.Element so the key is recoverable during eviction.
type cacheItem struct {
	key      string    // cache key (needed to delete from the map on evict)
	value    []byte    // stored payload
	expiry   time.Time // zero value means the entry never expires
	size     int64     // len(value) at the time of the last Set
	accessed time.Time // last Get/Set time (informational)
}
// NewLRUCache constructs an empty LRU cache that holds at most maxSize
// entries; eviction is count-based, not byte-based.
func NewLRUCache(maxSize int) *LRUCache {
	c := &LRUCache{maxSize: maxSize}
	c.items = make(map[string]*list.Element)
	c.evictList = list.New()
	return c
}
// Get returns the cached value for key and whether it was present.
// Expired entries are removed lazily here and count as misses; a hit
// refreshes the entry's LRU position and access time.
func (c *LRUCache) Get(key string) ([]byte, bool) {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	element, ok := c.items[key]
	if !ok {
		c.misses++
		return nil, false
	}

	item := element.Value.(*cacheItem)
	if !item.expiry.IsZero() && time.Now().After(item.expiry) {
		// Lazy expiry: drop the stale entry and report a miss.
		c.removeElement(element)
		c.misses++
		return nil, false
	}

	c.evictList.MoveToFront(element)
	item.accessed = time.Now()
	c.hits++
	return item.value, true
}
// Set stores value under key with an optional TTL; a non-positive ttl
// means the entry never expires. Existing entries are updated in place
// (count unchanged, so no eviction); inserting a new entry may evict
// the least recently used items to stay within maxSize.
func (c *LRUCache) Set(key string, value []byte, ttl time.Duration) error {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	now := time.Now()
	var expiry time.Time
	if ttl > 0 {
		expiry = now.Add(ttl)
	}

	if element, ok := c.items[key]; ok {
		// Update in place, keeping the byte accounting in sync.
		item := element.Value.(*cacheItem)
		c.memoryUsed += int64(len(value)) - item.size
		item.value = value
		item.size = int64(len(value))
		item.accessed = now
		item.expiry = expiry
		c.evictList.MoveToFront(element)
		return nil
	}

	entry := &cacheItem{
		key:      key,
		value:    value,
		size:     int64(len(value)),
		accessed: now,
		expiry:   expiry,
	}
	c.items[key] = c.evictList.PushFront(entry)
	c.memoryUsed += entry.size
	c.evictIfNeeded()
	return nil
}
// Delete removes key from the cache. Deleting an absent key is a
// no-op; the error return is always nil (kept for CacheInterface).
func (c *LRUCache) Delete(key string) error {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	if el, ok := c.items[key]; ok {
		c.removeElement(el)
	}
	return nil
}
// Clear drops every entry and resets the byte accounting. The hit and
// miss counters are intentionally preserved across a Clear.
func (c *LRUCache) Clear() error {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	c.evictList.Init()
	c.items = map[string]*list.Element{}
	c.memoryUsed = 0
	return nil
}
// Stats returns a snapshot of the cache's counters and sizes. HitRate
// is 0 until at least one lookup has been made (avoids 0/0).
func (c *LRUCache) Stats() CacheStats {
	c.mutex.RLock()
	defer c.mutex.RUnlock()

	stats := CacheStats{
		Hits:        c.hits,
		Misses:      c.misses,
		Size:        len(c.items),
		MaxSize:     c.maxSize,
		MemoryUsage: c.memoryUsed,
	}
	if total := c.hits + c.misses; total > 0 {
		stats.HitRate = float64(c.hits) / float64(total)
	}
	return stats
}
// evictIfNeeded evicts least-recently-used entries until the entry
// count is within maxSize. Caller must hold c.mutex.
//
// The loop bails out when the eviction list is empty: if the items map
// and evictList were ever to disagree (map non-empty, list empty), the
// original unconditional loop would spin forever since evictOldest
// would be a no-op while len(c.items) stayed above maxSize.
func (c *LRUCache) evictIfNeeded() {
	for len(c.items) > c.maxSize {
		if c.evictList.Back() == nil {
			// Nothing left to evict; avoid an infinite loop on
			// inconsistent internal state.
			return
		}
		c.evictOldest()
	}
}
// evictOldest removes the least recently used entry (the back of the
// eviction list), if any. Caller must hold c.mutex.
func (c *LRUCache) evictOldest() {
	if back := c.evictList.Back(); back != nil {
		c.removeElement(back)
	}
}
// removeElement unlinks element from the eviction list, deletes its
// key from the map, and releases its bytes from the memory accounting.
// Caller must hold c.mutex.
func (c *LRUCache) removeElement(element *list.Element) {
	item := element.Value.(*cacheItem)
	c.evictList.Remove(element)
	delete(c.items, item.key)
	c.memoryUsed -= item.size
}
// RedisCache implements cache using Redis. Hit/miss counters are kept
// locally (not in Redis) and guarded by mutex; every Redis call uses a
// short per-operation timeout.
type RedisCache struct {
	client *redis.Client
	prefix string       // namespace prepended to every key
	hits   int64        // local hit counter
	misses int64        // local miss counter (incl. Redis errors)
	mutex  sync.RWMutex // guards hits and misses only
}
// NewRedisCache creates a Redis-backed cache and verifies connectivity
// with a PING (5s budget) before returning. prefix namespaces all keys.
func NewRedisCache(addr, password string, db int, prefix string) (*RedisCache, error) {
	client := redis.NewClient(&redis.Options{
		Addr:     addr,
		Password: password,
		DB:       db,
	})

	// Fail fast if the server is unreachable.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := client.Ping(ctx).Err(); err != nil {
		return nil, fmt.Errorf("failed to connect to Redis: %w", err)
	}

	return &RedisCache{client: client, prefix: prefix}, nil
}
// Get retrieves an item from Redis cache. Both a missing key and a
// Redis error are reported as misses (errors are additionally logged);
// each call is bounded by a 100ms timeout.
func (r *RedisCache) Get(key string) ([]byte, bool) {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	recordMiss := func() {
		r.mutex.Lock()
		r.misses++
		r.mutex.Unlock()
	}

	value, err := r.client.Get(ctx, r.prefix+key).Bytes()
	switch {
	case err == redis.Nil:
		recordMiss()
		return nil, false
	case err != nil:
		log.Printf("Redis cache error: %v", err)
		recordMiss()
		return nil, false
	}

	r.mutex.Lock()
	r.hits++
	r.mutex.Unlock()
	return value, true
}
// Set adds an item to Redis cache under the configured prefix, bounded
// by a 100ms timeout; ttl is passed through to the Redis SET.
func (r *RedisCache) Set(key string, value []byte, ttl time.Duration) error {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	cmd := r.client.Set(ctx, r.prefix+key, value, ttl)
	return cmd.Err()
}
// Delete removes an item from Redis cache, bounded by a 100ms timeout.
func (r *RedisCache) Delete(key string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	cmd := r.client.Del(ctx, r.prefix+key)
	return cmd.Err()
}
// Clear removes all items carrying the cache prefix. Keys are
// discovered with a non-blocking SCAN (never KEYS) and deleted in one
// DEL; the whole operation has a 5s budget.
func (r *RedisCache) Clear() error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	var keys []string
	iter := r.client.Scan(ctx, 0, r.prefix+"*", 0).Iterator()
	for iter.Next(ctx) {
		keys = append(keys, iter.Val())
	}
	if err := iter.Err(); err != nil {
		return err
	}

	if len(keys) == 0 {
		return nil
	}
	return r.client.Del(ctx, keys...).Err()
}
// Stats returns the locally tracked hit/miss counters. Size, MaxSize,
// and MemoryUsage are left at zero: this cache does not query the
// Redis server for them.
func (r *RedisCache) Stats() CacheStats {
	r.mutex.RLock()
	defer r.mutex.RUnlock()

	stats := CacheStats{Hits: r.hits, Misses: r.misses}
	if total := r.hits + r.misses; total > 0 {
		stats.HitRate = float64(r.hits) / float64(total)
	}
	return stats
}
// TieredCache combines LRU and Redis for hot/warm caching. l2 may be
// nil (Redis unavailable or not configured), in which case the cache
// degrades to L1-only behavior.
type TieredCache struct {
	l1     *LRUCache   // Hot cache (in-memory)
	l2     *RedisCache // Warm cache (Redis); nil when disabled
	l1Size int         // L1 cache size limit (entry count)
}
// NewTieredCache creates a tiered cache system. When redisAddr is
// empty or the Redis connection fails, the failure is logged and the
// cache runs with the in-memory L1 tier only; the error return is
// always nil in the current implementation.
func NewTieredCache(l1Size int, redisAddr, redisPassword string, redisDB int) (*TieredCache, error) {
	tc := &TieredCache{
		l1:     NewLRUCache(l1Size),
		l1Size: l1Size,
	}

	if redisAddr != "" {
		warm, err := NewRedisCache(redisAddr, redisPassword, redisDB, "gateway:")
		if err != nil {
			log.Printf("Warning: Redis cache unavailable, using L1 only: %v", err)
		} else {
			tc.l2 = warm
		}
	}

	return tc, nil
}
// Get retrieves from L1 first, then L2. A warm (L2) hit is promoted
// into L1 with a fixed 15-minute TTL so subsequent reads stay hot.
func (t *TieredCache) Get(key string) ([]byte, bool) {
	if v, ok := t.l1.Get(key); ok {
		return v, true
	}

	if t.l2 == nil {
		return nil, false
	}
	v, ok := t.l2.Get(key)
	if !ok {
		return nil, false
	}

	// Promote the warm hit into the hot tier (LRU Set cannot fail).
	t.l1.Set(key, v, 15*time.Minute)
	return v, true
}
// Set stores in both L1 and L2. The L2 entry gets double the TTL
// (capped at 24h) so it outlives the hot copy and can serve promotions.
func (t *TieredCache) Set(key string, value []byte, ttl time.Duration) error {
	if err := t.l1.Set(key, value, ttl); err != nil {
		return err
	}

	if t.l2 == nil {
		return nil
	}

	// Keep the warm copy around longer than the hot one.
	l2TTL := ttl * 2
	if l2TTL > 24*time.Hour {
		l2TTL = 24 * time.Hour
	}
	return t.l2.Set(key, value, l2TTL)
}
// Delete removes key from both tiers.
//
// Fix: the original discarded the L2 (Redis) error and always returned
// nil, so a failed Redis delete left a stale warm copy that a later
// Get would silently promote back into L1 — with no signal to the
// caller. The L2 error is now propagated. L1 is deleted first and its
// Delete always returns nil, so L1 invalidation still happens even
// when L2 fails.
func (t *TieredCache) Delete(key string) error {
	if err := t.l1.Delete(key); err != nil {
		return err
	}
	if t.l2 != nil {
		return t.l2.Delete(key)
	}
	return nil
}
// Clear removes all items from both caches.
//
// Fix: the original ignored the L2 (Redis) error and always returned
// nil, so a failed Redis flush left warm entries behind while the
// caller believed the cache was empty. The L2 error is now propagated;
// L1 is cleared first (its Clear always returns nil), so the hot tier
// is emptied even when L2 fails.
func (t *TieredCache) Clear() error {
	if err := t.l1.Clear(); err != nil {
		return err
	}
	if t.l2 != nil {
		return t.l2.Clear()
	}
	return nil
}
// Stats returns combined cache statistics: summed hit/miss counters
// across both tiers, with Size/MaxSize/MemoryUsage taken from L1 (the
// Redis tier does not report them).
//
// Fix: the original divided by the combined total without a zero
// guard, so on a fresh cache with an L2 tier (all counters zero)
// HitRate became NaN (0.0/0.0) — and encoding/json refuses to marshal
// NaN, which would make serializing CacheStats fail. The guard below
// matches the total > 0 check used by LRUCache.Stats and
// RedisCache.Stats.
func (t *TieredCache) Stats() CacheStats {
	l1Stats := t.l1.Stats()
	if t.l2 == nil {
		return l1Stats
	}

	l2Stats := t.l2.Stats()
	combined := CacheStats{
		Hits:        l1Stats.Hits + l2Stats.Hits,
		Misses:      l1Stats.Misses + l2Stats.Misses,
		Size:        l1Stats.Size,
		MaxSize:     l1Stats.MaxSize,
		MemoryUsage: l1Stats.MemoryUsage,
	}
	if total := combined.Hits + combined.Misses; total > 0 {
		combined.HitRate = float64(combined.Hits) / float64(total)
	}
	return combined
}