main.go
package main

import (
	"container/list"
	"fmt"
	"sync"
	"time"
)

// CacheEntry represents a single entry in the LRU cache
type CacheEntry struct {
	key          string
	value        interface{}
	expiresAt    time.Time
	lastAccessed time.Time
}

// LRUCache is a thread-safe LRU cache with TTL support
type LRUCache struct {
	mu           sync.RWMutex
	capacity     int
	defaultTTL   time.Duration
	items        map[string]*list.Element
	order        *list.List
	hitCount     int64
	missCount    int64
	stopEviction chan struct{}
}

// NewLRUCache creates a new LRU cache with specified capacity and default TTL
func NewLRUCache(capacity int, defaultTTL time.Duration) *LRUCache {
	cache := &LRUCache{
		capacity:     capacity,
		defaultTTL:   defaultTTL,
		items:        make(map[string]*list.Element),
		order:        list.New(),
		stopEviction: make(chan struct{}),
	}

	// Start background eviction goroutine
	go cache.startEvictionWorker(1 * time.Minute)

	return cache
}

// startEvictionWorker periodically removes expired entries
func (c *LRUCache) startEvictionWorker(interval time.Duration) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			c.EvictExpired()
		case <-c.stopEviction:
			return
		}
	}
}

// Set adds or updates an entry in the cache with the default TTL
func (c *LRUCache) Set(key string, value interface{}) {
	c.SetWithTTL(key, value, c.defaultTTL)
}

// SetWithTTL adds or updates an entry in the cache with a custom TTL
func (c *LRUCache) SetWithTTL(key string, value interface{}, ttl time.Duration) {
	c.mu.Lock()
	defer c.mu.Unlock()

	// Check if key already exists
	if elem, exists := c.items[key]; exists {
		// Update existing entry
		c.order.MoveToFront(elem)
		entry := elem.Value.(*CacheEntry)
		entry.value = value
		entry.expiresAt = time.Now().Add(ttl)
		entry.lastAccessed = time.Now()
		return
	}

	// Evict least recently used entry if at capacity
	if c.order.Len() >= c.capacity {
		c.evictLRU()
	}

	// Create new entry
	entry := &CacheEntry{
		key:          key,
		value:        value,
		expiresAt:    time.Now().Add(ttl),
		lastAccessed: time.Now(),
	}
	elem := c.order.PushFront(entry)
	c.items[key] = elem
}

// Get retrieves an entry from the cache (updates access time)
func (c *LRUCache) Get(key string) (interface{}, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()

	if elem, exists := c.items[key]; exists {
		entry := elem.Value.(*CacheEntry)

		// Check if entry is expired
		if time.Now().After(entry.expiresAt) {
			c.removeElement(elem)
			c.missCount++
			return nil, false
		}

		// Update access time and move to front
		entry.lastAccessed = time.Now()
		c.order.MoveToFront(elem)
		c.hitCount++
		return entry.value, true
	}

	c.missCount++
	return nil, false
}

// Delete removes an entry from the cache
func (c *LRUCache) Delete(key string) {
	c.mu.Lock()
	defer c.mu.Unlock()

	if elem, exists := c.items[key]; exists {
		c.removeElement(elem)
	}
}

// EvictExpired removes all expired entries from the cache
func (c *LRUCache) EvictExpired() {
	c.mu.Lock()
	defer c.mu.Unlock()

	now := time.Now()
	for elem := c.order.Back(); elem != nil; {
		nextElem := elem.Prev()
		entry := elem.Value.(*CacheEntry)

		if now.After(entry.expiresAt) {
			c.removeElement(elem)
		}

		elem = nextElem
	}
}

// evictLRU removes the least recently used entry
func (c *LRUCache) evictLRU() {
	if elem := c.order.Back(); elem != nil {
		c.removeElement(elem)
	}
}

// removeElement removes an element from the cache and order list
func (c *LRUCache) removeElement(elem *list.Element) {
	entry := elem.Value.(*CacheEntry)
	delete(c.items, entry.key)
	c.order.Remove(elem)
}

// Stats returns cache statistics (hits, misses, size)
func (c *LRUCache) Stats() (hits, misses, size int64) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.hitCount, c.missCount, int64(c.order.Len())
}

// Close stops the background eviction worker and clears the cache
func (c *LRUCache) Close() {
	close(c.stopEviction)
	c.mu.Lock()
	defer c.mu.Unlock()
	c.items = make(map[string]*list.Element)
	c.order = list.New()
	c.hitCount = 0
	c.missCount = 0
}

func main() {
	// Create cache with capacity 5 and default TTL 10 seconds
	cache := NewLRUCache(5, 10*time.Second)
	defer cache.Close()

	// Add entries
	cache.Set("user:1", map[string]string{"name": "Alice", "email": "[email protected]"})
	cache.Set("user:2", map[string]string{"name": "Bob", "email": "[email protected]"})

	// Retrieve entry
	if val, ok := cache.Get("user:1"); ok {
		fmt.Printf("Found user: %v\n", val)
	}

	// Add entry with custom TTL
	cache.SetWithTTL("temp:data", "short-lived-value", 2*time.Second)

	// Wait for expiration
	time.Sleep(3 * time.Second)

	// Try to get expired entry
	if _, ok := cache.Get("temp:data"); !ok {
		fmt.Println("temp:data has expired")
	}

	// Print stats
	hits, misses, size := cache.Stats()
	fmt.Printf("Cache Stats - Hits: %d, Misses: %d, Size: %d\n", hits, misses, size)
}

How It Works

This is an LRU cache that tracks recency and a per-entry TTL, expiring stale items automatically while enforcing a maximum size.

Entries live in a map for O(1) lookup plus a doubly linked list that records recency; Get and Set move the touched entry to the front of the list. Expired items are pruned when accessed and by a background janitor goroutine, and when capacity is reached the least recently used entry is evicted.
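
As a concrete illustration, here is a minimal sketch of capacity-based eviction, assuming the LRUCache code above is in the same package (the demoCapacityEviction helper is made up for illustration): with capacity 2, adding a third key pushes out the least recently used entry.

// demoCapacityEviction shows the oldest untouched key being evicted.
func demoCapacityEviction() {
	c := NewLRUCache(2, time.Minute)
	defer c.Close()

	c.Set("a", 1)
	c.Set("b", 2)
	c.Get("b")    // "b" is now the most recently used entry
	c.Set("c", 3) // capacity reached, so "a" (least recently used) is evicted

	_, okA := c.Get("a")
	_, okB := c.Get("b")
	fmt.Println(okA, okB) // prints: false true
}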

Key Concepts

  • Combines recency-based eviction with time-based expiration.
  • Background cleanup removes expired entries even when there is no traffic to trigger on-access pruning.
  • Mutex guards allow safe concurrent access (see the sketch after this list).
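
For example, a rough sketch of concurrent use, assuming the cache above is in the same package (the demoConcurrentAccess helper name is made up): several goroutines share one instance safely because every exported method acquires the mutex.

// demoConcurrentAccess exercises the cache from several goroutines at once.
func demoConcurrentAccess() {
	c := NewLRUCache(100, time.Minute)
	defer c.Close()

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			key := fmt.Sprintf("key:%d", n)
			c.Set(key, n)
			if v, ok := c.Get(key); ok {
				fmt.Println(key, "=", v)
			}
		}(i)
	}
	wg.Wait() // run with the -race flag to confirm there are no data races
}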

When to Use This Pattern

  • Caching HTTP responses or database query results (a read-through sketch follows this list).
  • Short-lived token or session caches.
  • Speeding up expensive computations with bounded memory.
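
One way this might look in practice is a read-through helper that consults the cache first and falls back to the data source on a miss. This is a sketch only: fetchUser is a hypothetical stand-in for a slow database or HTTP call, and the 30-second TTL is an arbitrary choice.

// fetchUser simulates a slow backend lookup (hypothetical stand-in).
func fetchUser(id string) (map[string]string, error) {
	time.Sleep(50 * time.Millisecond) // simulated latency
	return map[string]string{"id": id, "name": "User " + id}, nil
}

// cachedUser returns a user from the cache, repopulating it on a miss.
func cachedUser(c *LRUCache, id string) (interface{}, error) {
	if v, ok := c.Get("user:" + id); ok {
		return v, nil // cache hit
	}
	u, err := fetchUser(id)
	if err != nil {
		return nil, err // do not cache failures
	}
	c.SetWithTTL("user:"+id, u, 30*time.Second)
	return u, nil
}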

Best Practices

  • Choose a TTL and capacity that fit the workload, and monitor hit rates (see the sketch after this list).
  • Stop cleanup goroutines during shutdown.
  • Treat missing or expired values as cache misses and repopulate safely.
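
A minimal sketch of the monitoring and shutdown points, assuming the Stats and Close methods from the listing above (the reportAndShutdown helper name is illustrative):

// reportAndShutdown logs the hit rate and stops the eviction goroutine.
func reportAndShutdown(c *LRUCache) {
	hits, misses, size := c.Stats()
	if total := hits + misses; total > 0 {
		fmt.Printf("hit rate: %.1f%% (entries: %d)\n",
			100*float64(hits)/float64(total), size)
	}
	c.Close() // stops the background eviction worker during shutdown
}
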
Go Version: 1.18
Difficulty: intermediate
Production Ready: Yes
Lines of Code: 212