diff --git a/tests/arc_test.go b/tests/arc_test.go
new file mode 100644
index 0000000..d5a73a4
--- /dev/null
+++ b/tests/arc_test.go
@@ -0,0 +1,105 @@
+package zwis_test
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/NonsoAmadi10/zwis/zwis"
+)
+
+func TestARCCache(t *testing.T) {
+	ctx := context.Background()
+	cache := zwis.NewARCCache(3)
+
+	// Test Set and Get
+	cache.Set(ctx, "key1", "value1", 0)
+	cache.Set(ctx, "key2", "value2", 0)
+	cache.Set(ctx, "key3", "value3", 0)
+
+	if v, ok := cache.Get(ctx, "key1"); !ok || v != "value1" {
+		t.Errorf("Expected value1, got %v", v)
+	}
+
+	// Test eviction
+	cache.Set(ctx, "key4", "value4", 0)
+	if _, ok := cache.Get(ctx, "key2"); ok {
+		t.Error("key2 should have been evicted")
+	}
+
+	// Test updating existing key
+	cache.Set(ctx, "key1", "new_value1", 0)
+	if v, ok := cache.Get(ctx, "key1"); !ok || v != "new_value1" {
+		t.Errorf("Expected new_value1, got %v", v)
+	}
+
+	// Test expiration
+	cache.Set(ctx, "key5", "value5", 50*time.Millisecond)
+	time.Sleep(100 * time.Millisecond)
+	if _, ok := cache.Get(ctx, "key5"); ok {
+		t.Error("key5 should have expired")
+	}
+
+	// Test Delete
+	cache.Set(ctx, "key6", "value6", 0)
+	cache.Delete(ctx, "key6")
+	if _, ok := cache.Get(ctx, "key6"); ok {
+		t.Error("key6 should have been deleted")
+	}
+
+	// Test Clear
+	cache.Set(ctx, "key7", "value7", 0)
+	cache.Clear(ctx)
+	if _, ok := cache.Get(ctx, "key7"); ok {
+		t.Error("Cache should be empty after Clear")
+	}
+}
+
+func TestARCCacheAdaptiveness(t *testing.T) {
+	ctx := context.Background()
+	cache := zwis.NewARCCache(5)
+
+	// Fill the cache
+	cache.Set(ctx, "A", "A", 0)
+	cache.Set(ctx, "B", "B", 0)
+	cache.Set(ctx, "C", "C", 0)
+	cache.Set(ctx, "D", "D", 0)
+
+	// Access pattern: B, C, D, E
+	cache.Get(ctx, "B")
+	cache.Get(ctx, "C")
+	cache.Get(ctx, "D")
+	cache.Set(ctx, "E", "E", 0)
+
+	// // A should be evicted
+	// if _, ok := cache.Get(ctx, "A"); ok {
+	// 	t.Error("A should have been evicted")
+	// }
+
+	// B, C, D, E should still be in the cache
+	for _, key := range []string{"B", "C", "D", "E"} {
+		if _, ok := cache.Get(ctx, key); !ok {
+			t.Errorf("%s should still be in the cache", key)
+		}
+	}
+
+	// Now, let's access A multiple times
+	cache.Set(ctx, "A", "A", 0)
+	cache.Get(ctx, "A")
+	cache.Get(ctx, "A")
+
+	// Access a new item F
+	cache.Set(ctx, "F", "F", 0)
+
+	// B should be evicted now, as it was least recently used among B, C, D, E
+	if _, ok := cache.Get(ctx, "B"); ok {
+		t.Error("B should have been evicted")
+	}
+
+	// A, C, D, E, F should be in the cache
+	for _, key := range []string{"A", "C", "D", "E", "F"} {
+		if _, ok := cache.Get(ctx, key); !ok {
+			t.Errorf("%s should be in the cache", key)
+		}
+	}
+}
diff --git a/zwis/arc.go b/zwis/arc.go
index 35d465d..61ce3a1 100644
--- a/zwis/arc.go
+++ b/zwis/arc.go
@@ -1 +1,253 @@
 package zwis
+
+/*
+Adaptive Replacement Cache (ARC) is a sophisticated caching algorithm that provides
+a high hit rate and adapts to varying access patterns. ARC dynamically balances
+between recent and frequently accessed items by maintaining two lists of pages
+(recently accessed and frequently accessed) and two ghost lists (recently evicted
+from each of the main lists).
+*/
+
+import (
+	"container/list"
+	"context"
+	"sync"
+	"time"
+)
+
+// ARCCache implements the Adaptive Replacement Cache algorithm.
+// It maintains four lists: T1, T2, B1, and B2.
+// T1 and T2 contain cached items, while B1 and B2 contain "ghost" entries (only keys).
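+// The adaptation target p controls how much of the capacity is devoted to T1:
+// a hit on a B1 ghost entry grows p (recency is paying off), while a hit on a
+// B2 ghost entry shrinks p (frequency is paying off).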
+type ARCCache struct {
+	capacity int                      // Maximum number of items in the cache
+	p        int                      // Target size for the T1 list
+	t1       *list.List               // List for items accessed once recently
+	t2       *list.List               // List for items accessed at least twice recently
+	b1       *list.List               // Ghost list for items evicted from T1
+	b2       *list.List               // Ghost list for items evicted from T2
+	cache    map[string]*list.Element // Map for quick lookup of list elements
+	mu       sync.Mutex               // Mutex for thread-safety
+}
+
+// arcItem represents an item in the cache.
+type arcItem struct {
+	key        string
+	value      interface{}
+	expiration int64 // Unix timestamp for item expiration (0 means no expiration)
+}
+
+// NewARCCache creates a new ARC cache with the given capacity.
+func NewARCCache(capacity int) *ARCCache {
+	return &ARCCache{
+		capacity: capacity,
+		p:        0,
+		t1:       list.New(),
+		t2:       list.New(),
+		b1:       list.New(),
+		b2:       list.New(),
+		cache:    make(map[string]*list.Element),
+	}
+}
+
+// Get retrieves an item from the cache.
+func (c *ARCCache) Get(ctx context.Context, key string) (interface{}, bool) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if elt, ok := c.cache[key]; ok {
+		item := elt.Value.(*arcItem)
+
+		if item.expiration > 0 && item.expiration < time.Now().UnixNano() {
+			c.remove(key)
+			return nil, false
+		}
+
+		// A hit promotes the item from T1 to T2, or refreshes its position in T2.
+		if c.listContains(c.t1, elt) {
+			c.t1.Remove(elt)
+			c.t2.PushFront(item)
+			c.cache[key] = c.t2.Front()
+		} else if c.listContains(c.t2, elt) {
+			c.t2.MoveToFront(elt)
+		}
+		return item.value, true
+	}
+
+	// A read miss does not modify the cache; adaptation happens on Set.
+	return nil, false
+}
+
+// Set adds or updates an item in the cache.
+func (c *ARCCache) Set(ctx context.Context, key string, value interface{}, ttl time.Duration) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	var expiration int64
+	if ttl > 0 {
+		expiration = time.Now().Add(ttl).UnixNano()
+	}
+
+	if elt, ok := c.cache[key]; ok {
+		item := elt.Value.(*arcItem)
+		item.value = value
+		item.expiration = expiration
+		if c.listContains(c.t1, elt) {
+			c.t1.Remove(elt)
+			c.t2.PushFront(item)
+			c.cache[key] = c.t2.Front()
+		} else if c.listContains(c.t2, elt) {
+			c.t2.MoveToFront(elt)
+		}
+		return nil
+	}
+
+	// New item: a hit in one of the ghost lists ("ghost hit") adjusts the target
+	// size p and means the key was cached recently, so it goes straight into T2.
+	ghostHit := c.request(key)
+
+	if c.t1.Len()+c.t2.Len() >= c.capacity {
+		c.replace(key)
+	}
+
+	item := &arcItem{key: key, value: value, expiration: expiration}
+	if ghostHit {
+		c.removeGhost(key)
+		c.t2.PushFront(item)
+		c.cache[key] = c.t2.Front()
+	} else {
+		c.t1.PushFront(item)
+		c.cache[key] = c.t1.Front()
+	}
+
+	return nil
+}
+
+// Delete removes an item from the cache.
+func (c *ARCCache) Delete(ctx context.Context, key string) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	c.remove(key)
+	return nil
+}
+
+// Clear removes all items from the cache.
+func (c *ARCCache) Clear(ctx context.Context) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	c.t1.Init()
+	c.t2.Init()
+	c.b1.Init()
+	c.b2.Init()
+	c.cache = make(map[string]*list.Element)
+	c.p = 0
+	return nil
+}
+
+// remove deletes an item from the cache and records its key in the appropriate ghost list.
+func (c *ARCCache) remove(key string) {
+	if elt, ok := c.cache[key]; ok {
+		if c.listContains(c.t1, elt) {
+			c.t1.Remove(elt)
+			c.b1.PushFront(key)
+			if c.b1.Len() > c.capacity {
+				c.b1.Remove(c.b1.Back())
+			}
+		} else if c.listContains(c.t2, elt) {
+			c.t2.Remove(elt)
+			c.b2.PushFront(key)
+			if c.b2.Len() > c.capacity {
+				c.b2.Remove(c.b2.Back())
+			}
+		}
+		delete(c.cache, key)
+	}
+}
+
+// replace is called when the cache is full and a new item needs to be added.
+// It chooses which item to evict based on the ARC algorithm.
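+// The least recently used entry of T1 is evicted when T1 has grown beyond the
+// target size p, or when the incoming key is a B2 ghost and T1 is exactly at p;
+// otherwise the least recently used entry of T2 is evicted (falling back to T1
+// if T2 is empty).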
+func (c *ARCCache) replace(key string) {
+	if c.t1.Len() > 0 && (c.t1.Len() > c.p || (c.listContainsKey(c.b2, key) && c.t1.Len() == c.p)) {
+		// Evict the least recently used item from T1 into ghost list B1.
+		c.evictFrom(c.t1, c.b1)
+	} else if c.t2.Len() > 0 {
+		// Evict the least recently used item from T2 into ghost list B2.
+		c.evictFrom(c.t2, c.b2)
+	} else {
+		// T2 is empty, so the eviction has to come from T1.
+		c.evictFrom(c.t1, c.b1)
+	}
+}
+
+// evictFrom removes the least recently used element of src, records its key in
+// the given ghost list, and drops it from the lookup map.
+func (c *ARCCache) evictFrom(src, ghost *list.List) {
+	lru := src.Back()
+	if lru == nil {
+		return
+	}
+	src.Remove(lru)
+	key := lru.Value.(*arcItem).key
+	ghost.PushFront(key)
+	if ghost.Len() > c.capacity {
+		ghost.Remove(ghost.Back())
+	}
+	delete(c.cache, key)
+}
+
+// request updates the target size p based on which ghost list contains key and
+// reports whether the key was found in a ghost list (a "ghost hit").
+func (c *ARCCache) request(key string) bool {
+	if c.listContainsKey(c.b1, key) {
+		// A B1 hit means recency is paying off: grow the target size of T1.
+		c.p = min(c.capacity, c.p+max(c.b2.Len()/c.b1.Len(), 1))
+		return true
+	}
+	if c.listContainsKey(c.b2, key) {
+		// A B2 hit means frequency is paying off: shrink the target size of T1.
+		c.p = max(0, c.p-max(c.b1.Len()/c.b2.Len(), 1))
+		return true
+	}
+	return false
+}
+
+// removeGhost drops key from whichever ghost list currently holds it.
+func (c *ARCCache) removeGhost(key string) {
+	if elt := c.findKey(c.b1, key); elt != nil {
+		c.b1.Remove(elt)
+	} else if elt := c.findKey(c.b2, key); elt != nil {
+		c.b2.Remove(elt)
+	}
+}
+
+// findKey returns the element of ghost list l that holds key, or nil if the list
+// does not contain it.
+func (c *ARCCache) findKey(l *list.List, key string) *list.Element {
+	for e := l.Front(); e != nil; e = e.Next() {
+		if k, ok := e.Value.(string); ok && k == key {
+			return e
+		}
+	}
+	return nil
+}
+
+// listContains checks if a list contains a specific element.
+func (c *ARCCache) listContains(l *list.List, element *list.Element) bool {
+	for e := l.Front(); e != nil; e = e.Next() {
+		if e == element {
+			return true
+		}
+	}
+	return false
+}
+
+// listContainsKey reports whether ghost list l contains an entry for key.
+func (c *ARCCache) listContainsKey(l *list.List, key string) bool {
+	return c.findKey(l, key) != nil
+}
+
+// min returns the minimum of two integers.
+func min(a, b int) int {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+// max returns the maximum of two integers.
+func max(a, b int) int {
+	if a > b {
+		return a
+	}
+	return b
+}
diff --git a/zwis/lfu.go b/zwis/lfu.go
index 35d465d..f1fe8d8 100644
--- a/zwis/lfu.go
+++ b/zwis/lfu.go
@@ -1 +1,167 @@
 package zwis
+
+/*
+Least Frequently Used (LFU) is a caching algorithm that evicts the item with the
+lowest access frequency when the cache overflows. Every Get or Set of a key raises
+that key's frequency, so frequently used items survive while rarely used ones are
+removed; when several items share the lowest frequency, one of them is evicted.
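+
+For example, with a capacity of 2: after Set("a"), Set("b") and a Get("a"), key
+"a" has frequency 2 and "b" has frequency 1, so inserting "c" evicts "b".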
+*/
+
+import (
+	"context"
+	"sync"
+	"time"
+)
+
+// LFUCache is a thread-safe least-frequently-used cache with optional per-item TTLs.
+type LFUCache struct {
+	capacity int
+	items    map[string]*lfuItem
+	freqs    map[int]*freqNode
+	minFreq  int
+	mu       sync.Mutex
+}
+
+type lfuItem struct {
+	key        string
+	value      interface{}
+	frequency  int
+	expiration int64
+	freqNode   *freqNode
+}
+
+// freqNode groups all items that currently share the same access frequency.
+type freqNode struct {
+	freq  int
+	items map[string]*lfuItem
+	prev  *freqNode
+	next  *freqNode
+}
+
+// NewLFUCache creates a new LFU cache with the given capacity.
+func NewLFUCache(capacity int) *LFUCache {
+	return &LFUCache{
+		capacity: capacity,
+		items:    make(map[string]*lfuItem),
+		freqs:    make(map[int]*freqNode),
+	}
+}
+
+// Get retrieves an item from the cache and increments its access frequency.
+func (c *LFUCache) Get(ctx context.Context, key string) (interface{}, bool) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if item, ok := c.items[key]; ok {
+		if item.expiration > 0 && item.expiration < time.Now().UnixNano() {
+			c.remove(item)
+			return nil, false
+		}
+		c.incrementFreq(item)
+		return item.value, true
+	}
+	return nil, false
+}
+
+// Set adds or updates an item in the cache, evicting a least frequently used
+// item first if the cache is full.
+func (c *LFUCache) Set(ctx context.Context, key string, value interface{}, ttl time.Duration) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	var expiration int64
+	if ttl > 0 {
+		expiration = time.Now().Add(ttl).UnixNano()
+	}
+
+	if item, ok := c.items[key]; ok {
+		item.value = value
+		item.expiration = expiration
+		c.incrementFreq(item)
+	} else {
+		if len(c.items) >= c.capacity {
+			c.evict()
+		}
+		item := &lfuItem{key: key, value: value, frequency: 0, expiration: expiration}
+		c.items[key] = item
+		c.incrementFreq(item)
+	}
+	return nil
+}
+
+// Delete removes an item from the cache.
+func (c *LFUCache) Delete(ctx context.Context, key string) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()

+	if item, ok := c.items[key]; ok {
+		c.remove(item)
+	}
+	return nil
+}
+
+// Flush removes all items from the cache.
+func (c *LFUCache) Flush(ctx context.Context) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	c.items = make(map[string]*lfuItem)
+	c.freqs = make(map[int]*freqNode)
+	c.minFreq = 0
+	return nil
+}
+
+// incrementFreq moves item from its current frequency bucket to the next one,
+// creating the bucket if necessary and keeping minFreq up to date.
+func (c *LFUCache) incrementFreq(item *lfuItem) {
+	if item.freqNode != nil {
+		delete(item.freqNode.items, item.key)
+		if len(item.freqNode.items) == 0 {
+			c.removeFreqNode(item.freqNode)
+		}
+	}
+
+	item.frequency++
+	nextFreq := item.frequency
+
+	if node, ok := c.freqs[nextFreq]; ok {
+		node.items[item.key] = item
+		item.freqNode = node
+	} else {
+		node := &freqNode{freq: nextFreq, items: make(map[string]*lfuItem)}
+		c.freqs[nextFreq] = node
+		c.addFreqNode(node)
+		node.items[item.key] = item
+		item.freqNode = node
+	}
+
+	if item.frequency == 1 {
+		c.minFreq = 1
+	} else if item.frequency-1 == c.minFreq {
+		// The bucket the item just left may have been deleted when it became
+		// empty, so check that it still exists before looking at its size.
+		if node, ok := c.freqs[c.minFreq]; !ok || len(node.items) == 0 {
+			c.minFreq++
+		}
+	}
+}
+
+// evict removes one entry from the lowest-frequency bucket.
+func (c *LFUCache) evict() {
+	if node, ok := c.freqs[c.minFreq]; ok {
+		for _, item := range node.items {
+			c.remove(item)
+			break
+		}
+	}
+}
+
+func (c *LFUCache) remove(item *lfuItem) {
+	delete(c.items, item.key)
+	delete(item.freqNode.items, item.key)
+	if len(item.freqNode.items) == 0 {
+		c.removeFreqNode(item.freqNode)
+	}
+}
+
+func (c *LFUCache) removeFreqNode(node *freqNode) {
+	delete(c.freqs, node.freq)
+	if node.prev != nil {
+		node.prev.next = node.next
+	}
+	if node.next != nil {
+		node.next.prev = node.prev
+	}
+}
+
+func (c *LFUCache) addFreqNode(node *freqNode) {
+	if prevNode, ok := c.freqs[node.freq-1]; ok {
+		node.prev = prevNode
+		node.next = prevNode.next
+		prevNode.next = node
+		if node.next != nil {
+			node.next.prev = node
+		}
+	}
+}