caches.go
package bam

import "container/list"

// blockCache is the common interface implemented by the caches in this file,
// mapping an int64 key to a cached block of bytes.
type blockCache interface {
	Get(key int64) ([]byte, bool)
	Set(key int64, value []byte)
}
////

// mapCache is a simple map-backed cache intended for small data sets.
type mapCache map[int64][]byte

// newMapCache returns a mapCache with an initial capacity of n entries.
func newMapCache(n int) blockCache {
	return mapCache(make(map[int64][]byte, n))
}
func (x mapCache) Get(key int64) ([]byte, bool) {
	r, b := x[key]
	return r, b
}

func (x mapCache) Set(key int64, value []byte) {
	x[key] = value
}
////

// cacheItem is a single cached block, tagged with the queue that holds it.
type cacheItem struct {
	qid   int
	key   int64
	value []byte
}

// blockLRUCache is a more advanced cache for storing large data sets. It
// follows the S4-LRU scheme described in
// http://www.cs.cornell.edu/~qhuang/papers/sosp_fbanalysis.pdf: four LRU
// queues, with new items entering queue 0 and promoted one queue per cache
// hit until they reach queue 3.
type blockLRUCache struct {
	cap    int // capacity of each individual queue
	data   map[int64]*list.Element
	queues []*list.List
}
// newLRUCache returns a blockLRUCache whose four queues each hold up to
// (capacity+3)/4 items.
func newLRUCache(capacity int) blockCache {
	return &blockLRUCache{
		cap:    (capacity + 3) / 4,
		data:   make(map[int64]*list.Element),
		queues: []*list.List{list.New(), list.New(), list.New(), list.New()},
	}
}
// Get returns the block stored for key and reports whether it was present.
// A hit promotes the item one queue, either by moving it up directly or, if
// the next queue is full, by swapping it with that queue's least recently
// used item.
func (c *blockLRUCache) Get(key int64) ([]byte, bool) {
	v, ok := c.data[key]
	if !ok {
		return nil, false
	}
	item := v.Value.(*cacheItem)
	if item.qid == 3 {
		// Already in the top queue; can't bump up a level.
		c.queues[3].MoveToFront(v)
		return item.value, true
	}
	if c.queues[item.qid+1].Len() < c.cap {
		// Plenty of room to bump up a level.
		c.queues[item.qid].Remove(v)
		item.qid++
		c.data[key] = c.queues[item.qid].PushFront(item)
		return item.value, true
	}
	// The next queue is full, so swap contents with its least recently used
	// item: the requested block moves up a level and the displaced block
	// moves down.
	w := c.queues[item.qid+1].Back()
	other := w.Value.(*cacheItem)
	// Swap keys and values but keep qid on each item, since each item stays
	// in its own queue.
	other.key, item.key = item.key, other.key
	other.value, item.value = item.value, other.value
	c.data[item.key] = v
	c.data[other.key] = w
	c.queues[item.qid].MoveToFront(v)
	c.queues[other.qid].MoveToFront(w)
	// other now holds the requested key and value.
	return other.value, true
}
// Set stores value under key, inserting it at the front of queue 0. When
// queue 0 is full, its least recently used item is evicted and its cacheItem
// is reused for the new entry.
func (c *blockLRUCache) Set(key int64, value []byte) {
	if c.queues[0].Len() < c.cap {
		c.data[key] = c.queues[0].PushFront(&cacheItem{0, key, value})
		return
	}
	// Reuse the tail item of queue 0.
	e := c.queues[0].Back()
	item := e.Value.(*cacheItem)
	delete(c.data, item.key)
	item.key = key
	item.value = value
	c.data[key] = e
	c.queues[0].MoveToFront(e)
}
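
// exampleLRUPromotion is an illustrative sketch, not part of the original
// file: it walks through the S4-LRU promotion behaviour of the cache defined
// above. The capacity of 16 and the keys used are arbitrary values chosen
// for the example.
func exampleLRUPromotion() {
	c := newLRUCache(16) // cap is 4, so each of the four queues holds 4 items
	c.Set(1, []byte{0xca})
	c.Get(1) // hit: promoted from queue 0 to queue 1
	c.Get(1) // hit: promoted from queue 1 to queue 2
	c.Get(1) // hit: promoted from queue 2 to queue 3
	c.Get(1) // hit: already in queue 3, moved to its front
	c.Get(2) // miss: returns (nil, false)
}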