-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathcache.go
More file actions
140 lines (120 loc) · 2.72 KB
/
cache.go
File metadata and controls
140 lines (120 loc) · 2.72 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
package go_lru_ttl_cache
import (
"container/list"
"sync"
"time"
)
// LRUCache is a size-bounded key/value cache with least-recently-used
// eviction and optional time-based expiry of entries.
type LRUCache struct {
	values map[interface{}]*cacheValue // key -> entry; entry carries the data and its queue element
	queue  *list.List                  // recency order: front = most recently used, back = eviction candidate

	maxSize    int           // eviction threshold checked on insert
	defaultTTL time.Duration // per-entry lifetime, stamped at insert time

	deleteCallback func(count int64) // invoked with the number of removed entries; may be nil

	lock sync.RWMutex // guards values and queue
}
// NewLRUCache builds a cache from the given configuration. When the
// configured default TTL is non-negative it also starts a background
// goroutine that periodically evicts expired entries.
//
// Package agreement: there is no way to stop this goroutine. Reuse the
// cache if possible; avoid multiple create/delete cycles.
func NewLRUCache(config *ConfigBuilder) *LRUCache {
	cache := &LRUCache{
		values: make(map[interface{}]*cacheValue),
		queue:  list.New(),

		// Configuration
		maxSize:        config.maxSize,
		defaultTTL:     config.defaultTTL,
		deleteCallback: config.deleteCallback,
	}

	if config.defaultTTL >= 0 {
		go func() {
			// A single ticker avoids the per-iteration timer
			// allocation that time.After in a loop incurs.
			ticker := time.NewTicker(config.cleanInterval)
			defer ticker.Stop()
			for range ticker.C {
				cache.cleanInterval()
			}
		}()
	}
	return cache
}
// Get returns the value stored under key and promotes the entry to most
// recently used. The second result reports whether the key was present.
//
// NOTE(review): Get does not check the TTL itself — an expired entry is
// still returned until the background cleaner removes it.
func (c *LRUCache) Get(key interface{}) (interface{}, bool) {
	// Hold the write lock for the whole operation. The original
	// released the read lock before re-acquiring the write lock, which
	// let a concurrent Delete/Clean invalidate the entry in between and
	// left value.data being read without any lock held.
	c.lock.Lock()
	defer c.lock.Unlock()

	value, found := c.values[key]
	if !found {
		return nil, false
	}
	c.queue.MoveToFront(value.link)
	return value.data, true
}
// Set stores value under key. An existing key is updated in place and
// promoted to most recently used; inserting a new key may evict the
// least recently used entry once the cache exceeds maxSize.
//
// NOTE(review): eviction does not invoke deleteCallback — matching the
// original behavior; confirm whether callers expect notification here.
func (c *LRUCache) Set(key interface{}, value interface{}) {
	// One critical section end to end. The original dropped the lock
	// between the existence check and the insert, so two concurrent
	// Sets of the same new key could both take the insert path,
	// leaving an orphaned element in the queue.
	c.lock.Lock()
	defer c.lock.Unlock()

	if existing, found := c.values[key]; found {
		existing.data = value
		c.queue.MoveToFront(existing.link)
		return
	}

	item := &lruQueueItem{
		key: key,
		ttl: time.Now().Add(c.defaultTTL),
	}
	c.values[key] = &cacheValue{
		data: value,
		link: c.queue.PushFront(item),
	}

	// Over capacity: drop the least recently used entry (queue back).
	if c.queue.Len() > c.maxSize {
		oldest := c.queue.Back().Value.(*lruQueueItem)
		c.unsafeDelete(oldest.key, c.values[oldest.key])
	}
}
// Delete removes key from the cache. When an entry was actually removed
// and a delete callback is configured, the callback is invoked with a
// count of 1.
func (c *LRUCache) Delete(key interface{}) {
	// Check and remove under a single write lock. The original checked
	// under RLock and deleted under a later Lock, so two concurrent
	// Deletes of the same key could both pass the check and both fire
	// the callback for a single removal.
	c.lock.Lock()
	value, found := c.values[key]
	if found {
		c.unsafeDelete(key, value)
	}
	c.lock.Unlock()

	// Run the callback outside the lock so it may safely re-enter the
	// cache without deadlocking.
	if found && c.deleteCallback != nil {
		c.deleteCallback(1)
	}
}
// Clean drops every entry by swapping in a fresh map and queue.
// The old structures become garbage once no caller references them.
// TODO: Check if GOCG cleans the dropped values and do not do a memory leaking
func (c *LRUCache) Clean() {
	c.lock.Lock()
	defer c.lock.Unlock()

	c.values = make(map[interface{}]*cacheValue)
	c.queue = list.New()
}
// Size returns the current number of entries in the cache.
func (c *LRUCache) Size() int {
	// A read lock suffices: list.Len only reads the queue. The
	// original took the exclusive write lock for this read-only call.
	c.lock.RLock()
	defer c.lock.RUnlock()
	return c.queue.Len()
}
// cleanInterval removes every expired entry and reports the number of
// deletions to the delete callback. It is run periodically by the
// goroutine started in NewLRUCache. Do not set the clean interval too
// low to avoid CPU load.
func (c *LRUCache) cleanInterval() {
	var deleted int64
	// Hoist the clock read: one timestamp for the whole sweep instead
	// of one per entry, and now.After(ttl) reads better than the
	// original ttl.Sub(time.Now()) < 0.
	now := time.Now()

	c.lock.Lock()
	// Deleting from a map while ranging over it is well-defined in Go.
	for key, value := range c.values {
		item := value.link.Value.(*lruQueueItem)
		if now.After(item.ttl) {
			c.unsafeDelete(key, value)
			deleted++
		}
	}
	c.lock.Unlock()

	// Only notify on actual removals; the original invoked the
	// callback with a count of 0 on every idle pass.
	if deleted > 0 && c.deleteCallback != nil {
		c.deleteCallback(deleted)
	}
}
// unsafeDelete removes key from both the map and the LRU queue.
// Callers must hold c.lock for writing ("unsafe" = no locking here).
func (c *LRUCache) unsafeDelete(key interface{}, value *cacheValue) {
	c.queue.Remove(value.link)
	delete(c.values, key)
	// Dropped the original's trailing `value = nil`: it only assigned
	// the local parameter and had no effect on the caller or the GC.
}