removed debugging cruft, added README and stub headers for standalone testing
[fb-resolver] / lru_cache.c
/*
   LRU Cache

   A simple base struct for maintaining a least-recently-used cache of
   entries.

   Entries are pushed onto the front of a deque; overflows are popped
   off the back. A hash table indexes the entries for lookup.

   Use a lru_entry_t as the first member of the struct you want to
   cache, and swaddle these routines with casts.
*/
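
/*
   For illustration, a cacheable struct embeds the entry as its first
   member, so pointers can be cast in both directions. struct page and
   its fields are hypothetical names, not part of this module:

       struct page {
           lru_entry_t lru;
           char        key[32];
           void       *data;
       };

   Because lru is the first member, a struct page * and a pointer to
   its lru_entry_t have the same address.
*/
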
#include "lru_cache.h"

#include <stdlib.h>
#include <string.h>

static inline unsigned long hash_(struct lru_cache_entry *, lru_hash_feed_fn *);


/* lru_cache_new
   Allocate and initialize a new LRU cache.

   hash_sz  - size of the hash table (this ought to be prime)
   capacity - retain at most this many entries in the queue (0 for unlimited)
   feed     - function which provides an entry's buffer, from which its hash is generated
   cmp      - function for comparing two entries; returns 0 when they match
*/
struct lru_cache *
lru_cache_new(size_t hash_sz,
              size_t capacity,
              lru_hash_feed_fn *feed,
              lru_entry_cmp_fn *cmp)
{
    struct lru_cache *c;
    size_t sz = sizeof *c + (hash_sz - 1) * sizeof *c->hash;
    /* hash_sz - 1 because of old-style flexible array */

    c = malloc(sz);
    if (c == NULL)
        return NULL;

    /* cast away const to initialize the read-only members */
    *((size_t *)&(c->hash_sz)) = hash_sz;
    *((size_t *)&(c->capacity)) = capacity;

    c->feed_fn = feed;
    c->cmp_fn = cmp;

    c->num_entries = 0;
    c->newest = NULL;
    c->oldest = NULL;

    memset(c->hash, 0, hash_sz * sizeof *c->hash);

    return c;
}
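
/*
   A construction sketch. The callbacks are hypothetical and continue
   the struct page example above; their shapes are inferred from how
   feed_fn and cmp_fn are invoked elsewhere in this file:

       static void
       page_feed(struct lru_cache_entry *e, char **buf, size_t *len)
       {
           struct page *p = (struct page *)e;

           *buf = p->key;
           *len = strlen(p->key);
       }

       static int
       page_cmp(struct lru_cache_entry *a, struct lru_cache_entry *b)
       {
           return strcmp(((struct page *)a)->key, ((struct page *)b)->key);
       }

       struct lru_cache *cache = lru_cache_new(509, 1000, page_feed, page_cmp);

   509 is an arbitrary prime; 1000 caps the cache at a thousand entries.
*/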


/* lru_cache_insert
   Insert an entry at the front of the queue.

   cache    - the cache to use
   e        - the entry to insert
   overflow - set to the entry which was evicted if the cache was at
              capacity, NULL otherwise
*/
void
lru_cache_insert(struct lru_cache *cache, struct lru_cache_entry *e, struct lru_cache_entry **overflow)
{
    *overflow = NULL;
    if (cache->capacity && cache->num_entries == cache->capacity) {
        *overflow = cache->oldest;
        lru_cache_extract(cache, *overflow);
    }

    e->hash_slot = hash_(e, cache->feed_fn) % cache->hash_sz;

    /* link into the head of the slot's chain */
    e->hash_next = cache->hash[e->hash_slot].entry;
    cache->hash[e->hash_slot].entry = e;
    cache->hash[e->hash_slot].tally++;

    /* link into the front of the queue */
    e->prev = NULL;
    e->next = cache->newest;
    if (cache->newest)
        cache->newest->prev = e;
    else
        cache->oldest = e;
    cache->newest = e;

    cache->num_entries++;
}
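
/*
   The caller owns the evicted entry. A typical call site, where
   page_free is a hypothetical destructor for the embedding struct:

       struct lru_cache_entry *evicted;

       lru_cache_insert(cache, &p->lru, &evicted);
       if (evicted)
           page_free((struct page *)evicted);
*/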


/* lru_cache_locate
   Return the first entry which cache->cmp_fn reports as equal to the
   provided match entry, or NULL if there is none.
*/
struct lru_cache_entry *
lru_cache_locate(struct lru_cache *cache, struct lru_cache_entry *match)
{
    struct lru_cache_entry *e;
    unsigned long hash;

    hash = hash_(match, cache->feed_fn) % cache->hash_sz;

    for (e = cache->hash[hash].entry; e; e = e->hash_next)
        if (cache->cmp_fn(match, e) == 0)
            break;

    return e;
}
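
/*
   The match argument is a probe: a throwaway entry carrying just enough
   state for feed/cmp to work. Continuing the hypothetical struct page
   example:

       struct page probe;
       struct page *hit;

       strcpy(probe.key, "index.html");
       hit = (struct page *)lru_cache_locate(cache, &probe.lru);

   Note that locate does not promote the entry; to mark it as most
   recently used, extract and reinsert it (see the sketch after
   lru_cache_extract below).
*/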


/* lru_cache_extract
   Remove an entry from the cache.
*/
void
lru_cache_extract(struct lru_cache *cache, struct lru_cache_entry *e)
{
    /* remove from hash list */
    if (cache->hash[e->hash_slot].entry == e) {
        cache->hash[e->hash_slot].entry = e->hash_next;
    } else {
        struct lru_cache_entry *hp;

        for (hp = cache->hash[e->hash_slot].entry; hp; hp = hp->hash_next)
            if (hp->hash_next == e) {
                hp->hash_next = e->hash_next;
                break;
            }
    }
    cache->hash[e->hash_slot].tally--;

    /* remove from queue */
    if (cache->newest == e)
        cache->newest = e->next;
    if (cache->oldest == e)
        cache->oldest = e->prev;
    if (e->prev)
        e->prev->next = e->next;
    if (e->next)
        e->next->prev = e->prev;

    cache->num_entries--;
}
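
/*
   extract followed by insert is also the "touch" pattern for promoting
   an entry to most recently used. The reinsert cannot evict anything,
   because the cache has just shrunk by one:

       struct lru_cache_entry *evicted;

       lru_cache_extract(cache, e);
       lru_cache_insert(cache, e, &evicted);

   where evicted always comes back NULL.
*/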


/* lru_cache_foreach
   Call fn(entry, index, data) for each entry in the cache, from newest
   to oldest.
*/
void
lru_cache_foreach(struct lru_cache *cache, void (*fn)(lru_entry_t *, size_t, void *), void *data)
{
    struct lru_cache_entry *e, *next;
    size_t i;

    for (i = 0, e = cache->newest; e; e = next, i++) {
        next = e->next;    /* snapshot, so fn may safely extract the current entry */
        fn(e, i, data);
    }
}
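
/*
   A callback sketch; dump_entry and its use of struct page are
   hypothetical:

       static void
       dump_entry(lru_entry_t *e, size_t i, void *data)
       {
           FILE *out = data;

           fprintf(out, "%zu: %s\n", i, ((struct page *)e)->key);
       }

       lru_cache_foreach(cache, dump_entry, stderr);
*/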


/* hash_
   djbx33x: hash = hash * 33 ^ c, seeded with 5381.
   The hash used for the lookup table. It does best at lowercase strings,
   but should be non-pathological for most inputs.
*/
static
inline
unsigned long
hash_(struct lru_cache_entry *e, lru_hash_feed_fn *feed_fn)
{
    unsigned long hash = 5381;
    char *buf;
    size_t len;

    feed_fn(e, &buf, &len);

    /* the cast keeps the mixed-in byte non-negative where char is signed */
    while (len--)
        hash = ((hash << 5) + hash) ^ (unsigned char)*buf++;

    return hash;
}