/*
 * A simple base struct for maintaining a least-recently-used cache of
 * entries. Entries are pushed onto the front of a deque; overflows are
 * popped off the back. A hash table indexes the entries for lookup.
 *
 * Use a lru_entry_t as the first member of the struct you want to
 * cache, and swaddle these routines with casts.
 */
#include <stdlib.h>
#include <string.h>

#include "lru_cache.h"
19 static inline unsigned long hash_(struct lru_cache_entry
*, lru_hash_feed_fn
*);
23 Allocate and initialize a new LRU Cache.
25 hash_sz - size of the hash table (this ought to be prime)
26 capacity - retain only this many entries in queue (0 for unlimited)
27 feed - function which provides a buffer for an entry, to generate hash from
28 cmp - function for comparing two entries
31 lru_cache_new(size_t hash_sz
,
33 lru_hash_feed_fn
*feed
,
34 lru_entry_cmp_fn
*cmp
)
37 size_t sz
= sizeof *c
+ (hash_sz
- 1) * sizeof *c
->hash
;
38 /* hash_sz - 1 because of old-style flexible array */
44 *((size_t *)&(c
->hash_sz
)) = hash_sz
;
45 *((size_t *)&(c
->capacity
)) = capacity
;
54 memset(c
->hash
, 0, hash_sz
* sizeof *c
->hash
);
61 Insert an entry into the front of the queue.
63 cache - the cache to use
64 e - the entry to insert
65 overflow - will be set to the entry which was removed if cache is at capacity
68 lru_cache_insert(struct lru_cache
*cache
, struct lru_cache_entry
*e
, struct lru_cache_entry
**overflow
)
71 if (cache
->capacity
&& cache
->num_entries
== cache
->capacity
) {
72 *overflow
= cache
->oldest
;
73 lru_cache_extract(cache
, *overflow
);
76 e
->hash_slot
= hash_(e
, cache
->feed_fn
) % cache
->hash_sz
;
78 e
->hash_next
= cache
->hash
[e
->hash_slot
].entry
;
79 cache
->hash
[e
->hash_slot
].entry
= e
;
80 cache
->hash
[e
->hash_slot
].tally
++;
83 e
->next
= cache
->newest
;
85 cache
->newest
->prev
= e
;
95 Return the first entry for which cache->cmp_fn equates to the provided
98 struct lru_cache_entry
*
99 lru_cache_locate(struct lru_cache
*cache
, struct lru_cache_entry
*match
)
101 struct lru_cache_entry
*e
;
104 hash
= hash_(match
, cache
->feed_fn
) % cache
->hash_sz
;
106 for (e
= cache
->hash
[hash
].entry
; e
; e
= e
->hash_next
)
107 if (cache
->cmp_fn(match
, e
) == 0)
115 Remove an entry from the cache.
118 lru_cache_extract(struct lru_cache
*cache
, struct lru_cache_entry
*e
)
120 /* remove from hash list */
121 if (cache
->hash
[e
->hash_slot
].entry
== e
) {
122 cache
->hash
[e
->hash_slot
].entry
= e
->hash_next
;
124 struct lru_cache_entry
*hp
;
126 for (hp
= cache
->hash
[e
->hash_slot
].entry
; hp
; hp
= hp
->hash_next
)
127 if (hp
->hash_next
== e
) {
128 hp
->hash_next
= e
->hash_next
;
133 hp
->hash_next
= e
->hash_next
;
136 cache
->hash
[e
->hash_slot
].tally
--;
138 /* remove from queue */
139 if (cache
->newest
== e
)
140 cache
->newest
= e
->next
;
141 if (cache
->oldest
== e
)
142 cache
->oldest
= e
->prev
;
144 e
->prev
->next
= e
->next
;
146 e
->next
->prev
= e
->prev
;
148 cache
->num_entries
--;
153 Calls fn(entry, index, data) for each entry in cache, from newest to oldest.
156 lru_cache_foreach(struct lru_cache
*cache
, void (*fn
)(lru_entry_t
*, size_t, void *), void *data
)
158 struct lru_cache_entry
*e
;
161 for (i
= 0, e
= cache
->newest
; e
; e
= e
->next
, i
++)
168 The hash used for lookup table. Is best at lowercase strings, but should
169 be non-pathological for most inputs.
174 hash_(struct lru_cache_entry
*e
, lru_hash_feed_fn
*feed_fn
)
176 unsigned long hash
= 5381;
180 feed_fn(e
, &buf
, &len
);
183 hash
= ((hash
<< 5) + hash
) ^ *buf
++;