/* LRU Cache

   A simple base struct for maintaining a least-recently-used cache of
   entries.  Entries are pushed onto the front of a deque; overflows are
   popped off the back.  A hash table indexes the entries for lookup.

   Use a lru_entry_t as the first member of the struct you want to
   cache, and swaddle these routines with casts.
*/

#include "lru_cache.h"

#include <stdlib.h>
#include <string.h>

static inline unsigned long hash_(struct lru_cache_entry *, lru_hash_feed_fn *);

/* lru_cache_new

   Allocate and initialize a new LRU cache.

   hash_sz  - size of the hash table (this ought to be prime)
   capacity - retain only this many entries in the queue (0 for unlimited)
   feed     - function which provides a buffer for an entry, to generate its hash from
   cmp      - function for comparing two entries
*/
struct lru_cache *
lru_cache_new(size_t hash_sz, size_t capacity,
              lru_hash_feed_fn *feed, lru_entry_cmp_fn *cmp)
{
    struct lru_cache *c;
    size_t sz = sizeof *c + (hash_sz - 1) * sizeof *c->hash;
                /* hash_sz - 1 because of old-style flexible array */

    c = malloc(sz);
    if (c == NULL)
        return NULL;

    /* cast away const to initialize the read-only fields */
    *((size_t *)&(c->hash_sz)) = hash_sz;
    *((size_t *)&(c->capacity)) = capacity;
    c->feed_fn = feed;
    c->cmp_fn = cmp;
    c->num_entries = 0;
    c->newest = NULL;
    c->oldest = NULL;
    memset(c->hash, 0, hash_sz * sizeof *c->hash);
    return c;
}

/* lru_cache_insert

   Insert an entry at the front of the queue.

   cache    - the cache to use
   e        - the entry to insert
   overflow - will be set to the entry which was removed if the cache
              is at capacity, NULL otherwise
*/
void
lru_cache_insert(struct lru_cache *cache, struct lru_cache_entry *e,
                 struct lru_cache_entry **overflow)
{
    *overflow = NULL;
    if (cache->capacity && cache->num_entries == cache->capacity) {
        *overflow = cache->oldest;
        lru_cache_extract(cache, *overflow);
    }

    /* link into the hash chain for its slot */
    e->hash_slot = hash_(e, cache->feed_fn) % cache->hash_sz;
    e->hash_next = cache->hash[e->hash_slot].entry;
    cache->hash[e->hash_slot].entry = e;
    cache->hash[e->hash_slot].tally++;

    /* link onto the front of the queue */
    e->prev = NULL;
    e->next = cache->newest;
    if (cache->newest)
        cache->newest->prev = e;
    else
        cache->oldest = e;
    cache->newest = e;
    cache->num_entries++;
}

/* lru_cache_locate

   Return the first entry for which cache->cmp_fn equates to the
   provided entry to match.
*/
struct lru_cache_entry *
lru_cache_locate(struct lru_cache *cache, struct lru_cache_entry *match)
{
    struct lru_cache_entry *e;
    unsigned long hash;

    hash = hash_(match, cache->feed_fn) % cache->hash_sz;
    for (e = cache->hash[hash].entry; e; e = e->hash_next)
        if (cache->cmp_fn(match, e) == 0)
            break;
    return e;
}

/* lru_cache_extract

   Remove an entry from the cache.
*/
void
lru_cache_extract(struct lru_cache *cache, struct lru_cache_entry *e)
{
    /* remove from hash list */
    if (cache->hash[e->hash_slot].entry == e) {
        cache->hash[e->hash_slot].entry = e->hash_next;
    } else {
        struct lru_cache_entry *hp;
        for (hp = cache->hash[e->hash_slot].entry; hp; hp = hp->hash_next)
            if (hp->hash_next == e) {
                hp->hash_next = e->hash_next;
                break;
            }
    }
    cache->hash[e->hash_slot].tally--;

    /* remove from queue */
    if (cache->newest == e)
        cache->newest = e->next;
    if (cache->oldest == e)
        cache->oldest = e->prev;
    if (e->prev)
        e->prev->next = e->next;
    if (e->next)
        e->next->prev = e->prev;
    cache->num_entries--;
}
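/* Example (an illustrative sketch only, compiled in with
   -DLRU_CACHE_EXAMPLE): one way to apply the embed-and-cast pattern
   described at the top of this file.  The struct and callback names
   below are hypothetical, not part of this API, and the callback
   signatures are assumed from how feed_fn and cmp_fn are invoked in
   this file. */
#ifdef LRU_CACHE_EXAMPLE
#include <stdio.h>

struct path_ent {
    lru_entry_t entry;  /* must be first, so casts to/from lru_entry_t are valid */
    char path[64];      /* lookup key: the bytes fed to the hash */
    long inode;         /* cached value */
};

/* feed callback: hand back the key bytes of an entry for hashing */
static void
path_feed(lru_entry_t *e, char **buf, size_t *len)
{
    struct path_ent *p = (struct path_ent *)e;
    *buf = p->path;
    *len = strlen(p->path);
}

/* cmp callback: return 0 when two entries share a key, as lru_cache_locate expects */
static int
path_cmp(lru_entry_t *a, lru_entry_t *b)
{
    return strcmp(((struct path_ent *)a)->path,
                  ((struct path_ent *)b)->path);
}
#endif /* LRU_CACHE_EXAMPLE */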
/* lru_cache_foreach

   Calls fn(entry, index, data) for each entry in the cache, from
   newest to oldest.
*/
void
lru_cache_foreach(struct lru_cache *cache,
                  void (*fn)(lru_entry_t *, size_t, void *), void *data)
{
    struct lru_cache_entry *e;
    size_t i;

    for (i = 0, e = cache->newest; e; e = e->next, i++)
        fn(e, i, data);
}

/* hash_

   djbx33x - the hash used for the lookup table.  It is best at
   lowercase strings, but should be non-pathological for most inputs.
*/
static inline unsigned long
hash_(struct lru_cache_entry *e, lru_hash_feed_fn *feed_fn)
{
    unsigned long hash = 5381;
    char *buf;
    size_t len;

    feed_fn(e, &buf, &len);
    while (len--)
        hash = ((hash << 5) + hash) ^ *buf++;
    return hash;
}
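/* A minimal driver for the sketch above, also guarded by
   -DLRU_CACHE_EXAMPLE.  It exercises the overflow contract of
   lru_cache_insert (the caller owns any evicted entry), then locate and
   foreach.  Everything here follows from the code above; treat it as a
   sketch, not a reference usage. */
#ifdef LRU_CACHE_EXAMPLE
static void
print_ent(lru_entry_t *e, size_t i, void *data)
{
    struct path_ent *p = (struct path_ent *)e;
    (void)data;
    printf("%zu: %s -> %ld\n", i, p->path, p->inode);
}

int
main(void)
{
    struct lru_cache *c;
    struct path_ent *p, *hit, key;
    lru_entry_t *overflow;
    int i;

    /* 127 is prime, per the lru_cache_new advice; keep at most 2 entries */
    c = lru_cache_new(127, 2, path_feed, path_cmp);
    if (c == NULL)
        return 1;

    for (i = 0; i < 3; i++) {
        p = malloc(sizeof *p);
        if (p == NULL)
            return 1;
        snprintf(p->path, sizeof p->path, "/tmp/file%d", i);
        p->inode = 1000 + i;
        lru_cache_insert(c, (lru_entry_t *)p, &overflow);
        if (overflow)   /* at capacity: the evictee is ours to free */
            free(overflow);
    }

    /* /tmp/file0 was evicted; file2 and file1 remain, newest first */
    snprintf(key.path, sizeof key.path, "/tmp/file1");
    hit = (struct path_ent *)lru_cache_locate(c, (lru_entry_t *)&key);
    printf("file1 %s\n", hit ? "found" : "missing");
    lru_cache_foreach(c, print_ent, NULL);
    return 0;
}
#endif /* LRU_CACHE_EXAMPLE */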