/*
 * Generic implementation of hash-based key value mappings.
 */
#include "cache.h"
#include "hashmap.h"

#define FNV32_BASE ((unsigned int) 0x811c9dc5)
#define FNV32_PRIME ((unsigned int) 0x01000193)

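/*
 * The hash functions below implement the 32-bit FNV-1 algorithm: starting
 * from the offset basis FNV32_BASE, each input byte is folded in with
 * hash = (hash * FNV32_PRIME) ^ byte. The case-insensitive variants
 * upcase ASCII letters before folding them in.
 */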
unsigned int strhash(const char *str)
{
	unsigned int c, hash = FNV32_BASE;
	while ((c = (unsigned char) *str++))
		hash = (hash * FNV32_PRIME) ^ c;
	return hash;
}

unsigned int strihash(const char *str)
{
	unsigned int c, hash = FNV32_BASE;
	while ((c = (unsigned char) *str++)) {
		if (c >= 'a' && c <= 'z')
			c -= 'a' - 'A';
		hash = (hash * FNV32_PRIME) ^ c;
	}
	return hash;
}

unsigned int memhash(const void *buf, size_t len)
{
	unsigned int hash = FNV32_BASE;
	unsigned char *ucbuf = (unsigned char *) buf;
	while (len--) {
		unsigned int c = *ucbuf++;
		hash = (hash * FNV32_PRIME) ^ c;
	}
	return hash;
}

unsigned int memihash(const void *buf, size_t len)
{
	unsigned int hash = FNV32_BASE;
	unsigned char *ucbuf = (unsigned char *) buf;
	while (len--) {
		unsigned int c = *ucbuf++;
		if (c >= 'a' && c <= 'z')
			c -= 'a' - 'A';
		hash = (hash * FNV32_PRIME) ^ c;
	}
	return hash;
}

/*
 * Incorporate another chunk of data into a memihash
 * computation.
 */
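/*
 * Because the hash is folded in one byte at a time, feeding the result of
 * a previous memihash() (or memihash_cont()) call back in as the seed is
 * equivalent to hashing the concatenated input in one call, e.g.
 *
 *	memihash_cont(memihash(pre, prelen), rest, restlen)
 *
 * yields the same value as memihash() over "pre" followed by "rest".
 */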
unsigned int memihash_cont(unsigned int hash_seed, const void *buf, size_t len)
{
	unsigned int hash = hash_seed;
	unsigned char *ucbuf = (unsigned char *) buf;
	while (len--) {
		unsigned int c = *ucbuf++;
		if (c >= 'a' && c <= 'z')
			c -= 'a' - 'A';
		hash = (hash * FNV32_PRIME) ^ c;
	}
	return hash;
}

#define HASHMAP_INITIAL_SIZE 64
/* grow / shrink by 2^2 */
#define HASHMAP_RESIZE_BITS 2
/* load factor in percent */
#define HASHMAP_LOAD_FACTOR 80

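/*
 * For illustration: with a table size of 256, grow_at is 256 * 80 / 100
 * = 204 and shrink_at is 204 / 5 = 40, i.e. the table grows fourfold once
 * it holds more than 204 entries and shrinks fourfold once it drops below
 * 40 entries.
 */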
static void alloc_table(struct hashmap *map, unsigned int size)
{
	map->tablesize = size;
	map->table = xcalloc(size, sizeof(struct hashmap_entry *));

	/* calculate resize thresholds for new size */
	map->grow_at = (unsigned int) ((uint64_t) size * HASHMAP_LOAD_FACTOR / 100);
	if (size <= HASHMAP_INITIAL_SIZE)
		map->shrink_at = 0;
	else
		/*
		 * The shrink-threshold must be slightly smaller than
		 * (grow-threshold / resize-factor) to prevent erratic resizing,
		 * thus we divide by (resize-factor + 1).
		 */
		map->shrink_at = map->grow_at / ((1 << HASHMAP_RESIZE_BITS) + 1);
}

static inline int entry_equals(const struct hashmap *map,
		const struct hashmap_entry *e1, const struct hashmap_entry *e2,
		const void *keydata)
{
	return (e1 == e2) || (e1->hash == e2->hash && !map->cmpfn(e1, e2, keydata));
}

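/*
 * Table sizes are always powers of two, so masking with (tablesize - 1)
 * is equivalent to taking the hash modulo the table size.
 */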
static inline unsigned int bucket(const struct hashmap *map,
		const struct hashmap_entry *key)
{
	return key->hash & (map->tablesize - 1);
}

int hashmap_bucket(const struct hashmap *map, unsigned int hash)
{
	return hash & (map->tablesize - 1);
}

static void rehash(struct hashmap *map, unsigned int newsize)
{
	unsigned int i, oldsize = map->tablesize;
	struct hashmap_entry **oldtable = map->table;

	if (map->disallow_rehash)
		return;

	alloc_table(map, newsize);
	for (i = 0; i < oldsize; i++) {
		struct hashmap_entry *e = oldtable[i];
		while (e) {
			struct hashmap_entry *next = e->next;
			unsigned int b = bucket(map, e);
			e->next = map->table[b];
			map->table[b] = e;
			e = next;
		}
	}
	free(oldtable);
}

static inline struct hashmap_entry **find_entry_ptr(const struct hashmap *map,
		const struct hashmap_entry *key, const void *keydata)
{
	struct hashmap_entry **e = &map->table[bucket(map, key)];
	while (*e && !entry_equals(map, *e, key, keydata))
		e = &(*e)->next;
	return e;
}

static int always_equal(const void *unused1, const void *unused2, const void *unused3)
{
	return 0;
}

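/*
 * A minimal usage sketch (struct my_entry, my_entry_cmp and lookup_key are
 * illustrative, not part of this API): callers embed a struct hashmap_entry
 * as the first member of their own entry type, initialize it with the key's
 * hash, and pass a comparison function to hashmap_init():
 *
 *	struct my_entry {
 *		struct hashmap_entry ent;
 *		char key[FLEX_ARRAY];
 *	};
 *
 *	hashmap_init(&map, (hashmap_cmp_fn) my_entry_cmp, 0);
 *	hashmap_entry_init(entry, strhash(entry->key));
 *	hashmap_add(&map, entry);
 *	found = hashmap_get(&map, &lookup_key, NULL);
 */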
void hashmap_init(struct hashmap *map, hashmap_cmp_fn equals_function,
		size_t initial_size)
{
	unsigned int size = HASHMAP_INITIAL_SIZE;

	memset(map, 0, sizeof(*map));

	map->cmpfn = equals_function ? equals_function : always_equal;

	/* calculate initial table size and allocate the table */
	initial_size = (unsigned int) ((uint64_t) initial_size * 100
			/ HASHMAP_LOAD_FACTOR);
	while (initial_size > size)
		size <<= HASHMAP_RESIZE_BITS;
	alloc_table(map, size);
}

void hashmap_free(struct hashmap *map, int free_entries)
{
	if (!map || !map->table)
		return;
	if (free_entries) {
		struct hashmap_iter iter;
		struct hashmap_entry *e;
		hashmap_iter_init(map, &iter);
		while ((e = hashmap_iter_next(&iter)))
			free(e);
	}
	free(map->table);
	memset(map, 0, sizeof(*map));
}

void *hashmap_get(const struct hashmap *map, const void *key, const void *keydata)
{
	return *find_entry_ptr(map, key, keydata);
}

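/*
 * Return the next entry that compares equal to the given one, or NULL if
 * there is none. This is how duplicate entries added via hashmap_add()
 * are enumerated.
 */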
void *hashmap_get_next(const struct hashmap *map, const void *entry)
{
	struct hashmap_entry *e = ((struct hashmap_entry *) entry)->next;
	for (; e; e = e->next)
		if (entry_equals(map, entry, e, NULL))
			return e;
	return NULL;
}

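/*
 * Add the entry unconditionally; unlike hashmap_put(), this does not
 * replace an existing entry with the same key, so the map may end up
 * holding duplicates.
 */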
void hashmap_add(struct hashmap *map, void *entry)
{
	unsigned int b = bucket(map, entry);

	/* add entry */
	((struct hashmap_entry *) entry)->next = map->table[b];
	map->table[b] = entry;

	/* fix size and rehash if appropriate */
	map->size++;
	if (map->size > map->grow_at)
		rehash(map, map->tablesize << HASHMAP_RESIZE_BITS);
}

void *hashmap_remove(struct hashmap *map, const void *key, const void *keydata)
{
	struct hashmap_entry *old;
	struct hashmap_entry **e = find_entry_ptr(map, key, keydata);
	if (!*e)
		return NULL;

	/* remove existing entry */
	old = *e;
	*e = old->next;
	old->next = NULL;

	/* fix size and rehash if appropriate */
	map->size--;
	if (map->size < map->shrink_at)
		rehash(map, map->tablesize >> HASHMAP_RESIZE_BITS);
	return old;
}

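/*
 * Add or replace: any existing entry with the same key is removed first
 * and returned to the caller, so it can be freed or reused.
 */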
void *hashmap_put(struct hashmap *map, void *entry)
{
	struct hashmap_entry *old = hashmap_remove(map, entry, NULL);
	hashmap_add(map, entry);
	return old;
}

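/*
 * Iteration visits all entries in an unspecified order. A typical loop
 * (struct my_entry and use() are illustrative) looks like:
 *
 *	struct hashmap_iter iter;
 *	struct my_entry *e;
 *
 *	hashmap_iter_init(&map, &iter);
 *	while ((e = hashmap_iter_next(&iter)))
 *		use(e);
 */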
void hashmap_iter_init(struct hashmap *map, struct hashmap_iter *iter)
{
	iter->map = map;
	iter->tablepos = 0;
	iter->next = NULL;
}

void *hashmap_iter_next(struct hashmap_iter *iter)
{
	struct hashmap_entry *current = iter->next;
	for (;;) {
		if (current) {
			iter->next = current->next;
			return current;
		}

		if (iter->tablepos >= iter->map->tablesize)
			return NULL;

		current = iter->map->table[iter->tablepos++];
	}
}

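/*
 * Memory interning pool, built on the hashmap above: memintern() stores
 * one immutable copy of each distinct byte sequence and hands out a
 * pointer to it, so equal contents always yield the same pointer.
 */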
struct pool_entry {
	struct hashmap_entry ent;
	size_t len;
	unsigned char data[FLEX_ARRAY];
};

static int pool_entry_cmp(const struct pool_entry *e1,
		const struct pool_entry *e2,
		const unsigned char *keydata)
{
	return e1->data != keydata &&
	       (e1->len != e2->len || memcmp(e1->data, keydata, e1->len));
}

const void *memintern(const void *data, size_t len)
{
	static struct hashmap map;
	struct pool_entry key, *e;

	/* initialize string pool hashmap */
	if (!map.tablesize)
		hashmap_init(&map, (hashmap_cmp_fn) pool_entry_cmp, 0);

	/* lookup interned string in pool */
	hashmap_entry_init(&key, memhash(data, len));
	key.len = len;
	e = hashmap_get(&map, &key, data);
	if (!e) {
		/* not found: create it */
		FLEX_ALLOC_MEM(e, data, data, len);
		hashmap_entry_init(e, key.ent.hash);
		e->len = len;
		hashmap_add(&map, e);
	}
	return e->data;
}