Bug Summary

File: src/sbin/unwind/libunbound/util/storage/lruhash.c
Warning: line 201, column 25
Access to field 'lru_next' results in a dereference of a null pointer (loaded from field 'lru_prev')
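
The path the analyzer reports (steps 1-17 in the annotated source below): lruhash_insert_or_retrieve adds a new entry, and lru_front stores NULL into entry->lru_prev (step 6) while assuming an empty LRU list (steps 7-8), leaving the entry as both lru_start and lru_end. reclaim_space is then entered with table->num assumed greater than 1 (step 15), so d = table->lru_end is that same entry, d->lru_prev is NULL, and the dereference at line 201 is flagged (step 17). The log_assert(d && d->lru_prev) at line 199 evidently compiles away in this build, so it does not cut the path; whether the path is reachable in practice depends on the invariant that table->num matches the length of the LRU chain, which the analyzer does not track.

A minimal sketch (not from the report) of the state the analyzer assumes on entry to the reclaim loop; the struct here is a stand-in, not the real lruhash_entry:

    #include <stddef.h>

    struct entry { struct entry *lru_prev, *lru_next; };

    struct entry e = { NULL, NULL };   /* lru_front: e.lru_prev = NULL */
    struct entry *lru_start = &e;      /* e becomes both head and tail */
    struct entry *lru_end   = &e;
    /* With num > 1 assumed, reclaim_space then evaluates
     *     lru_end->lru_prev->lru_next = NULL;
     * dereferencing the NULL lru_prev -- the reported warning. */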

Annotated Source Code

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name lruhash.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 1 -pic-is-pie -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sbin/unwind/obj -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sbin/unwind -I /usr/src/sbin/unwind -I /usr/src/sbin/unwind/libunbound/libunbound -I /usr/src/sbin/unwind/libunbound -internal-isystem /usr/local/llvm16/lib/clang/16/include -internal-externc-isystem /usr/include -O2 -fdebug-compilation-dir=/usr/src/sbin/unwind/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fno-jump-tables -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/scan/2024-01-11-140451-98009-1 -x c /usr/src/sbin/unwind/libunbound/util/storage/lruhash.c
1/*
2 * util/storage/lruhash.c - hashtable, hash function, LRU keeping.
3 *
4 * Copyright (c) 2007, NLnet Labs. All rights reserved.
5 *
6 * This software is open source.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * Redistributions of source code must retain the above copyright notice,
13 * this list of conditions and the following disclaimer.
14 *
15 * Redistributions in binary form must reproduce the above copyright notice,
16 * this list of conditions and the following disclaimer in the documentation
17 * and/or other materials provided with the distribution.
18 *
19 * Neither the name of the NLNET LABS nor the names of its contributors may
20 * be used to endorse or promote products derived from this software without
21 * specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 */
35
36/**
37 * \file
38 *
39 * This file contains a hashtable with LRU keeping of entries.
40 *
41 */
42
43#include "config.h"
44#include "util/storage/lruhash.h"
45#include "util/fptr_wlist.h"
46
47void
48bin_init(struct lruhash_bin* array, size_t size)
49{
50 size_t i;
51#ifdef THREADS_DISABLED
52 (void)array;
53#endif
54 for(i=0; i<size; i++) {
55 lock_quick_init(&array[i].lock);
56 lock_protect(&array[i].lock, &array[i],
57 sizeof(struct lruhash_bin));
58 }
59}
60
61struct lruhash*
62lruhash_create(size_t start_size, size_t maxmem,
63 lruhash_sizefunc_type sizefunc, lruhash_compfunc_type compfunc,
64 lruhash_delkeyfunc_type delkeyfunc,
65 lruhash_deldatafunc_type deldatafunc, void* arg)
66{
67 struct lruhash* table = (struct lruhash*)calloc(1,
68 sizeof(struct lruhash));
69 if(!table)
70 return NULL;
71 lock_quick_init(&table->lock);
72 table->sizefunc = sizefunc;
73 table->compfunc = compfunc;
74 table->delkeyfunc = delkeyfunc;
75 table->deldatafunc = deldatafunc;
76 table->cb_arg = arg;
77 table->size = start_size;
78 table->size_mask = (int)(start_size-1);
79 table->lru_start = NULL;
80 table->lru_end = NULL;
81 table->num = 0;
82 table->space_used = 0;
83 table->space_max = maxmem;
84 table->max_collisions = 0;
85 table->array = calloc(table->size, sizeof(struct lruhash_bin));
86 if(!table->array) {
87 lock_quick_destroy(&table->lock);
88 free(table);
89 return NULL;
90 }
91 bin_init(table->array, table->size);
92 lock_protect(&table->lock, table, sizeof(*table));
93 lock_protect(&table->lock, table->array,
94 table->size*sizeof(struct lruhash_bin));
95 return table;
96}
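
Note in passing: size_mask = start_size - 1 only works as a bin index mask when start_size is a power of two; the subtraction then yields an all-ones low-bit mask, and hash & size_mask equals hash % start_size. A quick standalone illustration of that identity (not part of lruhash.c; the driver is made up):

    #include <assert.h>
    #include <stddef.h>

    int main(void)
    {
        size_t start_size = 1024;               /* must be a power of two */
        int size_mask = (int)(start_size - 1);  /* 0x3ff: all low bits set */
        unsigned h;
        for (h = 0; h < 100000; h++)
            assert((h & size_mask) == (h % start_size));  /* same bin */
        return 0;
    }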
97
98void
99bin_delete(struct lruhash* table, struct lruhash_bin* bin)
100{
101 struct lruhash_entry* p, *np;
102 void *d;
103 if(!bin)
104 return;
105 lock_quick_destroy(&bin->lock);
106 p = bin->overflow_list;
107 bin->overflow_list = NULL;
108 while(p) {
109 np = p->overflow_next;
110 d = p->data;
111 (*table->delkeyfunc)(p->key, table->cb_arg);
112 (*table->deldatafunc)(d, table->cb_arg);
113 p = np;
114 }
115}
116
117void
118bin_split(struct lruhash* table, struct lruhash_bin* newa,
119 int newmask)
120{
121 size_t i;
122 struct lruhash_entry *p, *np;
123 struct lruhash_bin* newbin;
124 /* move entries to new table. Notice that since hash x is mapped to
125 * bin x & mask, and the new mask uses one more bit, all entries in
126 * one bin will go into the old bin or into bin | newbit */
127#ifndef THREADS_DISABLED
128 int newbit = newmask - table->size_mask;
129#endif
130 /* so, really, this task could also be threaded, per bin. */
131 /* LRU list is not changed */
132 for(i=0; i<table->size; i++)
133 {
134 lock_quick_lock(&table->array[i].lock);
135 p = table->array[i].overflow_list;
136 /* lock both destination bins */
137 lock_quick_lock(&newa[i].lock);
138 lock_quick_lock(&newa[newbit|i].lock);
139 while(p) {
140 np = p->overflow_next;
141 /* link into correct new bin */
142 newbin = &newa[p->hash & newmask];
143 p->overflow_next = newbin->overflow_list;
144 newbin->overflow_list = p;
145 p=np;
146 }
147 lock_quick_unlock(&newa[i].lock);
148 lock_quick_unlock(&newa[newbit|i].lock);
149 lock_quick_unlock(&table->array[i].lock);
150 }
151}
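
The comment above claims that, because the new mask adds exactly one bit, every entry in old bin i can only land in new bin i or new bin i | newbit. A standalone check of that invariant (illustrative only; the mask values are made up):

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        int oldmask = 0x7;                 /* 8 bins                      */
        int newmask = (oldmask << 1) | 1;  /* 16 bins, one extra mask bit */
        int newbit  = newmask - oldmask;   /* the single added bit (0x8)  */
        unsigned h;
        for (h = 0; h < 4096; h++) {
            int oldbin = h & oldmask;
            int newbin = h & newmask;
            /* each entry stays at its old index or moves to index|newbit */
            assert(newbin == oldbin || newbin == (oldbin | newbit));
        }
        printf("bin_split invariant holds\n");
        return 0;
    }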
152
153void
154lruhash_delete(struct lruhash* table)
155{
156 size_t i;
157 if(!table)
158 return;
159 /* destroy the hashtable lock, to force a check that it is OK */
160 lock_quick_destroy(&table->lock);
161 for(i=0; i<table->size; i++)
162 bin_delete(table, &table->array[i]);
163 free(table->array);
164 free(table);
165}
166
167void
168bin_overflow_remove(struct lruhash_bin* bin, struct lruhash_entry* entry)
169{
170 struct lruhash_entry* p = bin->overflow_list;
171 struct lruhash_entry** prevp = &bin->overflow_list;
172 while(p) {
173 if(p == entry) {
174 *prevp = p->overflow_next;
175 return;
176 }
177 prevp = &p->overflow_next;
178 p = p->overflow_next;
179 }
180}
181
182void
183reclaim_space(struct lruhash* table, struct lruhash_entry** list)
184{
185 struct lruhash_entry* d;
186 struct lruhash_bin* bin;
187 log_assert(table);
188 /* does not delete MRU entry, so table will not be empty. */
189 while(table->num > 1 && table->space_used > table->space_max) {
    [15] Assuming field 'num' is > 1
    [15.1] Field 'space_used' is > field 'space_max'
    [16] Loop condition is true. Entering loop body
190 /* notice that since we hold the hashtable lock, nobody
191 can change the lru chain. So it cannot be deleted underneath
192 us. We still need the hashbin and entry write lock to make
193 sure we flush all users away from the entry.
194 which is unlikely, since it is LRU, if someone got a rdlock
195 it would be moved to front, but to be sure. */
196 d = table->lru_end;
197 /* specialised, delete from end of double linked list,
198 and we know num>1, so there is a previous lru entry. */
199 log_assert(d && d->lru_prev);
200 table->lru_end = d->lru_prev;
201 d->lru_prev->lru_next = NULL;
    [17] Access to field 'lru_next' results in a dereference of a null pointer (loaded from field 'lru_prev')
202 /* schedule entry for deletion */
203 bin = &table->array[d->hash & table->size_mask];
204 table->num --;
205 lock_quick_lock(&bin->lock);
206 bin_overflow_remove(bin, d);
207 d->overflow_next = *list;
208 *list = d;
209 lock_rw_wrlock(&d->lock);
210 table->space_used -= table->sizefunc(d->key, d->data);
211 if(table->markdelfunc)
212 (*table->markdelfunc)(d->key);
213 lock_rw_unlock(&d->lock);
214 lock_quick_unlock(&bin->lock);
215 }
216}
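
The specialised tail unlink in the loop above relies on the invariant that table->num counts the entries on the LRU chain, so num > 1 implies lru_end->lru_prev != NULL; the analyzer does not track that coupling, which is how it reaches the line 201 report. Reduced to its essentials, the operation is (illustrative sketch with simplified stand-in types, not the real lruhash_entry):

    #include <stddef.h>

    struct ent { struct ent *lru_prev, *lru_next; };

    /* Remove the last entry of a doubly linked chain. Precondition:
     * the chain holds at least two entries, i.e. d && d->lru_prev,
     * which is exactly what the log_assert on line 199 states. */
    static struct ent *
    pop_tail(struct ent **lru_end)
    {
        struct ent *d = *lru_end;
        *lru_end = d->lru_prev;
        d->lru_prev->lru_next = NULL;  /* the expression flagged at 201 */
        return d;
    }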
217
218struct lruhash_entry*
219bin_find_entry(struct lruhash* table,
220 struct lruhash_bin* bin, hashvalue_type hash, void* key, size_t* collisions)
221{
222 size_t c = 0;
223 struct lruhash_entry* p = bin->overflow_list;
224 while(p) {
225 if(p->hash == hash && table->compfunc(p->key, key) == 0)
226 break;
227 c++;
228 p = p->overflow_next;
229 }
230 if (collisions != NULL)
231 *collisions = c;
232 return p;
233}
234
235void
236table_grow(struct lruhash* table)
237{
238 struct lruhash_bin* newa;
239 int newmask;
240 size_t i;
241 if(table->size_mask == (int)(((size_t)-1)>>1)) {
242 log_err("hash array malloc: size_t too small");
243 return;
244 }
245 /* try to allocate new array, if not fail */
246 newa = calloc(table->size*2, sizeof(struct lruhash_bin));
247 if(!newa) {
248 log_err("hash grow: malloc failed");
249 /* continue with smaller array. Though it's slower. */
250 return;
251 }
252 bin_init(newa, table->size*2);
253 newmask = (table->size_mask << 1) | 1;
254 bin_split(table, newa, newmask);
255 /* delete the old bins */
256 lock_unprotect(&table->lock, table->array);
257 for(i=0; i<table->size; i++) {
258 lock_quick_destroy(&table->array[i].lock);
259 }
260 free(table->array);
261
262 table->size *= 2;
263 table->size_mask = newmask;
264 table->array = newa;
265 lock_protect(&table->lock, table->array,
266 table->size*sizeof(struct lruhash_bin));
267 return;
268}
269
270void
271lru_front(struct lruhash* table, struct lruhash_entry* entry)
272{
273 entry->lru_prev = NULL;
    [6] Null pointer value stored to field 'lru_prev'
274 entry->lru_next = table->lru_start;
275 if(!table->lru_start)
    [7] Assuming field 'lru_start' is null
    [8] Taking true branch
276 table->lru_end = entry;
277 else table->lru_start->lru_prev = entry;
278 table->lru_start = entry;
279}
280
281void
282lru_remove(struct lruhash* table, struct lruhash_entry* entry)
283{
284 if(entry->lru_prev)
285 entry->lru_prev->lru_next = entry->lru_next;
286 else table->lru_start = entry->lru_next;
287 if(entry->lru_next)
288 entry->lru_next->lru_prev = entry->lru_prev;
289 else table->lru_end = entry->lru_prev;
290}
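
lru_remove handles four unlink cases with two branches: a NULL lru_prev means the entry is the head (lru_start advances), a NULL lru_next means it is the tail (lru_end retreats), and interior entries patch both neighbours. A minimal exercise of the same branch structure (illustrative; the types and driver are made up):

    #include <assert.h>
    #include <stddef.h>

    struct ent { struct ent *lru_prev, *lru_next; };
    static struct ent *lru_start, *lru_end;

    static void
    unlink_ent(struct ent *e)  /* mirrors the branches of lru_remove */
    {
        if (e->lru_prev) e->lru_prev->lru_next = e->lru_next;
        else lru_start = e->lru_next;
        if (e->lru_next) e->lru_next->lru_prev = e->lru_prev;
        else lru_end = e->lru_prev;
    }

    int main(void)
    {
        struct ent a = {NULL, NULL}, b = {NULL, NULL}, c = {NULL, NULL};
        a.lru_next = &b; b.lru_prev = &a;      /* chain: a <-> b <-> c */
        b.lru_next = &c; c.lru_prev = &b;
        lru_start = &a; lru_end = &c;
        unlink_ent(&b);                        /* interior entry       */
        assert(a.lru_next == &c && c.lru_prev == &a);
        unlink_ent(&a);                        /* head entry           */
        assert(lru_start == &c && c.lru_prev == NULL);
        unlink_ent(&c);                        /* tail and last entry  */
        assert(lru_start == NULL && lru_end == NULL);
        return 0;
    }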
291
292void
293lru_touch(struct lruhash* table, struct lruhash_entry* entry)
294{
295 log_assert(table && entry);
296 if(entry == table->lru_start)
297 return; /* nothing to do */
298 /* remove from current lru position */
299 lru_remove(table, entry);
300 /* add at front */
301 lru_front(table, entry);
302}
303
304void
305lruhash_insert(struct lruhash* table, hashvalue_type hash,
306 struct lruhash_entry* entry, void* data, void* cb_arg)
307{
308 struct lruhash_bin* bin;
309 struct lruhash_entry* found, *reclaimlist=NULL;
310 size_t need_size;
311 size_t collisions;
312 fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc));
313 fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
314 fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
315 fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc));
316 fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));
317 need_size = table->sizefunc(entry->key, data);
318 if(cb_arg == NULL) cb_arg = table->cb_arg;
319
320 /* find bin */
321 lock_quick_lock(&table->lock);
322 bin = &table->array[hash & table->size_mask];
323 lock_quick_lock(&bin->lock);
324
325 /* see if entry exists already */
326 if(!(found=bin_find_entry(table, bin, hash, entry->key, &collisions))) {
327 /* if not: add to bin */
328 entry->overflow_next = bin->overflow_list;
329 bin->overflow_list = entry;
330 lru_front(table, entry);
331 table->num++;
332 if (table->max_collisions < collisions)
333 table->max_collisions = collisions;
334 table->space_used += need_size;
335 } else {
336 /* if so: update data - needs a writelock */
337 table->space_used += need_size -
338 (*table->sizefunc)(found->key, found->data);
339 (*table->delkeyfunc)(entry->key, cb_arg);
340 lru_touch(table, found);
341 lock_rw_wrlock(&found->lock);
342 (*table->deldatafunc)(found->data, cb_arg);
343 found->data = data;
344 lock_rw_unlock(&found->lock);
345 }
346 lock_quick_unlock(&bin->lock);
347 if(table->space_used > table->space_max)
348 reclaim_space(table, &reclaimlist);
349 if(table->num >= table->size)
350 table_grow(table);
351 lock_quick_unlock(&table->lock);
352
353 /* finish reclaim if any (outside of critical region) */
354 while(reclaimlist) {
355 struct lruhash_entry* n = reclaimlist->overflow_next;
356 void* d = reclaimlist->data;
357 (*table->delkeyfunc)(reclaimlist->key, cb_arg);
358 (*table->deldatafunc)(d, cb_arg);
359 reclaimlist = n;
360 }
361}
362
363struct lruhash_entry*
364lruhash_lookup(struct lruhash* table, hashvalue_type hash, void* key, int wr)
365{
366 struct lruhash_entry* entry;
367 struct lruhash_bin* bin;
368 fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc));
369
370 lock_quick_lock(&table->lock);
371 bin = &table->array[hash & table->size_mask];
372 lock_quick_lock(&bin->lock);
373 if((entry=bin_find_entry(table, bin, hash, key, NULL)))
374 lru_touch(table, entry);
375 lock_quick_unlock(&table->lock);
376
377 if(entry) {
378 if(wr) { lock_rw_wrlock(&entry->lock); }
379 else { lock_rw_rdlock(&entry->lock); }
380 }
381 lock_quick_unlock(&bin->lock);
382 return entry;
383}
384
385void
386lruhash_remove(struct lruhash* table, hashvalue_type hash, void* key)
387{
388 struct lruhash_entry* entry;
389 struct lruhash_bin* bin;
390 void *d;
391 fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc));
392 fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
393 fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
394 fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc));
395 fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));
396
397 lock_quick_lock(&table->lock);
398 bin = &table->array[hash & table->size_mask];
399 lock_quick_lock(&bin->lock);
400 if((entry=bin_find_entry(table, bin, hash, key, NULL))) {
401 bin_overflow_remove(bin, entry);
402 lru_remove(table, entry);
403 } else {
404 lock_quick_unlock(&table->lock);
405 lock_quick_unlock(&bin->lock);
406 return;
407 }
408 table->num--;
409 table->space_used -= (*table->sizefunc)(entry->key, entry->data);
410 lock_rw_wrlock(&entry->lock);
411 if(table->markdelfunc)
412 (*table->markdelfunc)(entry->key);
413 lock_rw_unlock(&entry->lock);
414 lock_quick_unlock(&bin->lock);
415 lock_quick_unlock(&table->lock);
416 /* finish removal */
417 d = entry->data;
418 (*table->delkeyfunc)(entry->key, table->cb_arg);
419 (*table->deldatafunc)(d, table->cb_arg);
420}
421
422/** clear bin, respecting locks, does not do space, LRU */
423static void
424bin_clear(struct lruhash* table, struct lruhash_bin* bin)
425{
426 struct lruhash_entry* p, *np;
427 void *d;
428 lock_quick_lock(&bin->lock);
429 p = bin->overflow_list;
430 while(p) {
431 lock_rw_wrlock(&p->lock);
432 np = p->overflow_next;
433 d = p->data;
434 if(table->markdelfunc)
435 (*table->markdelfunc)(p->key);
436 lock_rw_unlock(&p->lock);
437 (*table->delkeyfunc)(p->key, table->cb_arg);
438 (*table->deldatafunc)(d, table->cb_arg);
439 p = np;
440 }
441 bin->overflow_list = NULL;
442 lock_quick_unlock(&bin->lock);
443}
444
445void
446lruhash_clear(struct lruhash* table)
447{
448 size_t i;
449 if(!table)
450 return;
451 fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
452 fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
453 fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));
454
455 lock_quick_lock(&table->lock);
456 for(i=0; i<table->size; i++) {
457 bin_clear(table, &table->array[i]);
458 }
459 table->lru_start = NULL;
460 table->lru_end = NULL;
461 table->num = 0;
462 table->space_used = 0;
463 lock_quick_unlock(&table->lock);
464}
465
466void
467lruhash_status(struct lruhash* table, const char* id, int extended)
468{
469 lock_quick_lock(&table->lock);
470 log_info("%s: %u entries, memory %u / %u",
471 id, (unsigned)table->num, (unsigned)table->space_used,
472 (unsigned)table->space_max);
473 log_info(" itemsize %u, array %u, mask %d",
474 (unsigned)(table->num? table->space_used/table->num : 0),
475 (unsigned)table->size, table->size_mask);
476 if(extended) {
477 size_t i;
478 int min=(int)table->size*2, max=-2;
479 for(i=0; i<table->size; i++) {
480 int here = 0;
481 struct lruhash_entry *en;
482 lock_quick_lock(&table->array[i].lock);
483 en = table->array[i].overflow_list;
484 while(en) {
485 here ++;
486 en = en->overflow_next;
487 }
488 lock_quick_unlock(&table->array[i].lock);
489 if(extended >= 2)
490 log_info("bin[%d] %d", (int)i, here);
491 if(here > max) max = here;
492 if(here < min) min = here;
493 }
494 log_info(" bin min %d, avg %.2lf, max %d", min,
495 (double)table->num/(double)table->size, max);
496 }
497 lock_quick_unlock(&table->lock);
498}
499
500size_t
501lruhash_get_mem(struct lruhash* table)
502{
503 size_t s;
504 lock_quick_lock(&table->lock);
505 s = sizeof(struct lruhash) + table->space_used;
506#ifdef USE_THREAD_DEBUG
507 if(table->size != 0) {
508 size_t i;
509 for(i=0; i<table->size; i++)
510 s += sizeof(struct lruhash_bin) +
511 lock_get_mem(&table->array[i].lock);
512 }
513#else /* no THREAD_DEBUG */
514 if(table->size != 0)
515 s += (table->size)*(sizeof(struct lruhash_bin) +
516 lock_get_mem(&table->array[0].lock));
517#endif
518 lock_quick_unlock(&table->lock);
519 s += lock_get_mem(&table->lock);
520 return s;
521}
522
523void
524lruhash_setmarkdel(struct lruhash* table, lruhash_markdelfunc_type md)
525{
526 lock_quick_lock(&table->lock);
527 table->markdelfunc = md;
528 lock_quick_unlock(&table->lock);
529}
530
531void
532lruhash_traverse(struct lruhash* h, int wr,
533 void (*func)(struct lruhash_entry*, void*), void* arg)
534{
535 size_t i;
536 struct lruhash_entry* e;
537
538 lock_quick_lock(&h->lock);
539 for(i=0; i<h->size; i++) {
540 lock_quick_lock(&h->array[i].lock);
541 for(e = h->array[i].overflow_list; e; e = e->overflow_next) {
542 if(wr) {
543 lock_rw_wrlock(&e->lock);
544 } else {
545 lock_rw_rdlock(&e->lock);
546 }
547 (*func)(e, arg);
548 lock_rw_unlock(&e->lock);
549 }
550 lock_quick_unlock(&h->array[i].lock);
551 }
552 lock_quick_unlock(&h->lock);
553}
554
555/*
556 * Demote: the opposite of touch, move an entry to the bottom
557 * of the LRU pile.
558 */
559
560void
561lru_demote(struct lruhash* table, struct lruhash_entry* entry)
562{
563 log_assert(table && entry);
564 if (entry == table->lru_end)
565 return; /* nothing to do */
566 /* remove from current lru position */
567 lru_remove(table, entry);
568 /* add at end */
569 entry->lru_next = NULL;
570 entry->lru_prev = table->lru_end;
571
572 if (table->lru_end == NULL)
573 {
574 table->lru_start = entry;
575 }
576 else
577 {
578 table->lru_end->lru_next = entry;
579 }
580 table->lru_end = entry;
581}
582
583struct lruhash_entry*
584lruhash_insert_or_retrieve(struct lruhash* table, hashvalue_type hash,
585 struct lruhash_entry* entry, void* data, void* cb_arg)
586{
587 struct lruhash_bin* bin;
588 struct lruhash_entry* found, *reclaimlist = NULL;
589 size_t need_size;
590 size_t collisions;
591 fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc));
592 fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
593 fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
594 fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc));
595 fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));
596 need_size = table->sizefunc(entry->key, data);
597 if (cb_arg == NULL) cb_arg = table->cb_arg;
    [1] Assuming 'cb_arg' is not equal to NULL
    [2] Taking false branch
598
599 /* find bin */
600 lock_quick_lock(&table->lock);
601 bin = &table->array[hash & table->size_mask];
602 lock_quick_lock(&bin->lock);
603
604 /* see if entry exists already */
605 if ((found = bin_find_entry(table, bin, hash, entry->key, &collisions)) != NULL) {
    [3] Assuming the condition is false
    [4] Taking false branch
606 /* if so: keep the existing data - acquire a writelock */
607 lock_rw_wrlock(&found->lock);
608 }
609 else
610 {
611 /* if not: add to bin */
612 entry->overflow_next = bin->overflow_list;
613 bin->overflow_list = entry;
614 lru_front(table, entry);
    [5] Calling 'lru_front'
    [9] Returning from 'lru_front'
615 table->num++;
616 if (table->max_collisions < collisions)
    [10] Assuming 'collisions' is <= field 'max_collisions'
    [11] Taking false branch
617 table->max_collisions = collisions;
618 table->space_used += need_size;
619 /* return the entry that was presented, and lock it */
620 found = entry;
621 lock_rw_wrlock(&found->lock);
622 }
623 lock_quick_unlock(&bin->lock);
624 if (table->space_used > table->space_max)
    [12] Assuming field 'space_used' is > field 'space_max'
    [13] Taking true branch
625 reclaim_space(table, &reclaimlist);
    [14] Calling 'reclaim_space'
626 if (table->num >= table->size)
627 table_grow(table);
628 lock_quick_unlock(&table->lock);
629
630 /* finish reclaim if any (outside of critical region) */
631 while (reclaimlist) {
632 struct lruhash_entry* n = reclaimlist->overflow_next;
633 void* d = reclaimlist->data;
634 (*table->delkeyfunc)(reclaimlist->key, cb_arg);
635 (*table->deldatafunc)(d, cb_arg);
636 reclaimlist = n;
637 }
638
639 /* return the entry that was selected */
640 return found;
641}
642