Bug Summary

File: src/usr.sbin/unbound/util/storage/lruhash.c
Warning: line 200, column 25
Access to field 'lru_next' results in a dereference of a null pointer (loaded from field 'lru_prev')
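
The path below runs through lruhash_insert_or_retrieve (steps 1-12): lru_front() stores NULL to entry->lru_prev when the LRU list is empty (step 6), and reclaim_space() then dereferences table->lru_end->lru_prev (step 15). The code relies on the invariant that table->num > 1 implies at least two entries on the LRU chain, so lru_end->lru_prev cannot be NULL there; that invariant is only stated via log_assert(), which the analyzer does not treat as a guarantee, so this is most likely a report against an unchecked invariant rather than a reachable crash.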

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name lruhash.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 1 -pic-is-pie -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/usr.sbin/unbound/obj -resource-dir /usr/local/lib/clang/13.0.0 -I . -I /usr/src/usr.sbin/unbound -D SRCDIR=/usr/src/usr.sbin/unbound -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -fdebug-compilation-dir=/usr/src/usr.sbin/unbound/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c /usr/src/usr.sbin/unbound/util/storage/lruhash.c
1/*
2 * util/storage/lruhash.c - hashtable, hash function, LRU keeping.
3 *
4 * Copyright (c) 2007, NLnet Labs. All rights reserved.
5 *
6 * This software is open source.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * Redistributions of source code must retain the above copyright notice,
13 * this list of conditions and the following disclaimer.
14 *
15 * Redistributions in binary form must reproduce the above copyright notice,
16 * this list of conditions and the following disclaimer in the documentation
17 * and/or other materials provided with the distribution.
18 *
19 * Neither the name of the NLNET LABS nor the names of its contributors may
20 * be used to endorse or promote products derived from this software without
21 * specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 */
35
36/**
37 * \file
38 *
39 * This file contains a hashtable with LRU keeping of entries.
40 *
41 */
42
43#include "config.h"
44#include "util/storage/lruhash.h"
45#include "util/fptr_wlist.h"
46
47void
48bin_init(struct lruhash_bin* array, size_t size)
49{
50 size_t i;
51#ifdef THREADS_DISABLED
52 (void)array;
53#endif
54 for(i=0; i<size; i++) {
55 lock_quick_init(&array[i].lock);
56 lock_protect(&array[i].lock, &array[i],
57 sizeof(struct lruhash_bin));
58 }
59}
60
61struct lruhash*
62lruhash_create(size_t start_size, size_t maxmem,
63 lruhash_sizefunc_type sizefunc, lruhash_compfunc_type compfunc,
64 lruhash_delkeyfunc_type delkeyfunc,
65 lruhash_deldatafunc_type deldatafunc, void* arg)
66{
67 struct lruhash* table = (struct lruhash*)calloc(1,
68 sizeof(struct lruhash));
69 if(!table)
70 return NULL;
71 lock_quick_init(&table->lock);
72 table->sizefunc = sizefunc;
73 table->compfunc = compfunc;
74 table->delkeyfunc = delkeyfunc;
75 table->deldatafunc = deldatafunc;
76 table->cb_arg = arg;
77 table->size = start_size;
78 table->size_mask = (int)(start_size-1);
79 table->lru_start = NULL;
80 table->lru_end = NULL;
81 table->num = 0;
82 table->space_used = 0;
83 table->space_max = maxmem;
84 table->array = calloc(table->size, sizeof(struct lruhash_bin));
85 if(!table->array) {
86 lock_quick_destroy(&table->lock);
87 free(table);
88 return NULL;
89 }
90 bin_init(table->array, table->size);
91 lock_protect(&table->lock, table, sizeof(*table));
92 lock_protect(&table->lock, table->array,
93 table->size*sizeof(struct lruhash_bin));
94 return table;
95}
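
A usage sketch for the constructor. start_size is expected to be a power of two, since size_mask = start_size-1 only behaves as a bit mask for such sizes; the my_* callbacks here are hypothetical placeholders for the caller's key/data handling:

	struct lruhash* t = lruhash_create(1024, 16*1024*1024,
		my_sizefunc, my_compfunc, my_delkeyfunc, my_deldatafunc,
		NULL);
	if(!t)
		fatal_exit("out of memory");
	/* 1024 bins, size_mask 0x3ff, at most ~16 MiB of entry data */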
96
97void
98bin_delete(struct lruhash* table, struct lruhash_bin* bin)
99{
100 struct lruhash_entry* p, *np;
101 void *d;
102 if(!bin)
103 return;
104 lock_quick_destroy(&bin->lock);
105 p = bin->overflow_list;
106 bin->overflow_list = NULL;
107 while(p) {
108 np = p->overflow_next;
109 d = p->data;
110 (*table->delkeyfunc)(p->key, table->cb_arg);
111 (*table->deldatafunc)(d, table->cb_arg);
112 p = np;
113 }
114}
115
116void
117bin_split(struct lruhash* table, struct lruhash_bin* newa,
118 int newmask)
119{
120 size_t i;
121 struct lruhash_entry *p, *np;
122 struct lruhash_bin* newbin;
123 /* move entries to new table. Notice that since hash x is mapped to
124 * bin x & mask, and new mask uses one more bit, so all entries in
125 * one bin will go into the old bin or bin | newbit */
126#ifndef THREADS_DISABLED
127 int newbit = newmask - table->size_mask;
128#endif
129 /* so, really, this task could also be threaded, per bin. */
130 /* LRU list is not changed */
131 for(i=0; i<table->size; i++)
132 {
133 lock_quick_lock(&table->array[i].lock);
134 p = table->array[i].overflow_list;
135 /* lock both destination bins */
136 lock_quick_lock(&newa[i].lock);
137 lock_quick_lock(&newa[newbit|i].lock);
138 while(p) {
139 np = p->overflow_next;
140 /* link into correct new bin */
141 newbin = &newa[p->hash & newmask];
142 p->overflow_next = newbin->overflow_list;
143 newbin->overflow_list = p;
144 p=np;
145 }
146 lock_quick_unlock(&newa[i].lock);
147 lock_quick_unlock(&newa[newbit|i].lock);
148 lock_quick_unlock(&table->array[i].lock);
149 }
150}
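
The mask arithmetic in the comment above can be checked standalone. A minimal sketch (all values illustrative): growing from 8 bins (mask 0x7) to 16 (mask 0xf) makes newbit 0x8, and an entry whose hash selects old bin i ends up in new bin i or i|newbit:

	#include <stdio.h>

	int main(void)
	{
		int oldmask = 0x7, newmask = 0xf; /* 8 bins -> 16 bins */
		int newbit = newmask - oldmask;   /* 0x8, the added top bit */
		unsigned hash = 0x2b;
		/* old bin: 0x2b & 0x7 == 3; new bin: 0x2b & 0xf == 0xb == 3|newbit */
		printf("old %u new %u or'd %u\n", hash & (unsigned)oldmask,
			hash & (unsigned)newmask,
			(hash & (unsigned)oldmask) | (unsigned)newbit);
		return 0;
	}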
151
152void
153lruhash_delete(struct lruhash* table)
154{
155 size_t i;
156 if(!table)
157 return;
158 /* delete lock on hashtable to force check its OK */
159 lock_quick_destroy(&table->lock);
160 for(i=0; i<table->size; i++)
161 bin_delete(table, &table->array[i]);
162 free(table->array);
163 free(table);
164}
165
166void
167bin_overflow_remove(struct lruhash_bin* bin, struct lruhash_entry* entry)
168{
169 struct lruhash_entry* p = bin->overflow_list;
170 struct lruhash_entry** prevp = &bin->overflow_list;
171 while(p) {
172 if(p == entry) {
173 *prevp = p->overflow_next;
174 return;
175 }
176 prevp = &p->overflow_next;
177 p = p->overflow_next;
178 }
179}
180
181void
182reclaim_space(struct lruhash* table, struct lruhash_entry** list)
183{
184 struct lruhash_entry* d;
185 struct lruhash_bin* bin;
186 log_assert(table);
187 /* does not delete MRU entry, so table will not be empty. */
188 while(table->num > 1 && table->space_used > table->space_max) {
    [13.1] Field 'space_used' is > field 'space_max'
    [13] Assuming field 'num' is > 1
    [14] Loop condition is true. Entering loop body
189 /* notice that since we hold the hashtable lock, nobody
190 can change the lru chain. So it cannot be deleted underneath
191 us. We still need the hashbin and entry write lock to make
192 sure we flush all users away from the entry.
193 which is unlikely, since it is LRU, if someone got a rdlock
194 it would be moved to front, but to be sure. */
195 d = table->lru_end;
196 /* specialised, delete from end of double linked list,
197 and we know num>1, so there is a previous lru entry. */
198 log_assert(d && d->lru_prev);
199 table->lru_end = d->lru_prev;
200 d->lru_prev->lru_next = NULL;
    [15] Access to field 'lru_next' results in a dereference of a null pointer (loaded from field 'lru_prev')
201 /* schedule entry for deletion */
202 bin = &table->array[d->hash & table->size_mask];
203 table->num --;
204 lock_quick_lock(&bin->lock);
205 bin_overflow_remove(bin, d);
206 d->overflow_next = *list;
207 *list = d;
208 lock_rw_wrlock(&d->lock);
209 table->space_used -= table->sizefunc(d->key, d->data);
210 if(table->markdelfunc)
211 (*table->markdelfunc)(d->key);
212 lock_rw_unlock(&d->lock);
213 lock_quick_unlock(&bin->lock);
214 }
215}
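
Note that nothing in the loop body itself tells the analyzer that num > 1 guarantees a second LRU entry; the only statement of that invariant is the log_assert. A hypothetical defensive rewrite of the unlink at lines 195-200 (a sketch, not the upstream code) that would also satisfy the checker:

	d = table->lru_end;
	/* defensive: if the chain and the counter ever disagree, stop
	 * reclaiming rather than dereference a NULL lru_prev */
	if(!d || !d->lru_prev)
		break;
	table->lru_end = d->lru_prev;
	d->lru_prev->lru_next = NULL;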
216
217struct lruhash_entry*
218bin_find_entry(struct lruhash* table,
219 struct lruhash_bin* bin, hashvalue_type hash, void* key)
220{
221 struct lruhash_entry* p = bin->overflow_list;
222 while(p) {
223 if(p->hash == hash && table->compfunc(p->key, key) == 0)
224 return p;
225 p = p->overflow_next;
226 }
227 return NULL;
228}
229
230void
231table_grow(struct lruhash* table)
232{
233 struct lruhash_bin* newa;
234 int newmask;
235 size_t i;
236 if(table->size_mask == (int)(((size_t)-1)>>1)) {
237 log_err("hash array malloc: size_t too small");
238 return;
239 }
240 /* try to allocate new array, if not fail */
241 newa = calloc(table->size*2, sizeof(struct lruhash_bin));
242 if(!newa) {
243 log_err("hash grow: malloc failed");
244 /* continue with smaller array. Though its slower. */
245 return;
246 }
247 bin_init(newa, table->size*2);
248 newmask = (table->size_mask << 1) | 1;
249 bin_split(table, newa, newmask);
250 /* delete the old bins */
251 lock_unprotect(&table->lock, table->array);
252 for(i=0; i<table->size; i++) {
253 lock_quick_destroy(&table->array[i].lock);
254 }
255 free(table->array);
256
257 table->size *= 2;
258 table->size_mask = newmask;
259 table->array = newa;
260 lock_protect(&table->lock, table->array,
261 table->size*sizeof(struct lruhash_bin));
262 return;
263}
264
265void
266lru_front(struct lruhash* table, struct lruhash_entry* entry)
267{
268 entry->lru_prev = NULL;
    [6] Null pointer value stored to field 'lru_prev'
269 entry->lru_next = table->lru_start;
270 if(!table->lru_start)
    [7] Assuming field 'lru_start' is null
    [8] Taking true branch
271 table->lru_end = entry;
272 else table->lru_start->lru_prev = entry;
273 table->lru_start = entry;
274}
275
276void
277lru_remove(struct lruhash* table, struct lruhash_entry* entry)
278{
279 if(entry->lru_prev)
280 entry->lru_prev->lru_next = entry->lru_next;
281 else table->lru_start = entry->lru_next;
282 if(entry->lru_next)
283 entry->lru_next->lru_prev = entry->lru_prev;
284 else table->lru_end = entry->lru_prev;
285}
286
287void
288lru_touch(struct lruhash* table, struct lruhash_entry* entry)
289{
290 log_assert(table && entry);
291 if(entry == table->lru_start)
292 return; /* nothing to do */
293 /* remove from current lru position */
294 lru_remove(table, entry);
295 /* add at front */
296 lru_front(table, entry);
297}
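
lru_front, lru_remove and lru_touch maintain the doubly linked LRU chain under the table lock. A hypothetical debug-only helper (not part of unbound) that spells out the invariants they preserve, assuming every stored entry is on the chain:

	/* walk the LRU chain and verify linkage; caller holds table->lock */
	static void lru_check(struct lruhash* table)
	{
		size_t n = 0;
		struct lruhash_entry* e = table->lru_start;
		log_assert(!e || e->lru_prev == NULL);
		while(e) {
			if(e->lru_next)
				log_assert(e->lru_next->lru_prev == e);
			else	log_assert(table->lru_end == e);
			n++;
			e = e->lru_next;
		}
		log_assert(n == table->num);
	}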
298
299void
300lruhash_insert(struct lruhash* table, hashvalue_type hash,
301 struct lruhash_entry* entry, void* data, void* cb_arg)
302{
303 struct lruhash_bin* bin;
304 struct lruhash_entry* found, *reclaimlist=NULL;
305 size_t need_size;
306 fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc));
307 fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
308 fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
309 fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc));
310 fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));
311 need_size = table->sizefunc(entry->key, data);
312 if(cb_arg == NULL) cb_arg = table->cb_arg;
313
314 /* find bin */
315 lock_quick_lock(&table->lock);
316 bin = &table->array[hash & table->size_mask];
317 lock_quick_lock(&bin->lock);
318
319 /* see if entry exists already */
320 if(!(found=bin_find_entry(table, bin, hash, entry->key))) {
321 /* if not: add to bin */
322 entry->overflow_next = bin->overflow_list;
323 bin->overflow_list = entry;
324 lru_front(table, entry);
325 table->num++;
326 table->space_used += need_size;
327 } else {
328 /* if so: update data - needs a writelock */
329 table->space_used += need_size -
330 (*table->sizefunc)(found->key, found->data);
331 (*table->delkeyfunc)(entry->key, cb_arg);
332 lru_touch(table, found);
333 lock_rw_wrlock(&found->lock);
334 (*table->deldatafunc)(found->data, cb_arg);
335 found->data = data;
336 lock_rw_unlock(&found->lock);
337 }
338 lock_quick_unlock(&bin->lock);
339 if(table->space_used > table->space_max)
340 reclaim_space(table, &reclaimlist);
341 if(table->num >= table->size)
342 table_grow(table);
343 lock_quick_unlock(&table->lock);
344
345 /* finish reclaim if any (outside of critical region) */
346 while(reclaimlist) {
347 struct lruhash_entry* n = reclaimlist->overflow_next;
348 void* d = reclaimlist->data;
349 (*table->delkeyfunc)(reclaimlist->key, cb_arg);
350 (*table->deldatafunc)(d, cb_arg);
351 reclaimlist = n;
352 }
353}
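
A hypothetical caller-side sketch of the insert path (k, mydata and h are placeholders; in unbound the lruhash_entry is normally embedded in the caller's key structure so that delkeyfunc can free both together):

	/* k->entry is a struct lruhash_entry embedded in the key struct */
	lock_rw_init(&k->entry.lock);
	k->entry.hash = h;     /* h from the caller's hash function */
	k->entry.key = k;
	k->entry.data = mydata;
	lruhash_insert(table, h, &k->entry, mydata, NULL);
	/* if the key already existed, k was freed through delkeyfunc and
	 * mydata was installed in the surviving entry under its lock */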
354
355struct lruhash_entry*
356lruhash_lookup(struct lruhash* table, hashvalue_type hash, void* key, int wr)
357{
358 struct lruhash_entry* entry;
359 struct lruhash_bin* bin;
360 fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc));
361
362 lock_quick_lock(&table->lock);
363 bin = &table->array[hash & table->size_mask];
364 lock_quick_lock(&bin->lock);
365 if((entry=bin_find_entry(table, bin, hash, key)))
366 lru_touch(table, entry);
367 lock_quick_unlock(&table->lock);
368
369 if(entry) {
370 if(wr) { lock_rw_wrlock(&entry->lock); }
371 else { lock_rw_rdlock(&entry->lock); }
372 }
373 lock_quick_unlock(&bin->lock);
374 return entry;
375}
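
The returned entry still holds its rwlock in the requested mode; releasing it is the caller's job. A hypothetical read-side use:

	struct lruhash_entry* e = lruhash_lookup(table, h, key, 0);
	if(e) {
		use_data(e->data);        /* hypothetical consumer; rdlock held */
		lock_rw_unlock(&e->lock); /* caller must release the entry */
	}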
376
377void
378lruhash_remove(struct lruhash* table, hashvalue_type hash, void* key)
379{
380 struct lruhash_entry* entry;
381 struct lruhash_bin* bin;
382 void *d;
383 fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc));
384 fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
385 fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
386 fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc));
387 fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));
388
389 lock_quick_lock(&table->lock);
390 bin = &table->array[hash & table->size_mask];
391 lock_quick_lock(&bin->lock);
392 if((entry=bin_find_entry(table, bin, hash, key))) {
393 bin_overflow_remove(bin, entry);
394 lru_remove(table, entry);
395 } else {
396 lock_quick_unlock(&table->lock);
397 lock_quick_unlock(&bin->lock);
398 return;
399 }
400 table->num--;
401 table->space_used -= (*table->sizefunc)(entry->key, entry->data);
402 lock_rw_wrlock(&entry->lock);
403 if(table->markdelfunc)
404 (*table->markdelfunc)(entry->key);
405 lock_rw_unlock(&entry->lock);
406 lock_quick_unlock(&bin->lock);
407 lock_quick_unlock(&table->lock);
408 /* finish removal */
409 d = entry->data;
410 (*table->delkeyfunc)(entry->key, table->cb_arg);
411 (*table->deldatafunc)(d, table->cb_arg);
412}
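
Both lruhash_remove and reclaim_space invoke markdelfunc while briefly holding the entry's write lock, so a reader that still holds a reference can notice the entry is condemned. A hypothetical callback shape (struct my_key and its deleted flag are placeholders), registered once via lruhash_setmarkdel further below:

	static void my_markdel(void* key)
	{
		struct my_key* k = (struct my_key*)key; /* hypothetical type */
		k->deleted = 1; /* readers check this before trusting data */
	}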
413
414/** clear bin, respecting locks, does not do space, LRU */
415static void
416bin_clear(struct lruhash* table, struct lruhash_bin* bin)
417{
418 struct lruhash_entry* p, *np;
419 void *d;
420 lock_quick_lock(&bin->lock);
421 p = bin->overflow_list;
422 while(p) {
423 lock_rw_wrlock(&p->lock);
424 np = p->overflow_next;
425 d = p->data;
426 if(table->markdelfunc)
427 (*table->markdelfunc)(p->key);
428 lock_rw_unlock(&p->lock);
429 (*table->delkeyfunc)(p->key, table->cb_arg);
430 (*table->deldatafunc)(d, table->cb_arg);
431 p = np;
432 }
433 bin->overflow_list = NULL;
434 lock_quick_unlock(&bin->lock);
435}
436
437void
438lruhash_clear(struct lruhash* table)
439{
440 size_t i;
441 if(!table)
442 return;
443 fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
444 fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
445 fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));
446
447 lock_quick_lock(&table->lock);
448 for(i=0; i<table->size; i++) {
449 bin_clear(table, &table->array[i]);
450 }
451 table->lru_start = NULL;
452 table->lru_end = NULL;
453 table->num = 0;
454 table->space_used = 0;
455 lock_quick_unlock(&table->lock);
456}
457
458void
459lruhash_status(struct lruhash* table, const char* id, int extended)
460{
461 lock_quick_lock(&table->lock);
462 log_info("%s: %u entries, memory %u / %u",
463 id, (unsigned)table->num, (unsigned)table->space_used,
464 (unsigned)table->space_max);
465 log_info(" itemsize %u, array %u, mask %d",
466 (unsigned)(table->num? table->space_used/table->num : 0),
467 (unsigned)table->size, table->size_mask);
468 if(extended) {
469 size_t i;
470 int min=(int)table->size*2, max=-2;
471 for(i=0; i<table->size; i++) {
472 int here = 0;
473 struct lruhash_entry *en;
474 lock_quick_lock(&table->array[i].lock);
475 en = table->array[i].overflow_list;
476 while(en) {
477 here ++;
478 en = en->overflow_next;
479 }
480 lock_quick_unlock(&table->array[i].lock);
481 if(extended >= 2)
482 log_info("bin[%d] %d", (int)i, here);
483 if(here > max) max = here;
484 if(here < min) min = here;
485 }
486 log_info(" bin min %d, avg %.2lf, max %d", min,
487 (double)table->num/(double)table->size, max);
488 }
489 lock_quick_unlock(&table->lock);
490}
491
492size_t
493lruhash_get_mem(struct lruhash* table)
494{
495 size_t s;
496 lock_quick_lock(&table->lock);
497 s = sizeof(struct lruhash) + table->space_used;
498#ifdef USE_THREAD_DEBUG
499 if(table->size != 0) {
500 size_t i;
501 for(i=0; i<table->size; i++)
502 s += sizeof(struct lruhash_bin) +
503 lock_get_mem(&table->array[i].lock);
504 }
505#else /* no THREAD_DEBUG */
506 if(table->size != 0)
507 s += (table->size)*(sizeof(struct lruhash_bin) +
508 lock_get_mem(&table->array[0].lock));
509#endif
510 lock_quick_unlock(&table->lock);
511 s += lock_get_mem(&table->lock);
512 return s;
513}
514
515void
516lruhash_setmarkdel(struct lruhash* table, lruhash_markdelfunc_type md)
517{
518 lock_quick_lock(&table->lock);
519 table->markdelfunc = md;
520 lock_quick_unlock(&table->lock);
521}
522
523void
524lruhash_traverse(struct lruhash* h, int wr,
525 void (*func)(struct lruhash_entry*, void*), void* arg)
526{
527 size_t i;
528 struct lruhash_entry* e;
529
530 lock_quick_lock(&h->lock);
531 for(i=0; i<h->size; i++) {
532 lock_quick_lock(&h->array[i].lock);
533 for(e = h->array[i].overflow_list; e; e = e->overflow_next) {
534 if(wr) {
535 lock_rw_wrlock(&e->lock);
536 } else {
537 lock_rw_rdlock(&e->lock);
538 }
539 (*func)(e, arg);
540 lock_rw_unlock(&e->lock);
541 }
542 lock_quick_unlock(&h->array[i].lock);
543 }
544 lock_quick_unlock(&h->lock);
545}
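
A hypothetical traversal callback that counts entries; func runs with each entry's lock held in the mode selected by wr:

	static void count_cb(struct lruhash_entry* e, void* arg)
	{
		(void)e;
		(*(size_t*)arg)++;
	}

	/* at the call site: */
	size_t n = 0;
	lruhash_traverse(h, 0, count_cb, &n); /* wr==0: rdlock per entry */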
546
547/*
548 * Demote: the opposite of touch, move an entry to the bottom
549 * of the LRU pile.
550 */
551
552void
553lru_demote(struct lruhash* table, struct lruhash_entry* entry)
554{
555 log_assert(table && entry);
556 if (entry == table->lru_end)
557 return; /* nothing to do */
558 /* remove from current lru position */
559 lru_remove(table, entry);
560 /* add at end */
561 entry->lru_next = NULL;
562 entry->lru_prev = table->lru_end;
563
564 if (table->lru_end == NULL)
565 {
566 table->lru_start = entry;
567 }
568 else
569 {
570 table->lru_end->lru_next = entry;
571 }
572 table->lru_end = entry;
573}
574
575struct lruhash_entry*
576lruhash_insert_or_retrieve(struct lruhash* table, hashvalue_type hash,
577 struct lruhash_entry* entry, void* data, void* cb_arg)
578{
579 struct lruhash_bin* bin;
580 struct lruhash_entry* found, *reclaimlist = NULL;
581 size_t need_size;
582 fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc));
583 fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
584 fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
585 fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc));
586 fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));
587 need_size = table->sizefunc(entry->key, data);
588 if (cb_arg == NULL) cb_arg = table->cb_arg;
    [1] Assuming 'cb_arg' is not equal to NULL
    [2] Taking false branch
589
590 /* find bin */
591 lock_quick_lock(&table->lock);
592 bin = &table->array[hash & table->size_mask];
593 lock_quick_lock(&bin->lock);
594
595 /* see if entry exists already */
596 if ((found = bin_find_entry(table, bin, hash, entry->key)) != NULL) {
    [3] Assuming the condition is false
    [4] Taking false branch
597 /* if so: keep the existing data - acquire a writelock */
598 lock_rw_wrlock(&found->lock);
599 }
600 else
601 {
602 /* if not: add to bin */
603 entry->overflow_next = bin->overflow_list;
604 bin->overflow_list = entry;
605 lru_front(table, entry);
    [5] Calling 'lru_front'
    [9] Returning from 'lru_front'
606 table->num++;
607 table->space_used += need_size;
608 /* return the entry that was presented, and lock it */
609 found = entry;
610 lock_rw_wrlock(&found->lock);
611 }
612 lock_quick_unlock(&bin->lock);
613 if (table->space_used > table->space_max)
    [10] Assuming field 'space_used' is > field 'space_max'
    [11] Taking true branch
614 reclaim_space(table, &reclaimlist);
    [12] Calling 'reclaim_space'
615 if (table->num >= table->size)
616 table_grow(table);
617 lock_quick_unlock(&table->lock);
618
619 /* finish reclaim if any (outside of critical region) */
620 while (reclaimlist) {
621 struct lruhash_entry* n = reclaimlist->overflow_next;
622 void* d = reclaimlist->data;
623 (*table->delkeyfunc)(reclaimlist->key, cb_arg);
624 (*table->deldatafunc)(d, cb_arg);
625 reclaimlist = n;
626 }
627
628 /* return the entry that was selected */
629 return found;
630}
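
Unlike lruhash_insert, an already-present entry wins here: it is returned write-locked and the caller's entry was never linked in. A hypothetical caller (k and mydata as in the insert sketch above):

	struct lruhash_entry* r = lruhash_insert_or_retrieve(table, h,
		&k->entry, mydata, NULL);
	if(r != &k->entry) {
		/* lost the race: dispose of k and mydata ourselves, since
		 * the table never took ownership of them */
	}
	lock_rw_unlock(&r->lock); /* returned entry is write-locked */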
631