Bug Summary

File: src/usr.sbin/unbound/validator/val_neg.c
Warning: line 276, column 3
Use of memory after it is freed
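
Distilled from the analyzer path below, here is a minimal self-contained sketch of the flagged pattern (hypothetical names, heavily simplified; not the unbound code itself). An eviction loop re-reads the cache's LRU tail pointer right after the callee has freed the node it pointed to; this is only safe if the callee reliably moves the tail aside before freeing, which the analyzer cannot prove here.

#include <stdlib.h>

struct node { struct node *prev, *next; size_t size; };
struct cache { struct node *first, *last; size_t use, max; };

/* Unlink a node and free it; stands in for neg_delete_data. */
static void cache_delete(struct cache *c, struct node *n)
{
	if(n->prev) n->prev->next = n->next; else c->first = n->next;
	if(n->next) n->next->prev = n->prev; else c->last = n->prev;
	c->use -= n->size;
	free(n); /* memory released; c->last was updated above */
}

/* Stands in for neg_make_space: the loop condition reads c->last
 * again right after cache_delete freed the node it pointed to.
 * Safe only because of the unlink-before-free ordering above;
 * drop that ordering and this is a genuine use after free. */
static void make_space(struct cache *c, size_t need)
{
	while(c->last && c->max < c->use + need)
		cache_delete(c, c->last);
}

int main(void)
{
	struct cache c = { NULL, NULL, 0, 16 };
	struct node *n = calloc(1, sizeof(*n));
	if(!n) return 1;
	n->size = 32;
	c.first = c.last = n;
	c.use = n->size;
	make_space(&c, 8); /* evicts n; c.last becomes NULL */
	return 0;
}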

Annotated Source Code


clang -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name val_neg.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 1 -pic-is-pie -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/usr.sbin/unbound/obj -resource-dir /usr/local/lib/clang/13.0.0 -I . -I /usr/src/usr.sbin/unbound -D SRCDIR=/usr/src/usr.sbin/unbound -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -fdebug-compilation-dir=/usr/src/usr.sbin/unbound/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c /usr/src/usr.sbin/unbound/validator/val_neg.c
1/*
2 * validator/val_neg.c - validator aggressive negative caching functions.
3 *
4 * Copyright (c) 2008, NLnet Labs. All rights reserved.
5 *
6 * This software is open source.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * Redistributions of source code must retain the above copyright notice,
13 * this list of conditions and the following disclaimer.
14 *
15 * Redistributions in binary form must reproduce the above copyright notice,
16 * this list of conditions and the following disclaimer in the documentation
17 * and/or other materials provided with the distribution.
18 *
19 * Neither the name of the NLNET LABS nor the names of its contributors may
20 * be used to endorse or promote products derived from this software without
21 * specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 */
35
36/**
37 * \file
38 *
39 * This file contains helper functions for the validator module.
40 * The functions help with aggressive negative caching.
41 * This creates new denials of existence, and proofs for absence of types
42 * from cached NSEC records.
43 */
44#include "config.h"
45#ifdef HAVE_OPENSSL_SSL_H
46#include "openssl/ssl.h"
47#define NSEC3_SHA_LEN SHA_DIGEST_LENGTH
48#else
49#define NSEC3_SHA_LEN 20
50#endif
51#include "validator/val_neg.h"
52#include "validator/val_nsec.h"
53#include "validator/val_nsec3.h"
54#include "validator/val_utils.h"
55#include "util/data/dname.h"
56#include "util/data/msgreply.h"
57#include "util/log.h"
58#include "util/net_help.h"
59#include "util/config_file.h"
60#include "services/cache/rrset.h"
61#include "services/cache/dns.h"
62#include "sldns/rrdef.h"
63#include "sldns/sbuffer.h"
64
65int val_neg_data_compare(const void* a, const void* b)
66{
67 struct val_neg_data* x = (struct val_neg_data*)a;
68 struct val_neg_data* y = (struct val_neg_data*)b;
69 int m;
70 return dname_canon_lab_cmp(x->name, x->labs, y->name, y->labs, &m);
71}
72
73int val_neg_zone_compare(const void* a, const void* b)
74{
75 struct val_neg_zone* x = (struct val_neg_zone*)a;
76 struct val_neg_zone* y = (struct val_neg_zone*)b;
77 int m;
78 if(x->dclass != y->dclass) {
79 if(x->dclass < y->dclass)
80 return -1;
81 return 1;
82 }
83 return dname_canon_lab_cmp(x->name, x->labs, y->name, y->labs, &m);
84}
85
86struct val_neg_cache* val_neg_create(struct config_file* cfg, size_t maxiter)
87{
88 struct val_neg_cache* neg = (struct val_neg_cache*)calloc(1,
89 sizeof(*neg));
90 if(!neg) {
91 log_err("Could not create neg cache: out of memory");
92 return NULL;
93 }
94 neg->nsec3_max_iter = maxiter;
95 neg->max = 1024*1024; /* 1 M is thousands of entries */
96 if(cfg) neg->max = cfg->neg_cache_size;
97 rbtree_init(&neg->tree, &val_neg_zone_compare);
98 lock_basic_init(&neg->lock);
99 lock_protect(&neg->lock, neg, sizeof(*neg));
100 return neg;
101}
102
103size_t val_neg_get_mem(struct val_neg_cache* neg)
104{
105 size_t result;
106 lock_basic_lock(&neg->lock);
107 result = sizeof(*neg) + neg->use;
108 lock_basic_unlock(&neg->lock);
109 return result;
110}
111
112/** clear datas on cache deletion */
113static void
114neg_clear_datas(rbnode_type* n, void* ATTR_UNUSED(arg))
115{
116 struct val_neg_data* d = (struct val_neg_data*)n;
117 free(d->name);
118 free(d);
119}
120
121/** clear zones on cache deletion */
122static void
123neg_clear_zones(rbnode_type* n, void* ATTR_UNUSED(arg))
124{
125 struct val_neg_zone* z = (struct val_neg_zone*)n;
126 /* delete all the rrset entries in the tree */
127 traverse_postorder(&z->tree, &neg_clear_datas, NULL);
128 free(z->nsec3_salt);
129 free(z->name);
130 free(z);
131}
132
133void neg_cache_delete(struct val_neg_cache* neg)
134{
135 if(!neg) return;
136 lock_basic_destroy(&neg->lock);
137 /* delete all the zones in the tree */
138 traverse_postorder(&neg->tree, &neg_clear_zones, NULL);
139 free(neg);
140}
141
142/**
143 * Put data element at the front of the LRU list.
144 * @param neg: negative cache with LRU start and end.
145 * @param data: this data is fronted.
146 */
147static void neg_lru_front(struct val_neg_cache* neg,
148 struct val_neg_data* data)
149{
150 data->prev = NULL;
151 data->next = neg->first;
152 if(!neg->first)
153 neg->last = data;
154 else neg->first->prev = data;
155 neg->first = data;
156}
157
158/**
159 * Remove data element from LRU list.
160 * @param neg: negative cache with LRU start and end.
161 * @param data: this data is removed from the list.
162 */
163static void neg_lru_remove(struct val_neg_cache* neg,
164 struct val_neg_data* data)
165{
166 if(data->prev)
167 data->prev->next = data->next;
168 else neg->first = data->next;
169 if(data->next)
170 data->next->prev = data->prev;
171 else neg->last = data->prev;
172}
173
174/**
175 * Touch LRU for data element, put it at the start of the LRU list.
176 * @param neg: negative cache with LRU start and end.
177 * @param data: this data is used.
178 */
179static void neg_lru_touch(struct val_neg_cache* neg,
180 struct val_neg_data* data)
181{
182 if(data == neg->first)
183 return; /* nothing to do */
184 /* remove from current lru position */
185 neg_lru_remove(neg, data);
186 /* add at front */
187 neg_lru_front(neg, data);
188}
189
190/**
191 * Delete a zone element from the negative cache.
192 * May delete other zone elements to keep tree coherent, or
193 * only mark the element as 'not in use'.
194 * @param neg: negative cache.
195 * @param z: zone element to delete.
196 */
197static void neg_delete_zone(struct val_neg_cache* neg, struct val_neg_zone* z)
198{
199 struct val_neg_zone* p, *np;
200 if(!z) return;
201 log_assert(z->in_use);
202 log_assert(z->count > 0);
203 z->in_use = 0;
204
205 /* go up the tree and reduce counts */
206 p = z;
207 while(p) {
208 log_assert(p->count > 0);
209 p->count --;
210 p = p->parent;
211 }
212
213 /* remove zones with zero count */
214 p = z;
215 while(p && p->count == 0) {
216 np = p->parent;
217 (void)rbtree_delete(&neg->tree, &p->node);
218 neg->use -= p->len + sizeof(*p);
219 free(p->nsec3_salt);
220 free(p->name);
221 free(p);
222 p = np;
223 }
224}
225
226void neg_delete_data(struct val_neg_cache* neg, struct val_neg_data* el)
227{
228 struct val_neg_zone* z;
229 struct val_neg_data* p, *np;
230 if(!el) return;
9.1
'el' is non-null
10
Taking false branch
231 z = el->zone;
232 log_assert(el->in_use);
233 log_assert(el->count > 0);
234 el->in_use = 0;
235
236 /* remove it from the lru list */
237 neg_lru_remove(neg, el);
238 log_assert(neg->first != el && neg->last != el);
239
240 /* go up the tree and reduce counts */
241 p = el;
242 while(p) {
11
Loop condition is true. Entering loop body
12
Loop condition is false. Execution continues on line 249
243 log_assert(p->count > 0);
244 p->count --;
245 p = p->parent;
246 }
247
248 /* delete 0 count items from tree */
249 p = el;
250 while(p && p->count == 0) {
12.1
'p' is non-null
15.1
'p' is null
13
Assuming field 'count' is equal to 0
14
Loop condition is true. Entering loop body
251 np = p->parent;
252 (void)rbtree_delete(&z->tree, &p->node);
253 neg->use -= p->len + sizeof(*p);
254 free(p->name);
255 free(p);
15
Memory is released
256 p = np;
257 }
258
259 /* check if the zone is now unused */
260 if(z->tree.count == 0) {
16
Assuming field 'count' is not equal to 0
17
Taking false branch
261 neg_delete_zone(neg, z);
262 }
263}
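
A note for readers tracing the path: on our reading (an assumption about the code's invariants, not an upstream statement), the nodes freed in the count==0 sweep above can never be the LRU tail that neg_make_space re-reads, because only in_use elements are linked into the LRU list and an in_use element always counts itself. A minimal, hypothetical check of that bookkeeping:

#include <assert.h>
#include <stddef.h>

struct data { struct data* parent; int in_use; int count; };

/* Mirrors the take-inuse side of the scheme: the element and all
 * its parents gain one reference, so in_use implies count >= 1. */
static void take_inuse(struct data* d)
{
	struct data* p;
	if(!d->in_use) {
		d->in_use = 1;
		for(p = d; p; p = p->parent)
			p->count++;
	}
}

int main(void)
{
	struct data parent = { NULL, 0, 0 };
	struct data child = { &parent, 0, 0 };
	take_inuse(&child);
	/* a node with count == 0 is therefore never in_use, hence
	 * never on the LRU list and never neg->last when freed */
	assert(child.count >= 1 && parent.count >= 1);
	return 0;
}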
264
265/**
266 * Create more space in negative cache
267 * The oldest elements are deleted until enough space is present.
268 * Empty zones are deleted.
269 * @param neg: negative cache.
270 * @param need: how many bytes are needed.
271 */
272static void neg_make_space(struct val_neg_cache* neg, size_t need)
273{
274 /* delete elements until enough space or its empty */
275 while(neg->last && neg->max < neg->use + need) {
6
Assuming field 'last' is non-null
7
Assuming the condition is true
8
Loop condition is true. Entering loop body
18.1
Field 'last' is non-null
19
Assuming the condition is true
20
Loop condition is true. Entering loop body
276 neg_delete_data(neg, neg->last);
9
Calling 'neg_delete_data'
18
Returning; memory was released via 2nd parameter
21
Use of memory after it is freed
277 }
278}
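
If one wanted to make that invariant explicit at the warning site, one option (a sketch under the assumptions above, not the project's fix) is to capture the tail's predecessor before deleting and assert that the list tail advanced to it:

/* Hypothetical variant of neg_make_space. neg_lru_remove() sets
 * neg->last to the tail's prev before anything is freed, so the
 * assertion documents why the loop condition is safe to re-read. */
static void neg_make_space_checked(struct val_neg_cache* neg, size_t need)
{
	while(neg->last && neg->max < neg->use + need) {
		struct val_neg_data* prev = neg->last->prev;
		neg_delete_data(neg, neg->last);
		log_assert(neg->last == prev);
	}
}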
279
280struct val_neg_zone* neg_find_zone(struct val_neg_cache* neg,
281 uint8_t* nm, size_t len, uint16_t dclass)
282{
283 struct val_neg_zone lookfor;
284 struct val_neg_zone* result;
285 lookfor.node.key = &lookfor;
286 lookfor.name = nm;
287 lookfor.len = len;
288 lookfor.labs = dname_count_labels(lookfor.name);
289 lookfor.dclass = dclass;
290
291 result = (struct val_neg_zone*)
292 rbtree_search(&neg->tree, lookfor.node.key);
293 return result;
294}
295
296/**
297 * Find the given data
298 * @param zone: negative zone
299 * @param nm: what to look for.
300 * @param len: length of nm
301 * @param labs: labels in nm
302 * @return data or NULL if not found.
303 */
304static struct val_neg_data* neg_find_data(struct val_neg_zone* zone,
305 uint8_t* nm, size_t len, int labs)
306{
307 struct val_neg_data lookfor;
308 struct val_neg_data* result;
309 lookfor.node.key = &lookfor;
310 lookfor.name = nm;
311 lookfor.len = len;
312 lookfor.labs = labs;
313
314 result = (struct val_neg_data*)
315 rbtree_search(&zone->tree, lookfor.node.key);
316 return result;
317}
318
319/**
320 * Calculate space needed for the data and all its parents
321 * @param rep: NSEC entries.
322 * @return size.
323 */
324static size_t calc_data_need(struct reply_info* rep)
325{
326 uint8_t* d;
327 size_t i, len, res = 0;
328
329 for(i=rep->an_numrrsets; i<rep->an_numrrsets+rep->ns_numrrsets; i++) {
330 if(ntohs(rep->rrsets[i]->rk.type) == LDNS_RR_TYPE_NSEC) {
331 d = rep->rrsets[i]->rk.dname;
332 len = rep->rrsets[i]->rk.dname_len;
333 res = sizeof(struct val_neg_data) + len;
334 while(!dname_is_root(d)) {
335 log_assert(len > 1); /* not root label */
336 dname_remove_label(&d, &len);
337 res += sizeof(struct val_neg_data) + len;
338 }
339 }
340 }
341 return res;
342}
343
344/**
345 * Calculate space needed for zone and all its parents
346 * @param d: name of zone
347 * @param len: length of name
348 * @return size.
349 */
350static size_t calc_zone_need(uint8_t* d, size_t len)
351{
352 size_t res = sizeof(struct val_neg_zone) + len;
353 while(!dname_is_root(d)) {
354 log_assert(len > 1); /* not root label */
355 dname_remove_label(&d, &len);
356 res += sizeof(struct val_neg_zone) + len;
357 }
358 return res;
359}
360
361/**
362 * Find closest existing parent zone of the given name.
363 * @param neg: negative cache.
364 * @param nm: name to look for
365 * @param nm_len: length of nm
366 * @param labs: labelcount of nm.
367 * @param qclass: class.
368 * @return the zone or NULL if none found.
369 */
370static struct val_neg_zone* neg_closest_zone_parent(struct val_neg_cache* neg,
371 uint8_t* nm, size_t nm_len, int labs, uint16_t qclass)
372{
373 struct val_neg_zone key;
374 struct val_neg_zone* result;
375 rbnode_type* res = NULL;
376 key.node.key = &key;
377 key.name = nm;
378 key.len = nm_len;
379 key.labs = labs;
380 key.dclass = qclass;
381 if(rbtree_find_less_equal(&neg->tree, &key, &res)) {
382 /* exact match */
383 result = (struct val_neg_zone*)res;
384 } else {
385 /* smaller element (or no element) */
386 int m;
387 result = (struct val_neg_zone*)res;
388 if(!result || result->dclass != qclass)
389 return NULL;
390 /* count number of labels matched */
391 (void)dname_lab_cmp(result->name, result->labs, key.name,
392 key.labs, &m);
393 while(result) { /* go up until qname is subdomain of stub */
394 if(result->labs <= m)
395 break;
396 result = result->parent;
397 }
398 }
399 return result;
400}
401
402/**
403 * Find closest existing parent data for the given name.
404 * @param zone: to look in.
405 * @param nm: name to look for
406 * @param nm_len: length of nm
407 * @param labs: labelcount of nm.
408 * @return the data or NULL if none found.
409 */
410static struct val_neg_data* neg_closest_data_parent(
411 struct val_neg_zone* zone, uint8_t* nm, size_t nm_len, int labs)
412{
413 struct val_neg_data key;
414 struct val_neg_data* result;
415 rbnode_type* res = NULL;
416 key.node.key = &key;
417 key.name = nm;
418 key.len = nm_len;
419 key.labs = labs;
420 if(rbtree_find_less_equal(&zone->tree, &key, &res)) {
421 /* exact match */
422 result = (struct val_neg_data*)res;
423 } else {
424 /* smaller element (or no element) */
425 int m;
426 result = (struct val_neg_data*)res;
427 if(!result)
428 return NULL;
429 /* count number of labels matched */
430 (void)dname_lab_cmp(result->name, result->labs, key.name,
431 key.labs, &m);
432 while(result) { /* go up until qname is subdomain of stub */
433 if(result->labs <= m)
434 break;
435 result = result->parent;
436 }
437 }
438 return result;
439}
440
441/**
442 * Create a single zone node
443 * @param nm: name for zone (copied)
444 * @param nm_len: length of name
445 * @param labs: labels in name.
446 * @param dclass: class of zone, host order.
447 * @return new zone or NULL on failure
448 */
449static struct val_neg_zone* neg_setup_zone_node(
450 uint8_t* nm, size_t nm_len, int labs, uint16_t dclass)
451{
452 struct val_neg_zone* zone =
453 (struct val_neg_zone*)calloc(1, sizeof(*zone));
454 if(!zone) {
455 return NULL;
456 }
457 zone->node.key = zone;
458 zone->name = memdup(nm, nm_len);
459 if(!zone->name) {
460 free(zone);
461 return NULL;
462 }
463 zone->len = nm_len;
464 zone->labs = labs;
465 zone->dclass = dclass;
466
467 rbtree_init(&zone->tree, &val_neg_data_compare);
468 return zone;
469}
470
471/**
472 * Create a linked list of parent zones, starting at longname ending on
473 * the parent (can be NULL, creates to the root).
474 * @param nm: name for lowest in chain
475 * @param nm_len: length of name
476 * @param labs: labels in name.
477 * @param dclass: class of zone.
478 * @param parent: NULL for to root, else so it fits under here.
479 * @return zone; a chain of zones and their parents up to the parent.
480 * or NULL on malloc failure
481 */
482static struct val_neg_zone* neg_zone_chain(
483 uint8_t* nm, size_t nm_len, int labs, uint16_t dclass,
484 struct val_neg_zone* parent)
485{
486 int i;
487 int tolabs = parent?parent->labs:0;
488 struct val_neg_zone* zone, *prev = NULL, *first = NULL;
489
490 /* create the new subtree, i is labelcount of current creation */
491 /* this creates a 'first' to z->parent=NULL list of zones */
492 for(i=labs; i!=tolabs; i--) {
493 /* create new item */
494 zone = neg_setup_zone_node(nm, nm_len, i, dclass);
495 if(!zone) {
496 /* need to delete other allocations in this routine!*/
497 struct val_neg_zone* p=first, *np;
498 while(p) {
499 np = p->parent;
500 free(p->name);
501 free(p);
502 p = np;
503 }
504 return NULL;
505 }
506 if(i == labs) {
507 first = zone;
508 } else {
509 prev->parent = zone;
510 }
511 /* prepare for next name */
512 prev = zone;
513 dname_remove_label(&nm, &nm_len);
514 }
515 return first;
516}
517
518void val_neg_zone_take_inuse(struct val_neg_zone* zone)
519{
520 if(!zone->in_use) {
521 struct val_neg_zone* p;
522 zone->in_use = 1;
523 /* increase usage count of all parents */
524 for(p=zone; p; p = p->parent) {
525 p->count++;
526 }
527 }
528}
529
530struct val_neg_zone* neg_create_zone(struct val_neg_cache* neg,
531 uint8_t* nm, size_t nm_len, uint16_t dclass)
532{
533 struct val_neg_zone* zone;
534 struct val_neg_zone* parent;
535 struct val_neg_zone* p, *np;
536 int labs = dname_count_labels(nm);
537
538 /* find closest enclosing parent zone that (still) exists */
539 parent = neg_closest_zone_parent(neg, nm, nm_len, labs, dclass);
540 if(parent && query_dname_compare(parent->name, nm) == 0)
541 return parent; /* already exists, weird */
542 /* if parent exists, it is in use */
543 log_assert(!parent || parent->count > 0);
544 zone = neg_zone_chain(nm, nm_len, labs, dclass, parent);
545 if(!zone) {
546 return NULL;
547 }
548
549 /* insert the list of zones into the tree */
550 p = zone;
551 while(p) {
552 np = p->parent;
553 /* mem use */
554 neg->use += sizeof(struct val_neg_zone) + p->len;
555 /* insert in tree */
556 (void)rbtree_insert(&neg->tree, &p->node);
557 /* last one needs proper parent pointer */
558 if(np == NULL)
559 p->parent = parent;
560 p = np;
561 }
562 return zone;
563}
564
565/** find zone name of message, returns the SOA record */
566static struct ub_packed_rrset_key* reply_find_soa(struct reply_info* rep)
567{
568 size_t i;
569 for(i=rep->an_numrrsets; i< rep->an_numrrsets+rep->ns_numrrsets; i++){
570 if(ntohs(rep->rrsets[i]->rk.type) == LDNS_RR_TYPE_SOA)
571 return rep->rrsets[i];
572 }
573 return NULL;
574}
575
576/** see if the reply has NSEC records worthy of caching */
577static int reply_has_nsec(struct reply_info* rep)
578{
579 size_t i;
580 struct packed_rrset_data* d;
581 if(rep->security != sec_status_secure)
582 return 0;
583 for(i=rep->an_numrrsets; i< rep->an_numrrsets+rep->ns_numrrsets; i++){
584 if(ntohs(rep->rrsets[i]->rk.type) == LDNS_RR_TYPE_NSEC) {
585 d = (struct packed_rrset_data*)rep->rrsets[i]->
586 entry.data;
587 if(d->security == sec_status_secure)
588 return 1;
589 }
590 }
591 return 0;
592}
593
594
595/**
596 * Create single node of data element.
597 * @param nm: name (copied)
598 * @param nm_len: length of name
599 * @param labs: labels in name.
600 * @return element with name nm, or NULL malloc failure.
601 */
602static struct val_neg_data* neg_setup_data_node(
603 uint8_t* nm, size_t nm_len, int labs)
604{
605 struct val_neg_data* el;
606 el = (struct val_neg_data*)calloc(1, sizeof(*el));
607 if(!el) {
608 return NULL;
609 }
610 el->node.key = el;
611 el->name = memdup(nm, nm_len);
612 if(!el->name) {
613 free(el);
614 return NULL;
615 }
616 el->len = nm_len;
617 el->labs = labs;
618 return el;
619}
620
621/**
622 * Create chain of data element and parents
623 * @param nm: name
624 * @param nm_len: length of name
625 * @param labs: labels in name.
626 * @param parent: up to where to make, if NULL up to root label.
627 * @return lowest element with name nm, or NULL malloc failure.
628 */
629static struct val_neg_data* neg_data_chain(
630 uint8_t* nm, size_t nm_len, int labs, struct val_neg_data* parent)
631{
632 int i;
633 int tolabs = parent?parent->labs:0;
634 struct val_neg_data* el, *first = NULL, *prev = NULL;
635
636 /* create the new subtree, i is labelcount of current creation */
637 /* this creates a 'first' to z->parent=NULL list of zones */
638 for(i=labs; i!=tolabs; i--) {
639 /* create new item */
640 el = neg_setup_data_node(nm, nm_len, i);
641 if(!el) {
642 /* need to delete other allocations in this routine!*/
643 struct val_neg_data* p = first, *np;
644 while(p) {
645 np = p->parent;
646 free(p->name);
647 free(p);
648 p = np;
649 }
650 return NULL;
651 }
652 if(i == labs) {
653 first = el;
654 } else {
655 prev->parent = el;
656 }
657
658 /* prepare for next name */
659 prev = el;
660 dname_remove_label(&nm, &nm_len);
661 }
662 return first;
663}
664
665/**
666 * Remove NSEC records between start and end points.
667 * By walking the tree, the tree is sorted canonically.
668 * @param neg: negative cache.
669 * @param zone: the zone
670 * @param el: element to start walking at.
671 * @param nsec: the nsec record with the end point
672 */
673static void wipeout(struct val_neg_cache* neg, struct val_neg_zone* zone,
674 struct val_neg_data* el, struct ub_packed_rrset_key* nsec)
675{
676 struct packed_rrset_data* d = (struct packed_rrset_data*)nsec->
677 entry.data;
678 uint8_t* end;
679 size_t end_len;
680 int end_labs, m;
681 rbnode_type* walk, *next;
682 struct val_neg_data* cur;
683 uint8_t buf[257];
684 /* get endpoint */
685 if(!d || d->count == 0 || d->rr_len[0] < 2+1)
686 return;
687 if(ntohs(nsec->rk.type) == LDNS_RR_TYPE_NSEC) {
688 end = d->rr_data[0]+2;
689 end_len = dname_valid(end, d->rr_len[0]-2);
690 end_labs = dname_count_labels(end);
691 } else {
692 /* NSEC3 */
693 if(!nsec3_get_nextowner_b32(nsec, 0, buf, sizeof(buf)))
694 return;
695 end = buf;
696 end_labs = dname_count_size_labels(end, &end_len);
697 }
698
699 /* sanity check, both owner and end must be below the zone apex */
700 if(!dname_subdomain_c(el->name, zone->name) ||
701 !dname_subdomain_c(end, zone->name))
702 return;
703
704 /* detect end of zone NSEC ; wipe until the end of zone */
705 if(query_dname_compare(end, zone->name) == 0) {
706 end = NULL;
707 }
708
709 walk = rbtree_next(&el->node);
710 while(walk && walk != RBTREE_NULL) {
711 cur = (struct val_neg_data*)walk;
712 /* sanity check: must be larger than start */
713 if(dname_canon_lab_cmp(cur->name, cur->labs,
714 el->name, el->labs, &m) <= 0) {
715 /* r == 0 skip original record. */
716 /* r < 0 too small! */
717 walk = rbtree_next(walk);
718 continue;
719 }
720 /* stop at endpoint, also data at empty nonterminals must be
721 * removed (no NSECs there) so everything between
722 * start and end */
723 if(end && dname_canon_lab_cmp(cur->name, cur->labs,
724 end, end_labs, &m) >= 0) {
725 break;
726 }
727 /* this element has to be deleted, but we cannot do it
728 * now, because we are walking the tree still ... */
729 /* get the next element: */
730 next = rbtree_next(walk);
731 /* now delete the original element, this may trigger
732 * rbtree rebalances, but really, the next element is
733 * the one we need.
734 * But it may trigger delete of other data and the
735 * entire zone. However, if that happens, this is done
736 * by deleting the *parents* of the element for deletion,
737 * and maybe also the entire zone if it is empty.
738 * But parents are smaller in canonical compare, thus,
739 * if a larger element exists, then it is not a parent,
740 * it cannot get deleted, the zone cannot get empty.
741 * If the next==NULL, then zone can be empty. */
742 if(cur->in_use)
743 neg_delete_data(neg, cur);
744 walk = next;
745 }
746}
747
748void neg_insert_data(struct val_neg_cache* neg,
749 struct val_neg_zone* zone, struct ub_packed_rrset_key* nsec)
750{
751 struct packed_rrset_data* d;
752 struct val_neg_data* parent;
753 struct val_neg_data* el;
754 uint8_t* nm = nsec->rk.dname;
755 size_t nm_len = nsec->rk.dname_len;
756 int labs = dname_count_labels(nsec->rk.dname);
757
758 d = (struct packed_rrset_data*)nsec->entry.data;
759 if( !(d->security == sec_status_secure ||
760 (d->security == sec_status_unchecked && d->rrsig_count > 0)))
761 return;
762 log_nametypeclass(VERB_ALGO, "negcache rr",
763 nsec->rk.dname, ntohs(nsec->rk.type),
764 ntohs(nsec->rk.rrset_class));
765
766 /* find closest enclosing parent data that (still) exists */
767 parent = neg_closest_data_parent(zone, nm, nm_len, labs);
768 if(parent && query_dname_compare(parent->name, nm) == 0) {
769 /* perfect match already exists */
770 log_assert(parent->count > 0);
771 el = parent;
772 } else {
773 struct val_neg_data* p, *np;
774
775 /* create subtree for perfect match */
776 /* if parent exists, it is in use */
777 log_assert(!parent || parent->count > 0);
778
779 el = neg_data_chain(nm, nm_len, labs, parent);
780 if(!el) {
781 log_err("out of memory inserting NSEC negative cache");
782 return;
783 }
784 el->in_use = 0; /* set on below */
785
786 /* insert the list of zones into the tree */
787 p = el;
788 while(p) {
789 np = p->parent;
790 /* mem use */
791 neg->use += sizeof(struct val_neg_data) + p->len;
792 /* insert in tree */
793 p->zone = zone;
794 (void)rbtree_insert(&zone->tree, &p->node);
795 /* last one needs proper parent pointer */
796 if(np == NULL)
797 p->parent = parent;
798 p = np;
799 }
800 }
801
802 if(!el->in_use) {
803 struct val_neg_data* p;
804
805 el->in_use = 1;
806 /* increase usage count of all parents */
807 for(p=el; p; p = p->parent) {
808 p->count++;
809 }
810
811 neg_lru_front(neg, el);
812 } else {
813 /* in use, bring to front, lru */
814 neg_lru_touch(neg, el);
815 }
816
817 /* if nsec3 store last used parameters */
818 if(ntohs(nsec->rk.type) == LDNS_RR_TYPE_NSEC3) {
819 int h;
820 uint8_t* s;
821 size_t slen, it;
822 if(nsec3_get_params(nsec, 0, &h, &it, &s, &slen) &&
823 it <= neg->nsec3_max_iter &&
824 (h != zone->nsec3_hash || it != zone->nsec3_iter ||
825 slen != zone->nsec3_saltlen ||
826 memcmp(zone->nsec3_salt, s, slen) != 0)) {
827
828 if(slen > 0) {
829 uint8_t* sa = memdup(s, slen);
830 if(sa) {
831 free(zone->nsec3_salt);
832 zone->nsec3_salt = sa;
833 zone->nsec3_saltlen = slen;
834 zone->nsec3_iter = it;
835 zone->nsec3_hash = h;
836 }
837 } else {
838 free(zone->nsec3_salt);
839 zone->nsec3_salt = NULL;
840 zone->nsec3_saltlen = 0;
841 zone->nsec3_iter = it;
842 zone->nsec3_hash = h;
843 }
844 }
845 }
846
847 /* wipe out the cache items between NSEC start and end */
848 wipeout(neg, zone, el, nsec);
849}
850
851/** see if the reply has signed NSEC records and return the signer */
852static uint8_t* reply_nsec_signer(struct reply_info* rep, size_t* signer_len,
853 uint16_t* dclass)
854{
855 size_t i;
856 struct packed_rrset_data* d;
857 uint8_t* s;
858 for(i=rep->an_numrrsets; i< rep->an_numrrsets+rep->ns_numrrsets; i++){
859 if(ntohs(rep->rrsets[i]->rk.type) == LDNS_RR_TYPE_NSEC ||
860 ntohs(rep->rrsets[i]->rk.type) == LDNS_RR_TYPE_NSEC3) {
861 d = (struct packed_rrset_data*)rep->rrsets[i]->
862 entry.data;
863 /* return first signer name of first NSEC */
864 if(d->rrsig_count != 0) {
865 val_find_rrset_signer(rep->rrsets[i],
866 &s, signer_len);
867 if(s && *signer_len) {
868 *dclass = ntohs(rep->rrsets[i]->
869 rk.rrset_class);
870 return s;
871 }
872 }
873 }
874 }
875 return 0;
876}
877
878void val_neg_addreply(struct val_neg_cache* neg, struct reply_info* rep)
879{
880 size_t i, need;
881 struct ub_packed_rrset_key* soa;
882 uint8_t* dname = NULL;
883 size_t dname_len;
884 uint16_t rrset_class;
885 struct val_neg_zone* zone;
886 /* see if secure nsecs inside */
887 if(!reply_has_nsec(rep))
888 return;
889 /* find the zone name in message */
890 if((soa = reply_find_soa(rep))) {
891 dname = soa->rk.dname;
892 dname_len = soa->rk.dname_len;
893 rrset_class = ntohs(soa->rk.rrset_class);
894 }
895 else {
896 /* No SOA in positive (wildcard) answer. Use signer from the
897 * validated answer RRsets' signature. */
898 if(!(dname = reply_nsec_signer(rep, &dname_len, &rrset_class)))
899 return;
900 }
901
902 log_nametypeclass(VERB_ALGO, "negcache insert for zone",
903 dname, LDNS_RR_TYPE_SOA, rrset_class);
904
905 /* ask for enough space to store all of it */
906 need = calc_data_need(rep) +
907 calc_zone_need(dname, dname_len);
908 lock_basic_lock(&neg->lock);
909 neg_make_space(neg, need);
910
911 /* find or create the zone entry */
912 zone = neg_find_zone(neg, dname, dname_len, rrset_class);
913 if(!zone) {
914 if(!(zone = neg_create_zone(neg, dname, dname_len,
915 rrset_class))) {
916 lock_basic_unlock(&neg->lock);
917 log_err("out of memory adding negative zone");
918 return;
919 }
920 }
921 val_neg_zone_take_inuse(zone);
922
923 /* insert the NSECs */
924 for(i=rep->an_numrrsets; i< rep->an_numrrsets+rep->ns_numrrsets; i++){
925 if(ntohs(rep->rrsets[i]->rk.type) != LDNS_RR_TYPE_NSEC)
926 continue;
927 if(!dname_subdomain_c(rep->rrsets[i]->rk.dname,
928 zone->name)) continue;
929 /* insert NSEC into this zone's tree */
930 neg_insert_data(neg, zone, rep->rrsets[i]);
931 }
932 if(zone->tree.count == 0) {
933 /* remove empty zone if inserts failed */
934 neg_delete_zone(neg, zone);
935 }
936 lock_basic_unlock(&neg->lock);
937}
938
939/**
940 * Lookup closest data record. For NSEC denial.
941 * @param zone: zone to look in
942 * @param qname: name to look for.
943 * @param len: length of name
944 * @param labs: labels in name
945 * @param data: data element, exact or smaller or NULL
946 * @return true if exact match.
947 */
948static int neg_closest_data(struct val_neg_zone* zone,
949 uint8_t* qname, size_t len, int labs, struct val_neg_data** data)
950{
951 struct val_neg_data key;
952 rbnode_type* r;
953 key.node.key = &key;
954 key.name = qname;
955 key.len = len;
956 key.labs = labs;
957 if(rbtree_find_less_equal(&zone->tree, &key, &r)) {
958 /* exact match */
959 *data = (struct val_neg_data*)r;
960 return 1;
961 } else {
962 /* smaller match */
963 *data = (struct val_neg_data*)r;
964 return 0;
965 }
966}
967
968void val_neg_addreferral(struct val_neg_cache* neg, struct reply_info* rep,
969 uint8_t* zone_name)
970{
971 size_t i, need;
972 uint8_t* signer;
973 size_t signer_len;
974 uint16_t dclass;
975 struct val_neg_zone* zone;
976 /* no SOA in this message, find RRSIG over NSEC's signer name.
977 * note the NSEC records are maybe not validated yet */
978 signer = reply_nsec_signer(rep, &signer_len, &dclass);
979 if(!signer)
1
Assuming 'signer' is non-null
2
Taking false branch
980 return;
981 if(!dname_subdomain_c(signer, zone_name)) {
3
Assuming the condition is false
4
Taking false branch
982 /* the signer is not in the bailiwick, throw it out */
983 return;
984 }
985
986 log_nametypeclass(VERB_ALGO, "negcache insert referral ",
987 signer, LDNS_RR_TYPE_NS, dclass);
988
989 /* ask for enough space to store all of it */
990 need = calc_data_need(rep) + calc_zone_need(signer, signer_len);
991 lock_basic_lock(&neg->lock);
992 neg_make_space(neg, need);
5
Calling 'neg_make_space'
993
994 /* find or create the zone entry */
995 zone = neg_find_zone(neg, signer, signer_len, dclass);
996 if(!zone) {
997 if(!(zone = neg_create_zone(neg, signer, signer_len,
998 dclass))) {
999 lock_basic_unlock(&neg->lock);
1000 log_err("out of memory adding negative zone");
1001 return;
1002 }
1003 }
1004 val_neg_zone_take_inuse(zone);
1005
1006 /* insert the NSECs */
1007 for(i=rep->an_numrrsets; i< rep->an_numrrsets+rep->ns_numrrsets; i++){
1008 if(ntohs(rep->rrsets[i]->rk.type) != LDNS_RR_TYPE_NSEC &&
1009 ntohs(rep->rrsets[i]->rk.type) != LDNS_RR_TYPE_NSEC3)
1010 continue;
1011 if(!dname_subdomain_c(rep->rrsets[i]->rk.dname,
1012 zone->name)) continue;
1013 /* insert NSEC into this zone's tree */
1014 neg_insert_data(neg, zone, rep->rrsets[i]);
1015 }
1016 if(zone->tree.count == 0) {
1017 /* remove empty zone if inserts failed */
1018 neg_delete_zone(neg, zone);
1019 }
1020 lock_basic_unlock(&neg->lock);
1021}
1022
1023/**
1024 * Check that an NSEC3 rrset does not have a type set.
1025 * None of the nsec3s in a hash-collision are allowed to have the type.
1026 * (since we do not know which one is the nsec3 looked at, flags, ..., we
1027 * ignore the cached item and let it bypass negative caching).
1028 * @param k: the nsec3 rrset to check.
1029 * @param t: type to check
1030 * @return true if no RRs have the type.
1031 */
1032static int nsec3_no_type(struct ub_packed_rrset_key* k, uint16_t t)
1033{
1034 int count = (int)((struct packed_rrset_data*)k->entry.data)->count;
1035 int i;
1036 for(i=0; i<count; i++)
1037 if(nsec3_has_type(k, i, t))
1038 return 0;
1039 return 1;
1040}
1041
1042/**
1043 * See if rrset exists in rrset cache.
1044 * If it does, the bit is checked, and if not expired, it is returned
1045 * allocated in region.
1046 * @param rrset_cache: rrset cache
1047 * @param qname: to lookup rrset name
1048 * @param qname_len: length of qname.
1049 * @param qtype: type of rrset to lookup, host order
1050 * @param qclass: class of rrset to lookup, host order
1051 * @param flags: flags for rrset to lookup
1052 * @param region: where to alloc result
1053 * @param checkbit: if true, a bit in the nsec typemap is checked for absence.
1054 * @param checktype: which bit to check
1055 * @param now: to check ttl against
1056 * @return rrset or NULL
1057 */
1058static struct ub_packed_rrset_key*
1059grab_nsec(struct rrset_cache* rrset_cache, uint8_t* qname, size_t qname_len,
1060 uint16_t qtype, uint16_t qclass, uint32_t flags,
1061 struct regional* region, int checkbit, uint16_t checktype,
1062 time_t now)
1063{
1064 struct ub_packed_rrset_key* r, *k = rrset_cache_lookup(rrset_cache,
1065 qname, qname_len, qtype, qclass, flags, now, 0);
1066 struct packed_rrset_data* d;
1067 if(!k) return NULL;
1068 d = (struct packed_rrset_data*)k->entry.data;
1069 if(d->ttl < now) {
1070 lock_rw_unlock(&k->entry.lock);
1071 return NULL;
1072 }
1073 /* only secure or unchecked records that have signatures. */
1074 if( ! ( d->security == sec_status_secure ||
1075 (d->security == sec_status_unchecked &&
1076 d->rrsig_count > 0) ) ) {
1077 lock_rw_unlock(&k->entry.lock);
1078 return NULL;
1079 }
1080 /* check if checktype is absent */
1081 if(checkbit && (
1082 (qtype == LDNS_RR_TYPE_NSEC && nsec_has_type(k, checktype)) ||
1083 (qtype == LDNS_RR_TYPE_NSEC3 && !nsec3_no_type(k, checktype))
1084 )) {
1085 lock_rw_unlock(&k->entry.lock);
1086 return NULL;
1087 }
1088 /* looks OK! copy to region and return it */
1089 r = packed_rrset_copy_region(k, region, now);
1090 /* if it failed, we return the NULL */
1091 lock_rw_unlock(&k->entry.lock);
1092 return r;
1093}
1094
1095/**
1096 * Get best NSEC record for qname. Might be matching, covering or totally
1097 * useless.
1098 * @param neg_cache: neg cache
1099 * @param qname: to lookup rrset name
1100 * @param qname_len: length of qname.
1101 * @param qclass: class of rrset to lookup, host order
1102 * @param rrset_cache: rrset cache
1103 * @param now: to check ttl against
1104 * @param region: where to alloc result
1105 * @return rrset or NULL
1106 */
1107static struct ub_packed_rrset_key*
1108neg_find_nsec(struct val_neg_cache* neg_cache, uint8_t* qname, size_t qname_len,
1109 uint16_t qclass, struct rrset_cache* rrset_cache, time_t now,
1110 struct regional* region)
1111{
1112 int labs;
1113 uint32_t flags;
1114 struct val_neg_zone* zone;
1115 struct val_neg_data* data;
1116 struct ub_packed_rrset_key* nsec;
1117
1118 labs = dname_count_labels(qname);
1119 lock_basic_lock(&neg_cache->lock);
1120 zone = neg_closest_zone_parent(neg_cache, qname, qname_len, labs,
1121 qclass);
1122 while(zone && !zone->in_use)
1123 zone = zone->parent;
1124 if(!zone) {
1125 lock_basic_unlock(&neg_cache->lock);
1126 return NULL;
1127 }
1128
1129 /* NSEC only for now */
1130 if(zone->nsec3_hash) {
1131 lock_basic_unlock(&neg_cache->lock);
1132 return NULL;
1133 }
1134
1135 /* ignore return value, don't care if it is an exact or smaller match */
1136 (void)neg_closest_data(zone, qname, qname_len, labs, &data);
1137 if(!data) {
1138 lock_basic_unlock(&neg_cache->lock);
1139 return NULL;
1140 }
1141
1142 /* ENT nodes are not in use, try the previous node. If the previous node
1143 * is not in use, we don't have a useful NSEC and give up. */
1144 if(!data->in_use) {
1145 data = (struct val_neg_data*)rbtree_previous((rbnode_type*)data);
1146 if((rbnode_type*)data == RBTREE_NULL || !data->in_use) {
1147 lock_basic_unlock(&neg_cache->lock);
1148 return NULL;
1149 }
1150 }
1151
1152 flags = 0;
1153 if(query_dname_compare(data->name, zone->name) == 0)
1154 flags = PACKED_RRSET_NSEC_AT_APEX;
1155
1156 nsec = grab_nsec(rrset_cache, data->name, data->len, LDNS_RR_TYPE_NSEC,
1157 zone->dclass, flags, region, 0, 0, now);
1158 lock_basic_unlock(&neg_cache->lock);
1159 return nsec;
1160}
1161
1162/** find nsec3 closest encloser in neg cache */
1163static struct val_neg_data*
1164neg_find_nsec3_ce(struct val_neg_zone* zone, uint8_t* qname, size_t qname_len,
1165 int qlabs, sldns_buffer* buf, uint8_t* hashnc, size_t* nclen)
1166{
1167 struct val_neg_data* data;
1168 uint8_t hashce[NSEC3_SHA_LEN];
1169 uint8_t b32[257];
1170 size_t celen, b32len;
1171
1172 *nclen = 0;
1173 while(qlabs > 0) {
1174 /* hash */
1175 if(!(celen=nsec3_get_hashed(buf, qname, qname_len,
1176 zone->nsec3_hash, zone->nsec3_iter, zone->nsec3_salt,
1177 zone->nsec3_saltlen, hashce, sizeof(hashce))))
1178 return NULL;
1179 if(!(b32len=nsec3_hash_to_b32(hashce, celen, zone->name,
1180 zone->len, b32, sizeof(b32))))
1181 return NULL;
1182
1183 /* lookup (exact match only) */
1184 data = neg_find_data(zone, b32, b32len, zone->labs+1);
1185 if(data && data->in_use) {
1186 /* found ce match! */
1187 return data;
1188 }
1189
1190 *nclen = celen;
1191 memmove(hashnc, hashce, celen);
1192 dname_remove_label(&qname, &qname_len);
1193 qlabs --;
1194 }
1195 return NULL;
1196}
1197
1198/** check nsec3 parameters on nsec3 rrset with current zone values */
1199static int
1200neg_params_ok(struct val_neg_zone* zone, struct ub_packed_rrset_key* rrset)
1201{
1202 int h;
1203 uint8_t* s;
1204 size_t slen, it;
1205 if(!nsec3_get_params(rrset, 0, &h, &it, &s, &slen))
1206 return 0;
1207 return (h == zone->nsec3_hash && it == zone->nsec3_iter &&
1208 slen == zone->nsec3_saltlen &&
1209 memcmp(zone->nsec3_salt, s, slen) == 0);
1210}
1211
1212/** get next closer for nsec3 proof */
1213static struct ub_packed_rrset_key*
1214neg_nsec3_getnc(struct val_neg_zone* zone, uint8_t* hashnc, size_t nclen,
1215 struct rrset_cache* rrset_cache, struct regional* region,
1216 time_t now, uint8_t* b32, size_t maxb32)
1217{
1218 struct ub_packed_rrset_key* nc_rrset;
1219 struct val_neg_data* data;
1220 size_t b32len;
1221
1222 if(!(b32len=nsec3_hash_to_b32(hashnc, nclen, zone->name,
1223 zone->len, b32, maxb32)))
1224 return NULL;
1225 (void)neg_closest_data(zone, b32, b32len, zone->labs+1, &data);
1226 if(!data && zone->tree.count != 0) {
1227 /* could be before the first entry ; return the last
1228 * entry (possibly the rollover nsec3 at end) */
1229 data = (struct val_neg_data*)rbtree_last(&zone->tree);
1230 }
1231 while(data && !data->in_use)
1232 data = data->parent;
1233 if(!data)
1234 return NULL;
1235 /* got a data element in tree, grab it */
1236 nc_rrset = grab_nsec(rrset_cache, data->name, data->len,
1237 LDNS_RR_TYPE_NSEC3, zone->dclass, 0, region, 0, 0, now);
1238 if(!nc_rrset)
1239 return NULL;
1240 if(!neg_params_ok(zone, nc_rrset))
1241 return NULL;
1242 return nc_rrset;
1243}
1244
1245/** neg cache nsec3 proof procedure*/
1246static struct dns_msg*
1247neg_nsec3_proof_ds(struct val_neg_zone* zone, uint8_t* qname, size_t qname_len,
1248 int qlabs, sldns_buffer* buf, struct rrset_cache* rrset_cache,
1249 struct regional* region, time_t now, uint8_t* topname)
1250{
1251 struct dns_msg* msg;
1252 struct val_neg_data* data;
1253 uint8_t hashnc[NSEC3_SHA_LEN];
1254 size_t nclen;
1255 struct ub_packed_rrset_key* ce_rrset, *nc_rrset;
1256 struct nsec3_cached_hash c;
1257 uint8_t nc_b32[257];
1258
1259 /* for NSEC3 ; determine the closest encloser for which we
1260 * can find an exact match. Remember the hashed lower name,
1261 * since that is the one we need a closest match for.
1262 * If we find a match straight away, then it becomes NODATA.
1263 * Otherwise, NXDOMAIN or if OPTOUT, an insecure delegation.
1264 * Also check that parameters are the same on closest encloser
1265 * and on closest match.
1266 */
1267 if(!zone->nsec3_hash)
1268 return NULL; /* not nsec3 zone */
1269
1270 if(!(data=neg_find_nsec3_ce(zone, qname, qname_len, qlabs, buf,
1271 hashnc, &nclen))) {
1272 return NULL;
1273 }
1274
1275 /* grab the ce rrset */
1276 ce_rrset = grab_nsec(rrset_cache, data->name, data->len,
1277 LDNS_RR_TYPE_NSEC3, zone->dclass, 0, region, 1,
1278 LDNS_RR_TYPE_DS, now);
1279 if(!ce_rrset)
1280 return NULL;
1281 if(!neg_params_ok(zone, ce_rrset))
1282 return NULL;
1283
1284 if(nclen == 0) {
1285 /* exact match, just check the type bits */
1286 /* need: -SOA, -DS, +NS */
1287 if(nsec3_has_type(ce_rrset, 0, LDNS_RR_TYPE_SOA) ||
1288 nsec3_has_type(ce_rrset, 0, LDNS_RR_TYPE_DS) ||
1289 !nsec3_has_type(ce_rrset, 0, LDNS_RR_TYPE_NS))
1290 return NULL;
1291 if(!(msg = dns_msg_create(qname, qname_len,
1292 LDNS_RR_TYPE_DS, zone->dclass, region, 1)))
1293 return NULL;
1294 /* TTL reduced in grab_nsec */
1295 if(!dns_msg_authadd(msg, region, ce_rrset, 0))
1296 return NULL;
1297 return msg;
1298 }
1299
1300 /* optout is not allowed without knowing the trust-anchor in use,
1301 * otherwise the optout could spoof away that anchor */
1302 if(!topname)
1303 return NULL;
1304
1305 /* if there is no exact match, it must be in an optout span
1306 * (an existing DS implies an NSEC3 must exist) */
1307 nc_rrset = neg_nsec3_getnc(zone, hashnc, nclen, rrset_cache,
1308 region, now, nc_b32, sizeof(nc_b32));
1309 if(!nc_rrset)
1310 return NULL;
1311 if(!neg_params_ok(zone, nc_rrset))
1312 return NULL;
1313 if(!nsec3_has_optout(nc_rrset, 0))
1314 return NULL;
1315 c.hash = hashnc;
1316 c.hash_len = nclen;
1317 c.b32 = nc_b32+1;
1318 c.b32_len = (size_t)nc_b32[0];
1319 if(nsec3_covers(zone->name, &c, nc_rrset, 0, buf)) {
1320 /* nc_rrset covers the next closer name.
1321 * ce_rrset equals a closer encloser.
1322 * nc_rrset is optout.
1323 * No need to check wildcard for type DS */
1324 /* capacity=3: ce + nc + soa(if needed) */
1325 if(!(msg = dns_msg_create(qname, qname_len,
1326 LDNS_RR_TYPE_DS, zone->dclass, region, 3)))
1327 return NULL;
1328 /* now=0 because TTL was reduced in grab_nsec */
1329 if(!dns_msg_authadd(msg, region, ce_rrset, 0))
1330 return NULL;
1331 if(!dns_msg_authadd(msg, region, nc_rrset, 0))
1332 return NULL;
1333 return msg;
1334 }
1335 return NULL;
1336}
1337
1338/**
1339 * Add SOA record for external responses.
1340 * @param rrset_cache: to look into.
1341 * @param now: current time.
1342 * @param region: where to perform the allocation
1343 * @param msg: current msg with NSEC.
1344 * @param zone: val_neg_zone if we have one.
1345 * @return false on lookup or alloc failure.
1346 */
1347static int add_soa(struct rrset_cache* rrset_cache, time_t now,
1348 struct regional* region, struct dns_msg* msg, struct val_neg_zone* zone)
1349{
1350 struct ub_packed_rrset_key* soa;
1351 uint8_t* nm;
1352 size_t nmlen;
1353 uint16_t dclass;
1354 if(zone) {
1355 nm = zone->name;
1356 nmlen = zone->len;
1357 dclass = zone->dclass;
1358 } else {
1359 /* Assumes the signer is the zone SOA to add */
1360 nm = reply_nsec_signer(msg->rep, &nmlen, &dclass);
1361 if(!nm)
1362 return 0;
1363 }
1364 soa = rrset_cache_lookup(rrset_cache, nm, nmlen, LDNS_RR_TYPE_SOA,
1365 dclass, PACKED_RRSET_SOA_NEG, now, 0);
1366 if(!soa)
1367 return 0;
1368 if(!dns_msg_authadd(msg, region, soa, now)) {
1369 lock_rw_unlock(&soa->entry.lock);
1370 return 0;
1371 }
1372 lock_rw_unlock(&soa->entry.lock);
1373 return 1;
1374}
1375
1376struct dns_msg*
1377val_neg_getmsg(struct val_neg_cache* neg, struct query_info* qinfo,
1378 struct regional* region, struct rrset_cache* rrset_cache,
1379 sldns_buffer* buf, time_t now, int addsoa, uint8_t* topname,
1380 struct config_file* cfg)
1381{
1382 struct dns_msg* msg;
1383 struct ub_packed_rrset_key* nsec; /* qname matching/covering nsec */
1384 struct ub_packed_rrset_key* wcrr; /* wildcard record or nsec */
1385 uint8_t* nodata_wc = NULL;
1386 uint8_t* ce = NULL;
1387 size_t ce_len;
1388 uint8_t wc_ce[LDNS_MAX_DOMAINLEN+3];
1389 struct query_info wc_qinfo;
1390 struct ub_packed_rrset_key* cache_wc;
1391 struct packed_rrset_data* wcrr_data;
1392 int rcode = LDNS_RCODE_NOERROR;
1393 uint8_t* zname;
1394 size_t zname_len;
1395 int zname_labs;
1396 struct val_neg_zone* zone;
1397
1398 /* only for DS queries when aggressive use of NSEC is disabled */
1399 if(qinfo->qtype != LDNS_RR_TYPE_DS && !cfg->aggressive_nsec)
1400 return NULL;
1401 log_assert(!topname || dname_subdomain_c(qinfo->qname, topname));
1402
1403 /* Get best available NSEC for qname */
1404 nsec = neg_find_nsec(neg, qinfo->qname, qinfo->qname_len, qinfo->qclass,
1405 rrset_cache, now, region);
1406
1407 /* Matching NSEC, use to generate No Data answer. Not creating answers
1408 * yet for No Data proven using wildcard. */
1409 if(nsec && nsec_proves_nodata(nsec, qinfo, &nodata_wc) && !nodata_wc) {
1410 if(!(msg = dns_msg_create(qinfo->qname, qinfo->qname_len,
1411 qinfo->qtype, qinfo->qclass, region, 2)))
1412 return NULL;
1413 if(!dns_msg_authadd(msg, region, nsec, 0))
1414 return NULL;
1415 if(addsoa && !add_soa(rrset_cache, now, region, msg, NULL))
1416 return NULL;
1417
1418 lock_basic_lock(&neg->lock);
1419 neg->num_neg_cache_noerror++;
1420 lock_basic_unlock(&neg->lock);
1421 return msg;
1422 } else if(nsec && val_nsec_proves_name_error(nsec, qinfo->qname)) {
1423 if(!(msg = dns_msg_create(qinfo->qname, qinfo->qname_len,
1424 qinfo->qtype, qinfo->qclass, region, 3)))
1425 return NULL;
1426 if(!(ce = nsec_closest_encloser(qinfo->qname, nsec)))
1427 return NULL;
1428 dname_count_size_labels(ce, &ce_len);
1429
1430 /* No extra NSEC required if both nameerror qname and
1431 * nodata *.ce. are proven already. */
1432 if(!nodata_wc || query_dname_compare(nodata_wc, ce) != 0) {
1433 /* Qname proven non existing, get wildcard record for
1434 * QTYPE or NSEC covering or matching wildcard. */
1435
1436 /* Num labels in ce is always smaller than in qname,
1437 * therefore adding the wildcard label cannot overflow
1438 * buffer. */
1439 wc_ce[0] = 1;
1440 wc_ce[1] = (uint8_t)'*';
1441 memmove(wc_ce+2, ce, ce_len);
1442 wc_qinfo.qname = wc_ce;
1443 wc_qinfo.qname_len = ce_len + 2;
1444 wc_qinfo.qtype = qinfo->qtype;
1445
1446
1447 if((cache_wc = rrset_cache_lookup(rrset_cache, wc_qinfo.qname,
1448 wc_qinfo.qname_len, wc_qinfo.qtype,
1449 qinfo->qclass, 0/*flags*/, now, 0/*read only*/))) {
1450 /* Synthesize wildcard answer */
1451 wcrr_data = (struct packed_rrset_data*)cache_wc->entry.data;
1452 if(!(wcrr_data->security == sec_status_secure ||
1453 (wcrr_data->security == sec_status_unchecked &&
1454 wcrr_data->rrsig_count > 0))) {
1455 lock_rw_unlock(&cache_wc->entry.lock);
1456 return NULL;
1457 }
1458 if(!(wcrr = packed_rrset_copy_region(cache_wc,
1459 region, now))) {
1460 lock_rw_unlock(&cache_wc->entry.lock);
1461 return NULL;
1462 };
1463 lock_rw_unlock(&cache_wc->entry.lock);
1464 wcrr->rk.dname = qinfo->qname;
1465 wcrr->rk.dname_len = qinfo->qname_len;
1466 if(!dns_msg_ansadd(msg, region, wcrr, 0))
1467 return NULL;
1468 /* No SOA needed for wildcard synthesised
1469 * answer. */
1470 addsoa = 0;
1471 } else {
1472 /* Get wildcard NSEC for possible non existence
1473 * proof */
1474 if(!(wcrr = neg_find_nsec(neg, wc_qinfo.qname,
1475 wc_qinfo.qname_len, qinfo->qclass,
1476 rrset_cache, now, region)))
1477 return NULL;
1478
1479 nodata_wc = NULL;
1480 if(val_nsec_proves_name_error(wcrr, wc_ce))
1481 rcode = LDNS_RCODE_NXDOMAIN;
1482 else if(!nsec_proves_nodata(wcrr, &wc_qinfo,
1483 &nodata_wc) || nodata_wc)
1484 /* &nodata_wc shouldn't be set, wc_qinfo
1485 * already contains wildcard domain. */
1486 /* NSEC doesn't prove anything for
1487 * wildcard. */
1488 return NULL;
1489 if(query_dname_compare(wcrr->rk.dname,
1490 nsec->rk.dname) != 0)
1491 if(!dns_msg_authadd(msg, region, wcrr, 0))
1492 return NULL;
1493 }
1494 }
1495
1496 if(!dns_msg_authadd(msg, region, nsec, 0))
1497 return NULL;
1498 if(addsoa && !add_soa(rrset_cache, now, region, msg, NULL))
1499 return NULL;
1500
1501 /* Increment statistic counters */
1502 lock_basic_lock(&neg->lock);
1503 if(rcode == LDNS_RCODE_NOERROR)
1504 neg->num_neg_cache_noerror++;
1505 else if(rcode == LDNS_RCODE_NXDOMAIN)
1506 neg->num_neg_cache_nxdomain++;
1507 lock_basic_unlock(&neg->lock);
1508
1509 FLAGS_SET_RCODE(msg->rep->flags, rcode);
1510 return msg;
1511 }
1512
1513 /* No aggressive use of NSEC3 for now, only proceed for DS types. */
1514 if(qinfo->qtype != LDNS_RR_TYPE_DS){
1515 return NULL;
1516 }
1517 /* check NSEC3 neg cache for type DS */
1518 /* need to look one zone higher for DS type */
1519 zname = qinfo->qname;
1520 zname_len = qinfo->qname_len;
1521 dname_remove_label(&zname, &zname_len);
1522 zname_labs = dname_count_labels(zname);
1523
1524 /* lookup closest zone */
1525 lock_basic_lock(&neg->lock);
1526 zone = neg_closest_zone_parent(neg, zname, zname_len, zname_labs,
1527 qinfo->qclass);
1528 while(zone && !zone->in_use)
1529 zone = zone->parent;
1530 /* check that the zone is not too high up so that we do not pick data
1531 * out of a zone that is above the last-seen key (or trust-anchor). */
1532 if(zone && topname) {
1533 if(!dname_subdomain_c(zone->name, topname))
1534 zone = NULL;
1535 }
1536 if(!zone) {
1537 lock_basic_unlock(&neg->lock);
1538 return NULL;
1539 }
1540
1541 msg = neg_nsec3_proof_ds(zone, qinfo->qname, qinfo->qname_len,
1542 zname_labs+1, buf, rrset_cache, region, now, topname);
1543 if(msg && addsoa && !add_soa(rrset_cache, now, region, msg, zone)) {
1544 lock_basic_unlock(&neg->lock);
1545 return NULL;
1546 }
1547 lock_basic_unlock(&neg->lock);
1548 return msg;
1549}