Bug Summary

File: src/usr.sbin/unbound/services/outside_network.c
Warning: line 3398, column 3
Value stored to 'pnum' is never read
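
The flagged construct is a dead store: the value assigned to 'pnum' at line 3398 is never read before it is overwritten or goes out of scope. The code around line 3398 is not part of the excerpt below, so the following is only a minimal, hypothetical sketch (identifiers invented, not taken from outside_network.c) of the pattern that the deadcode checker reports and of the usual remedy, which is to drop the unused store or actually use the value.

    /* Hypothetical sketch of a "value stored is never read" finding;
     * names are invented and do not come from outside_network.c. */
    static int example_sum(const int* vals, int n)
    {
            int total = 0;
            int pnum;
            int i;
            pnum = n;       /* dead store: 'pnum' is never read after this */
            for(i = 0; i < n; i++)
                    total += vals[i];
            return total;   /* fix: remove the 'pnum' store, or use the value */
    }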

Annotated Source Code

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name outside_network.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 1 -pic-is-pie -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/usr.sbin/unbound/obj -resource-dir /usr/local/lib/clang/13.0.0 -I . -I /usr/src/usr.sbin/unbound -D SRCDIR=/usr/src/usr.sbin/unbound -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -fdebug-compilation-dir=/usr/src/usr.sbin/unbound/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c /usr/src/usr.sbin/unbound/services/outside_network.c
1/*
2 * services/outside_network.c - implement sending of queries and wait answer.
3 *
4 * Copyright (c) 2007, NLnet Labs. All rights reserved.
5 *
6 * This software is open source.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * Redistributions of source code must retain the above copyright notice,
13 * this list of conditions and the following disclaimer.
14 *
15 * Redistributions in binary form must reproduce the above copyright notice,
16 * this list of conditions and the following disclaimer in the documentation
17 * and/or other materials provided with the distribution.
18 *
19 * Neither the name of the NLNET LABS nor the names of its contributors may
20 * be used to endorse or promote products derived from this software without
21 * specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 */
35
36/**
37 * \file
38 *
39 * This file has functions to send queries to authoritative servers and
40 * wait for the pending answer events.
41 */
42#include "config.h"
43#include <ctype.h>
44#ifdef HAVE_SYS_TYPES_H
45# include <sys/types.h>
46#endif
47#include <sys/time.h>
48#include "services/outside_network.h"
49#include "services/listen_dnsport.h"
50#include "services/cache/infra.h"
51#include "iterator/iterator.h"
52#include "util/data/msgparse.h"
53#include "util/data/msgreply.h"
54#include "util/data/msgencode.h"
55#include "util/data/dname.h"
56#include "util/netevent.h"
57#include "util/log.h"
58#include "util/net_help.h"
59#include "util/random.h"
60#include "util/fptr_wlist.h"
61#include "util/edns.h"
62#include "sldns/sbuffer.h"
63#include "dnstap/dnstap.h"
64#ifdef HAVE_OPENSSL_SSL_H
65#include <openssl/ssl.h>
66#endif
67#ifdef HAVE_X509_VERIFY_PARAM_SET1_HOST
68#include <openssl/x509v3.h>
69#endif
70
71#ifdef HAVE_NETDB_H
72#include <netdb.h>
73#endif
74#include <fcntl.h>
75
76/** number of times to retry making a random ID that is unique. */
77#define MAX_ID_RETRY 1000
78/** number of times to retry finding interface, port that can be opened. */
79#define MAX_PORT_RETRY 10000
80/** number of retries on outgoing UDP queries */
81#define OUTBOUND_UDP_RETRY 1
82
83/** initiate TCP transaction for serviced query */
84static void serviced_tcp_initiate(struct serviced_query* sq, sldns_buffer* buff);
85/** with a fd available, randomize and send UDP */
86static int randomize_and_send_udp(struct pending* pend, sldns_buffer* packet,
87 int timeout);
88
89/** remove waiting tcp from the outnet waiting list */
90static void waiting_list_remove(struct outside_network* outnet,
91 struct waiting_tcp* w);
92
93/** select a DNS ID for a TCP stream */
94static uint16_t tcp_select_id(struct outside_network* outnet,
95 struct reuse_tcp* reuse);
96
97int
98pending_cmp(const void* key1, const void* key2)
99{
100 struct pending *p1 = (struct pending*)key1;
101 struct pending *p2 = (struct pending*)key2;
102 if(p1->id < p2->id)
103 return -1;
104 if(p1->id > p2->id)
105 return 1;
106 log_assert(p1->id == p2->id);
107 return sockaddr_cmp(&p1->addr, p1->addrlen, &p2->addr, p2->addrlen);
108}
109
110int
111serviced_cmp(const void* key1, const void* key2)
112{
113 struct serviced_query* q1 = (struct serviced_query*)key1;
114 struct serviced_query* q2 = (struct serviced_query*)key2;
115 int r;
116 if(q1->qbuflen < q2->qbuflen)
117 return -1;
118 if(q1->qbuflen > q2->qbuflen)
119 return 1;
120 log_assert(q1->qbuflen == q2->qbuflen);
121 log_assert(q1->qbuflen >= 15 /* 10 header, root, type, class */);
122 /* alternate casing of qname is still the same query */
123 if((r = memcmp(q1->qbuf, q2->qbuf, 10)) != 0)
124 return r;
125 if((r = memcmp(q1->qbuf+q1->qbuflen-4, q2->qbuf+q2->qbuflen-4, 4)) != 0)
126 return r;
127 if(q1->dnssec != q2->dnssec) {
128 if(q1->dnssec < q2->dnssec)
129 return -1;
130 return 1;
131 }
132 if((r = query_dname_compare(q1->qbuf+10, q2->qbuf+10)) != 0)
133 return r;
134 if((r = edns_opt_list_compare(q1->opt_list, q2->opt_list)) != 0)
135 return r;
136 return sockaddr_cmp(&q1->addr, q1->addrlen, &q2->addr, q2->addrlen);
137}
138
139/** compare if the reuse element has the same address, port and same ssl-is
140 * used-for-it characteristic */
141static int
142reuse_cmp_addrportssl(const void* key1, const void* key2)
143{
144 struct reuse_tcp* r1 = (struct reuse_tcp*)key1;
145 struct reuse_tcp* r2 = (struct reuse_tcp*)key2;
146 int r;
147 /* compare address and port */
148 r = sockaddr_cmp(&r1->addr, r1->addrlen, &r2->addr, r2->addrlen);
149 if(r != 0)
150 return r;
151
152 /* compare if SSL-enabled */
153 if(r1->is_ssl && !r2->is_ssl)
154 return 1;
155 if(!r1->is_ssl && r2->is_ssl)
156 return -1;
157 return 0;
158}
159
160int
161reuse_cmp(const void* key1, const void* key2)
162{
163 int r;
164 r = reuse_cmp_addrportssl(key1, key2);
165 if(r != 0)
166 return r;
167
168 /* compare ptr value */
169 if(key1 < key2) return -1;
170 if(key1 > key2) return 1;
171 return 0;
172}
173
174int reuse_id_cmp(const void* key1, const void* key2)
175{
176 struct waiting_tcp* w1 = (struct waiting_tcp*)key1;
177 struct waiting_tcp* w2 = (struct waiting_tcp*)key2;
178 if(w1->id < w2->id)
179 return -1;
180 if(w1->id > w2->id)
181 return 1;
182 return 0;
183}
184
185/** delete waiting_tcp entry. Does not unlink from waiting list.
186 * @param w: to delete.
187 */
188static void
189waiting_tcp_delete(struct waiting_tcp* w)
190{
191 if(!w) return;
192 if(w->timer)
193 comm_timer_delete(w->timer);
194 free(w);
195}
196
197/**
198 * Pick random outgoing-interface of that family, and bind it.
199 * port set to 0 so OS picks a port number for us.
200 * if it is the ANY address, do not bind.
201 * @param pend: pending tcp structure, for storing the local address choice.
202 * @param w: tcp structure with destination address.
203 * @param s: socket fd.
204 * @return false on error, socket closed.
205 */
206static int
207pick_outgoing_tcp(struct pending_tcp* pend, struct waiting_tcp* w, int s)
208{
209 struct port_if* pi = NULL;
210 int num;
211 pend->pi = NULL;
212#ifdef INET6
213 if(addr_is_ip6(&w->addr, w->addrlen))
214 num = w->outnet->num_ip6;
215 else
216#endif
217 num = w->outnet->num_ip4;
218 if(num == 0) {
219 log_err("no TCP outgoing interfaces of family");
220 log_addr(VERB_OPS, "for addr", &w->addr, w->addrlen);
221 sock_close(s);
222 return 0;
223 }
224#ifdef INET6
225 if(addr_is_ip6(&w->addr, w->addrlen))
226 pi = &w->outnet->ip6_ifs[ub_random_max(w->outnet->rnd, num)];
227 else
228#endif
229 pi = &w->outnet->ip4_ifs[ub_random_max(w->outnet->rnd, num)];
230 log_assert(pi);
231 pend->pi = pi;
232 if(addr_is_any(&pi->addr, pi->addrlen)) {
233 /* binding to the ANY interface is for listening sockets */
234 return 1;
235 }
236 /* set port to 0 */
237 if(addr_is_ip6(&pi->addr, pi->addrlen))
238 ((struct sockaddr_in6*)&pi->addr)->sin6_port = 0;
239 else ((struct sockaddr_in*)&pi->addr)->sin_port = 0;
240 if(bind(s, (struct sockaddr*)&pi->addr, pi->addrlen) != 0) {
241#ifndef USE_WINSOCK
242#ifdef EADDRNOTAVAIL
243 if(!(verbosity < 4 && errno == EADDRNOTAVAIL))
244#endif
245#else /* USE_WINSOCK */
246 if(!(verbosity < 4 && WSAGetLastError() == WSAEADDRNOTAVAIL))
247#endif
248 log_err("outgoing tcp: bind: %s", sock_strerror(errno));
249 sock_close(s);
250 return 0;
251 }
252 log_addr(VERB_ALGO, "tcp bound to src", &pi->addr, pi->addrlen);
253 return 1;
254}
255
256/** get TCP file descriptor for address, returns -1 on failure,
257 * tcp_mss is 0 or maxseg size to set for TCP packets. */
258int
259outnet_get_tcp_fd(struct sockaddr_storage* addr, socklen_t addrlen, int tcp_mss, int dscp)
260{
261 int s;
262 int af;
263 char* err;
264#ifdef SO_REUSEADDR
265 int on = 1;
266#endif
267#ifdef INET6
268 if(addr_is_ip6(addr, addrlen)){
269 s = socket(PF_INET6, SOCK_STREAM, IPPROTO_TCP);
270 af = AF_INET6;
271 } else {
272#else
273 {
274#endif
275 af = AF_INET;
276 s = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
277 }
278 if(s == -1) {
279 log_err_addr("outgoing tcp: socket", sock_strerror(errno),
280 addr, addrlen);
281 return -1;
282 }
283
284#ifdef SO_REUSEADDR
285 if(setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (void*)&on,
286 (socklen_t)sizeof(on)) < 0) {
287 verbose(VERB_ALGO, "outgoing tcp:"
288 " setsockopt(.. SO_REUSEADDR ..) failed");
289 }
290#endif
291
292 err = set_ip_dscp(s, af, dscp);
293 if(err != NULL) {
294 verbose(VERB_ALGO, "outgoing tcp:"
295 "error setting IP DiffServ codepoint on socket");
296 }
297
298 if(tcp_mss > 0) {
299#if defined(IPPROTO_TCP) && defined(TCP_MAXSEG)
300 if(setsockopt(s, IPPROTO_TCP, TCP_MAXSEG,
301 (void*)&tcp_mss, (socklen_t)sizeof(tcp_mss)) < 0) {
302 verbose(VERB_ALGO, "outgoing tcp:"
303 " setsockopt(.. TCP_MAXSEG ..) failed");
304 }
305#else
306 verbose(VERB_ALGO, "outgoing tcp:"
307 " setsockopt(TCP_MAXSEG) unsupported");
308#endif /* defined(IPPROTO_TCP) && defined(TCP_MAXSEG) */
309 }
310
311 return s;
312}
313
314/** connect tcp connection to addr, 0 on failure */
315int
316outnet_tcp_connect(int s, struct sockaddr_storage* addr, socklen_t addrlen)
317{
318 if(connect(s, (struct sockaddr*)addr, addrlen) == -1) {
319#ifndef USE_WINSOCK
320#ifdef EINPROGRESS
321 if(errno != EINPROGRESS) {
322#endif
323 if(tcp_connect_errno_needs_log(
324 (struct sockaddr*)addr, addrlen))
325 log_err_addr("outgoing tcp: connect",
326 strerror(errno), addr, addrlen);
327 close(s);
328 return 0;
329#ifdef EINPROGRESS
330 }
331#endif
332#else /* USE_WINSOCK */
333 if(WSAGetLastError() != WSAEINPROGRESS &&
334 WSAGetLastError() != WSAEWOULDBLOCK) {
335 closesocket(s);
336 return 0;
337 }
338#endif
339 }
340 return 1;
341}
342
343/** log reuse item addr and ptr with message */
344static void
345log_reuse_tcp(enum verbosity_value v, const char* msg, struct reuse_tcp* reuse)
346{
347 uint16_t port;
348 char addrbuf[128];
349 if(verbosity < v) return;
350 if(!reuse || !reuse->pending || !reuse->pending->c)
351 return;
352 addr_to_str(&reuse->addr, reuse->addrlen, addrbuf, sizeof(addrbuf));
353 port = ntohs(((struct sockaddr_in*)&reuse->addr)->sin_port);
354 verbose(v, "%s %s#%u fd %d", msg, addrbuf, (unsigned)port,
355 reuse->pending->c->fd);
356}
357
358/** pop the first element from the writewait list */
359static struct waiting_tcp* reuse_write_wait_pop(struct reuse_tcp* reuse)
360{
361 struct waiting_tcp* w = reuse->write_wait_first;
362 if(!w)
363 return NULL;
364 log_assert(w->write_wait_queued);
365 log_assert(!w->write_wait_prev);
366 reuse->write_wait_first = w->write_wait_next;
367 if(w->write_wait_next)
368 w->write_wait_next->write_wait_prev = NULL;
369 else reuse->write_wait_last = NULL;
370 w->write_wait_queued = 0;
371 w->write_wait_next = NULL;
372 w->write_wait_prev = NULL;
373 return w;
374}
375
376/** remove the element from the writewait list */
377static void reuse_write_wait_remove(struct reuse_tcp* reuse,
378 struct waiting_tcp* w)
379{
380 log_assert(w);
381 log_assert(w->write_wait_queued);
382 if(!w)
383 return;
384 if(!w->write_wait_queued)
385 return;
386 if(w->write_wait_prev)
387 w->write_wait_prev->write_wait_next = w->write_wait_next;
388 else reuse->write_wait_first = w->write_wait_next;
389 log_assert(!w->write_wait_prev ||
390 w->write_wait_prev->write_wait_next != w->write_wait_prev);
391 if(w->write_wait_next)
392 w->write_wait_next->write_wait_prev = w->write_wait_prev;
393 else reuse->write_wait_last = w->write_wait_prev;
394 log_assert(!w->write_wait_next
395 || w->write_wait_next->write_wait_prev != w->write_wait_next);
396 w->write_wait_queued = 0;
397 w->write_wait_next = NULL;
398 w->write_wait_prev = NULL;
399}
400
401/** push the element after the last on the writewait list */
402static void reuse_write_wait_push_back(struct reuse_tcp* reuse,
403 struct waiting_tcp* w)
404{
405 if(!w) return;
406 log_assert(!w->write_wait_queued);
407 if(reuse->write_wait_last) {
408 reuse->write_wait_last->write_wait_next = w;
409 log_assert(reuse->write_wait_last->write_wait_next !=
410 reuse->write_wait_last);
411 w->write_wait_prev = reuse->write_wait_last;
412 } else {
413 reuse->write_wait_first = w;
414 }
415 reuse->write_wait_last = w;
416 w->write_wait_queued = 1;
417}
418
419/** insert element in tree by id */
420void
421reuse_tree_by_id_insert(struct reuse_tcp* reuse, struct waiting_tcp* w)
422{
423#ifdef UNBOUND_DEBUG
424 rbnode_type* added;
425#endif
426 log_assert(w->id_node.key == NULL);
427 w->id_node.key = w;
428#ifdef UNBOUND_DEBUG
429 added =
430#else
431 (void)
432#endif
433 rbtree_insert(&reuse->tree_by_id, &w->id_node);
434 log_assert(added); /* should have been added */
435}
436
437/** find element in tree by id */
438struct waiting_tcp*
439reuse_tcp_by_id_find(struct reuse_tcp* reuse, uint16_t id)
440{
441 struct waiting_tcp key_w;
442 rbnode_type* n;
443 memset(&key_w, 0, sizeof(key_w));
444 key_w.id_node.key = &key_w;
445 key_w.id = id;
446 n = rbtree_search(&reuse->tree_by_id, &key_w);
447 if(!n) return NULL;
448 return (struct waiting_tcp*)n->key;
449}
450
451/** return ID value of rbnode in tree_by_id */
452static uint16_t
453tree_by_id_get_id(rbnode_type* node)
454{
455 struct waiting_tcp* w = (struct waiting_tcp*)node->key;
456 return w->id;
457}
458
459/** insert into reuse tcp tree and LRU, false on failure (duplicate) */
460int
461reuse_tcp_insert(struct outside_network* outnet, struct pending_tcp* pend_tcp)
462{
463 log_reuse_tcp(VERB_CLIENT, "reuse_tcp_insert", &pend_tcp->reuse);
464 if(pend_tcp->reuse.item_on_lru_list) {
465 if(!pend_tcp->reuse.node.key)
466 log_err("internal error: reuse_tcp_insert: "
467 "in lru list without key");
468 return 1;
469 }
470 pend_tcp->reuse.node.key = &pend_tcp->reuse;
471 pend_tcp->reuse.pending = pend_tcp;
472 if(!rbtree_insert(&outnet->tcp_reuse, &pend_tcp->reuse.node)) {
473 /* We are not in the LRU list but we are already in the
474 * tcp_reuse tree, strange.
475 * Continue to add ourselves to the LRU list. */
476 log_err("internal error: reuse_tcp_insert: in lru list but "
477 "not in the tree");
478 }
479 /* insert into LRU, first is newest */
480 pend_tcp->reuse.lru_prev = NULL;
481 if(outnet->tcp_reuse_first) {
482 pend_tcp->reuse.lru_next = outnet->tcp_reuse_first;
483 log_assert(pend_tcp->reuse.lru_next != &pend_tcp->reuse);
484 outnet->tcp_reuse_first->lru_prev = &pend_tcp->reuse;
485 log_assert(outnet->tcp_reuse_first->lru_prev !=
486 outnet->tcp_reuse_first);
487 } else {
488 pend_tcp->reuse.lru_next = NULL;
489 outnet->tcp_reuse_last = &pend_tcp->reuse;
490 }
491 outnet->tcp_reuse_first = &pend_tcp->reuse;
492 pend_tcp->reuse.item_on_lru_list = 1;
493 log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
494 (outnet->tcp_reuse_first && outnet->tcp_reuse_last));
495 log_assert(outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_next &&
496 outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_prev);
497 log_assert(outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_next &&
498 outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_prev);
499 return 1;
500}
501
502/** find reuse tcp stream to destination for query, or NULL if none */
503static struct reuse_tcp*
504reuse_tcp_find(struct outside_network* outnet, struct sockaddr_storage* addr,
505 socklen_t addrlen, int use_ssl)
506{
507 struct waiting_tcp key_w;
508 struct pending_tcp key_p;
509 struct comm_point c;
510 rbnode_type* result = NULL, *prev;
511 verbose(VERB_CLIENT, "reuse_tcp_find");
512 memset(&key_w, 0, sizeof(key_w));
513 memset(&key_p, 0, sizeof(key_p));
514 memset(&c, 0, sizeof(c));
515 key_p.query = &key_w;
516 key_p.c = &c;
517 key_p.reuse.pending = &key_p;
518 key_p.reuse.node.key = &key_p.reuse;
519 if(use_ssl)
520 key_p.reuse.is_ssl = 1;
521 if(addrlen > (socklen_t)sizeof(key_p.reuse.addr))
522 return NULL;
523 memmove(&key_p.reuse.addr, addr, addrlen);
524 key_p.reuse.addrlen = addrlen;
525
526 verbose(VERB_CLIENT, "reuse_tcp_find: num reuse streams %u",
527 (unsigned)outnet->tcp_reuse.count);
528 if(outnet->tcp_reuse.root == NULL ||
529 outnet->tcp_reuse.root == RBTREE_NULL)
530 return NULL;
531 if(rbtree_find_less_equal(&outnet->tcp_reuse, &key_p.reuse,
532 &result)) {
533 /* exact match */
534 /* but the key is on stack, and ptr is compared, impossible */
535 log_assert(&key_p.reuse != (struct reuse_tcp*)result);
536 log_assert(&key_p != ((struct reuse_tcp*)result)->pending);
537 }
538 /* not found, return null */
539 if(!result || result == RBTREE_NULL)
540 return NULL;
541 verbose(VERB_CLIENT, "reuse_tcp_find check inexact match");
542 /* inexact match, find one of possibly several connections to the
543 * same destination address, with the correct port, ssl, and
544 * also less than max number of open queries, or else, fail to open
545 * a new one */
546 /* rewind to start of sequence of same address,port,ssl */
547 prev = rbtree_previous(result);
548 while(prev && prev != RBTREE_NULL &&
549 reuse_cmp_addrportssl(prev->key, &key_p.reuse) == 0) {
550 result = prev;
551 prev = rbtree_previous(result);
552 }
553
554 /* loop to find first one that has correct characteristics */
555 while(result && result != RBTREE_NULL &&
556 reuse_cmp_addrportssl(result->key, &key_p.reuse) == 0) {
557 if(((struct reuse_tcp*)result)->tree_by_id.count <
558 outnet->max_reuse_tcp_queries) {
559 /* same address, port, ssl-yes-or-no, and has
560 * space for another query */
561 return (struct reuse_tcp*)result;
562 }
563 result = rbtree_next(result);
564 }
565 return NULL;
566}
567
568/** use the buffer to setup writing the query */
569static void
570outnet_tcp_take_query_setup(int s, struct pending_tcp* pend,
571 struct waiting_tcp* w)
572{
573 struct timeval tv;
574 verbose(VERB_CLIENT, "outnet_tcp_take_query_setup: setup packet to write "
575 "len %d timeout %d msec",
576 (int)w->pkt_len, w->timeout);
577 pend->c->tcp_write_pkt = w->pkt;
578 pend->c->tcp_write_pkt_len = w->pkt_len;
579 pend->c->tcp_write_and_read = 1;
580 pend->c->tcp_write_byte_count = 0;
581 pend->c->tcp_is_reading = 0;
582 comm_point_start_listening(pend->c, s, -1);
583 /* set timer on the waiting_tcp entry, this is the write timeout
584 * for the written packet. The timer on pend->c is the timer
585 * for when there is no written packet and we have readtimeouts */
586#ifndef S_SPLINT_S
587 tv.tv_sec = w->timeout/1000;
588 tv.tv_usec = (w->timeout%1000)*1000;
589#endif
590 /* if the waiting_tcp was previously waiting for a buffer in the
591 * outside_network.tcpwaitlist, then the timer is reset now that
592 * we start writing it */
593 comm_timer_set(w->timer, &tv);
594}
595
596/** use next free buffer to service a tcp query */
597static int
598outnet_tcp_take_into_use(struct waiting_tcp* w)
599{
600 struct pending_tcp* pend = w->outnet->tcp_free;
601 int s;
602 log_assert(pend);
603 log_assert(w->pkt);
604 log_assert(w->pkt_len > 0);
605 log_assert(w->addrlen > 0);
606 pend->c->tcp_do_toggle_rw = 0;
607 pend->c->tcp_do_close = 0;
608 /* open socket */
609 s = outnet_get_tcp_fd(&w->addr, w->addrlen, w->outnet->tcp_mss, w->outnet->ip_dscp);
610
611 if(s == -1)
612 return 0;
613
614 if(!pick_outgoing_tcp(pend, w, s))
615 return 0;
616
617 fd_set_nonblock(s);
618#ifdef USE_OSX_MSG_FASTOPEN
619 /* API for fast open is different here. We use a connectx() function and
620 then writes can happen as normal even using SSL.*/
621 /* connectx requires that the len be set in the sockaddr struct*/
622 struct sockaddr_in *addr_in = (struct sockaddr_in *)&w->addr;
623 addr_in->sin_len = w->addrlen;
624 sa_endpoints_t endpoints;
625 endpoints.sae_srcif = 0;
626 endpoints.sae_srcaddr = NULL;
627 endpoints.sae_srcaddrlen = 0;
628 endpoints.sae_dstaddr = (struct sockaddr *)&w->addr;
629 endpoints.sae_dstaddrlen = w->addrlen;
630 if (connectx(s, &endpoints, SAE_ASSOCID_ANY,
631 CONNECT_DATA_IDEMPOTENT | CONNECT_RESUME_ON_READ_WRITE,
632 NULL, 0, NULL, NULL) == -1) {
633 /* if fails, failover to connect for OSX 10.10 */
634#ifdef EINPROGRESS
635 if(errno != EINPROGRESS) {
636#else
637 if(1) {
638#endif
639 if(connect(s, (struct sockaddr*)&w->addr, w->addrlen) == -1) {
640#else /* USE_OSX_MSG_FASTOPEN*/
641#ifdef USE_MSG_FASTOPEN
642 pend->c->tcp_do_fastopen = 1;
643 /* Only do TFO for TCP in which case no connect() is required here.
644 Don't combine client TFO with SSL, since OpenSSL can't
645 currently support doing a handshake on fd that already isn't connected*/
646 if (w->outnet->sslctx && w->ssl_upstream) {
647 if(connect(s, (struct sockaddr*)&w->addr, w->addrlen) == -1) {
648#else /* USE_MSG_FASTOPEN*/
649 if(connect(s, (struct sockaddr*)&w->addr, w->addrlen) == -1) {
650#endif /* USE_MSG_FASTOPEN*/
651#endif /* USE_OSX_MSG_FASTOPEN*/
652#ifndef USE_WINSOCK
653#ifdef EINPROGRESS
654 if(errno != EINPROGRESS) {
655#else
656 if(1) {
657#endif
658 if(tcp_connect_errno_needs_log(
659 (struct sockaddr*)&w->addr, w->addrlen))
660 log_err_addr("outgoing tcp: connect",
661 strerror(errno), &w->addr, w->addrlen);
662 close(s);
663#else /* USE_WINSOCK */
664 if(WSAGetLastError() != WSAEINPROGRESS &&
665 WSAGetLastError() != WSAEWOULDBLOCK) {
666 closesocket(s);
667#endif
668 return 0;
669 }
670 }
671#ifdef USE_MSG_FASTOPEN
672 }
673#endif /* USE_MSG_FASTOPEN */
674#ifdef USE_OSX_MSG_FASTOPEN
675 }
676 }
677#endif /* USE_OSX_MSG_FASTOPEN */
678 if(w->outnet->sslctx && w->ssl_upstream) {
679 pend->c->ssl = outgoing_ssl_fd(w->outnet->sslctx, s);
680 if(!pend->c->ssl) {
681 pend->c->fd = s;
682 comm_point_close(pend->c);
683 return 0;
684 }
685 verbose(VERB_ALGO, "the query is using TLS encryption, for %s",
686 (w->tls_auth_name?w->tls_auth_name:"an unauthenticated connection"));
687#ifdef USE_WINSOCK
688 comm_point_tcp_win_bio_cb(pend->c, pend->c->ssl);
689#endif
690 pend->c->ssl_shake_state = comm_ssl_shake_write;
691 if(!set_auth_name_on_ssl(pend->c->ssl, w->tls_auth_name,
692 w->outnet->tls_use_sni)) {
693 pend->c->fd = s;
694#ifdef HAVE_SSL
695 SSL_free(pend->c->ssl);
696#endif
697 pend->c->ssl = NULL;
698 comm_point_close(pend->c);
699 return 0;
700 }
701 }
702 w->next_waiting = (void*)pend;
703 w->outnet->num_tcp_outgoing++;
704 w->outnet->tcp_free = pend->next_free;
705 pend->next_free = NULL;
706 pend->query = w;
707 pend->reuse.outnet = w->outnet;
708 pend->c->repinfo.addrlen = w->addrlen;
709 pend->c->tcp_more_read_again = &pend->reuse.cp_more_read_again;
710 pend->c->tcp_more_write_again = &pend->reuse.cp_more_write_again;
711 pend->reuse.cp_more_read_again = 0;
712 pend->reuse.cp_more_write_again = 0;
713 memcpy(&pend->c->repinfo.addr, &w->addr, w->addrlen);
714 pend->reuse.pending = pend;
715
716 /* Remove from tree in case the is_ssl will be different and causes the
717 * identity of the reuse_tcp to change; could result in nodes not being
718 * deleted from the tree (because the new identity does not match the
719 * previous node) but their ->key would be changed to NULL. */
720 if(pend->reuse.node.key)
721 reuse_tcp_remove_tree_list(w->outnet, &pend->reuse);
722
723 if(pend->c->ssl)
724 pend->reuse.is_ssl = 1;
725 else pend->reuse.is_ssl = 0;
726 /* insert in reuse by address tree if not already inserted there */
727 (void)reuse_tcp_insert(w->outnet, pend);
728 reuse_tree_by_id_insert(&pend->reuse, w);
729 outnet_tcp_take_query_setup(s, pend, w);
730 return 1;
731}
732
733/** Touch the lru of a reuse_tcp element, it is in use.
734 * This moves it to the front of the list, where it is not likely to
735 * be closed. Items at the back of the list are closed to make space. */
736void
737reuse_tcp_lru_touch(struct outside_network* outnet, struct reuse_tcp* reuse)
738{
739 if(!reuse->item_on_lru_list) {
740 log_err("internal error: we need to touch the lru_list but item not in list");
741 return; /* not on the list, no lru to modify */
742 }
743 log_assert(reuse->lru_prev ||
744 (!reuse->lru_prev && outnet->tcp_reuse_first == reuse));
745 if(!reuse->lru_prev)
746 return; /* already first in the list */
747 /* remove at current position */
748 /* since it is not first, there is a previous element */
749 reuse->lru_prev->lru_next = reuse->lru_next;
750 log_assert(reuse->lru_prev->lru_next != reuse->lru_prev);
751 if(reuse->lru_next)
752 reuse->lru_next->lru_prev = reuse->lru_prev;
753 else outnet->tcp_reuse_last = reuse->lru_prev;
754 log_assert(!reuse->lru_next || reuse->lru_next->lru_prev != reuse->lru_next);
755 log_assert(outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_next &&
756 outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_prev);
757 /* insert at the front */
758 reuse->lru_prev = NULL;
759 reuse->lru_next = outnet->tcp_reuse_first;
760 if(outnet->tcp_reuse_first) {
761 outnet->tcp_reuse_first->lru_prev = reuse;
762 }
763 log_assert(reuse->lru_next != reuse);
764 /* since it is not first, it is not the only element and
765 * lru_next is thus not NULL and thus reuse is now not the last in
766 * the list, so outnet->tcp_reuse_last does not need to be modified */
767 outnet->tcp_reuse_first = reuse;
768 log_assert(outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_next &&
769 outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_prev);
770 log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
771 (outnet->tcp_reuse_first && outnet->tcp_reuse_last));
772}
773
774/** Snip the last reuse_tcp element off of the LRU list */
775struct reuse_tcp*
776reuse_tcp_lru_snip(struct outside_network* outnet)
777{
778 struct reuse_tcp* reuse = outnet->tcp_reuse_last;
779 if(!reuse) return NULL;
780 /* snip off of LRU */
781 log_assert(reuse->lru_next == NULL);
782 if(reuse->lru_prev) {
783 outnet->tcp_reuse_last = reuse->lru_prev;
784 reuse->lru_prev->lru_next = NULL;
785 } else {
786 outnet->tcp_reuse_last = NULL;
787 outnet->tcp_reuse_first = NULL;
788 }
789 log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
790 (outnet->tcp_reuse_first && outnet->tcp_reuse_last));
791 reuse->item_on_lru_list = 0;
792 reuse->lru_next = NULL;
793 reuse->lru_prev = NULL;
794 return reuse;
795}
796
797/** call callback on waiting_tcp, if not NULL */
798static void
799waiting_tcp_callback(struct waiting_tcp* w, struct comm_point* c, int error,
800 struct comm_reply* reply_info)
801{
802 if(w && w->cb) {
803 fptr_ok(fptr_whitelist_pending_tcp(w->cb));
804 (void)(*w->cb)(c, w->cb_arg, error, reply_info);
805 }
806}
807
808/** add waiting_tcp element to the outnet tcp waiting list */
809static void
810outnet_add_tcp_waiting(struct outside_network* outnet, struct waiting_tcp* w)
811{
812 struct timeval tv;
813 log_assert(!w->on_tcp_waiting_list);
814 if(w->on_tcp_waiting_list)
815 return;
816 w->next_waiting = NULL;
817 if(outnet->tcp_wait_last)
818 outnet->tcp_wait_last->next_waiting = w;
819 else outnet->tcp_wait_first = w;
820 outnet->tcp_wait_last = w;
821 w->on_tcp_waiting_list = 1;
822#ifndef S_SPLINT_S
823 tv.tv_sec = w->timeout/1000;
824 tv.tv_usec = (w->timeout%1000)*1000;
825#endif
826 comm_timer_set(w->timer, &tv);
827}
828
829/** add waiting_tcp element as first to the outnet tcp waiting list */
830static void
831outnet_add_tcp_waiting_first(struct outside_network* outnet,
832 struct waiting_tcp* w, int reset_timer)
833{
834 struct timeval tv;
835 log_assert(!w->on_tcp_waiting_list);
836 if(w->on_tcp_waiting_list)
837 return;
838 w->next_waiting = outnet->tcp_wait_first;
839 if(!outnet->tcp_wait_last)
840 outnet->tcp_wait_last = w;
841 outnet->tcp_wait_first = w;
842 w->on_tcp_waiting_list = 1;
843 if(reset_timer) {
844#ifndef S_SPLINT_S
845 tv.tv_sec = w->timeout/1000;
846 tv.tv_usec = (w->timeout%1000)*1000;
847#endif
848 comm_timer_set(w->timer, &tv);
849 }
850 log_assert(
851 (!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
852 (outnet->tcp_reuse_first && outnet->tcp_reuse_last));
853}
854
855/** see if buffers can be used to service TCP queries */
856static void
857use_free_buffer(struct outside_network* outnet)
858{
859 struct waiting_tcp* w;
860 while(outnet->tcp_wait_first && !outnet->want_to_quit) {
861#ifdef USE_DNSTAP
862 struct pending_tcp* pend_tcp = NULL;
863#endif
864 struct reuse_tcp* reuse = NULL;
865 w = outnet->tcp_wait_first;
866 log_assert(w->on_tcp_waiting_list);
867 outnet->tcp_wait_first = w->next_waiting;
868 if(outnet->tcp_wait_last == w)
869 outnet->tcp_wait_last = NULL;
870 log_assert(
871 (!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
872 (outnet->tcp_reuse_first && outnet->tcp_reuse_last));
873 w->on_tcp_waiting_list = 0;
874 reuse = reuse_tcp_find(outnet, &w->addr, w->addrlen,
875 w->ssl_upstream);
876 /* re-select an ID when moving to a new TCP buffer */
877 w->id = tcp_select_id(outnet, reuse);
878 LDNS_ID_SET(w->pkt, w->id);
879 if(reuse) {
880 log_reuse_tcp(VERB_CLIENT, "use free buffer for waiting tcp: "
881 "found reuse", reuse);
882#ifdef USE_DNSTAP
883 pend_tcp = reuse->pending;
884#endif
885 reuse_tcp_lru_touch(outnet, reuse);
886 comm_timer_disable(w->timer);
887 w->next_waiting = (void*)reuse->pending;
888 reuse_tree_by_id_insert(reuse, w);
889 if(reuse->pending->query) {
890 /* on the write wait list */
891 reuse_write_wait_push_back(reuse, w);
892 } else {
893 /* write straight away */
894 /* stop the timer on read of the fd */
895 comm_point_stop_listening(reuse->pending->c);
896 reuse->pending->query = w;
897 outnet_tcp_take_query_setup(
898 reuse->pending->c->fd, reuse->pending,
899 w);
900 }
901 } else if(outnet->tcp_free) {
902 struct pending_tcp* pend = w->outnet->tcp_free;
903 rbtree_init(&pend->reuse.tree_by_id, reuse_id_cmp);
904 pend->reuse.pending = pend;
905 memcpy(&pend->reuse.addr, &w->addr, w->addrlen);
906 pend->reuse.addrlen = w->addrlen;
907 if(!outnet_tcp_take_into_use(w)) {
908 waiting_tcp_callback(w, NULL, NETEVENT_CLOSED,
909 NULL);
910 waiting_tcp_delete(w);
911#ifdef USE_DNSTAP
912 w = NULL;
913#endif
914 }
915#ifdef USE_DNSTAP
916 pend_tcp = pend;
917#endif
918 } else {
919 /* no reuse and no free buffer, put back at the start */
920 outnet_add_tcp_waiting_first(outnet, w, 0);
921 break;
922 }
923#ifdef USE_DNSTAP
924 if(outnet->dtenv && pend_tcp && w && w->sq &&
925 (outnet->dtenv->log_resolver_query_messages ||
926 outnet->dtenv->log_forwarder_query_messages)) {
927 sldns_buffer tmp;
928 sldns_buffer_init_frm_data(&tmp, w->pkt, w->pkt_len);
929 dt_msg_send_outside_query(outnet->dtenv, &w->sq->addr,
930 &pend_tcp->pi->addr, comm_tcp, w->sq->zone,
931 w->sq->zonelen, &tmp);
932 }
933#endif
934 }
935}
936
937/** delete element from tree by id */
938static void
939reuse_tree_by_id_delete(struct reuse_tcp* reuse, struct waiting_tcp* w)
940{
941#ifdef UNBOUND_DEBUG
942 rbnode_type* rem;
943#endif
944 log_assert(w->id_node.key != NULL);
945#ifdef UNBOUND_DEBUG
946 rem =
947#else
948 (void)
949#endif
950 rbtree_delete(&reuse->tree_by_id, w);
951 log_assert(rem); /* should have been there */
952 w->id_node.key = NULL;
953}
954
955/** move writewait list to go for another connection. */
956static void
957reuse_move_writewait_away(struct outside_network* outnet,
958 struct pending_tcp* pend)
959{
960 /* the writewait list has not been written yet, so if the
961 * stream was closed, they have not actually been failed, only
962 * the queries written. Other queries can get written to another
963 * stream. For upstreams that do not support multiple queries
964 * and answers, the stream can get closed, and then the queries
965 * can get written on a new socket */
966 struct waiting_tcp* w;
967 if(pend->query && pend->query->error_count == 0 &&
968 pend->c->tcp_write_pkt == pend->query->pkt &&
969 pend->c->tcp_write_pkt_len == pend->query->pkt_len) {
970 /* since the current query is not written, it can also
971 * move to a free buffer */
972 if(verbosity >= VERB_CLIENT && pend->query->pkt_len > 12+2+2 &&
973 LDNS_QDCOUNT(pend->query->pkt) > 0 &&
974 dname_valid(pend->query->pkt+12, pend->query->pkt_len-12)) {
975 char buf[LDNS_MAX_DOMAINLEN+1];
976 dname_str(pend->query->pkt+12, buf);
977 verbose(VERB_CLIENT, "reuse_move_writewait_away current %s %d bytes were written",
978 buf, (int)pend->c->tcp_write_byte_count);
979 }
980 pend->c->tcp_write_pkt = NULL;
981 pend->c->tcp_write_pkt_len = 0;
982 pend->c->tcp_write_and_read = 0;
983 pend->reuse.cp_more_read_again = 0;
984 pend->reuse.cp_more_write_again = 0;
985 pend->c->tcp_is_reading = 1;
986 w = pend->query;
987 pend->query = NULL;
988 /* increase error count, so that if the next socket fails too
989 * the server selection is run again with this query failed
990 * and it can select a different server (if possible), or
991 * fail the query */
992 w->error_count ++;
993 reuse_tree_by_id_delete(&pend->reuse, w);
994 outnet_add_tcp_waiting(outnet, w);
995 }
996 while((w = reuse_write_wait_pop(&pend->reuse)) != NULL) {
997 if(verbosity >= VERB_CLIENT && w->pkt_len > 12+2+2 &&
998 LDNS_QDCOUNT(w->pkt) > 0 &&
999 dname_valid(w->pkt+12, w->pkt_len-12)) {
1000 char buf[LDNS_MAX_DOMAINLEN+1];
1001 dname_str(w->pkt+12, buf);
1002 verbose(VERB_CLIENT, "reuse_move_writewait_away item %s", buf);
1003 }
1004 reuse_tree_by_id_delete(&pend->reuse, w);
1005 outnet_add_tcp_waiting(outnet, w);
1006 }
1007}
1008
1009/** remove reused element from tree and lru list */
1010void
1011reuse_tcp_remove_tree_list(struct outside_network* outnet,
1012 struct reuse_tcp* reuse)
1013{
1014 verbose(VERB_CLIENT, "reuse_tcp_remove_tree_list");
1015 if(reuse->node.key) {
1016 /* delete it from reuse tree */
1017 if(!rbtree_delete(&outnet->tcp_reuse, reuse)) {
1018 /* should not be possible, it should be there */
1019 char buf[256];
1020 addr_to_str(&reuse->addr, reuse->addrlen, buf,
1021 sizeof(buf));
1022 log_err("reuse tcp delete: node not present, internal error, %s ssl %d lru %d", buf, reuse->is_ssl, reuse->item_on_lru_list);
1023 }
1024 reuse->node.key = NULL;
1025 /* defend against loops on broken tree by zeroing the
1026 * rbnode structure */
1027 memset(&reuse->node, 0, sizeof(reuse->node));
1028 }
1029 /* delete from reuse list */
1030 if(reuse->item_on_lru_list) {
1031 if(reuse->lru_prev) {
1032 /* assert that members of the lru list are waiting
1033 * and thus have a pending pointer to the struct */
1034 log_assert(reuse->lru_prev->pending);
1035 reuse->lru_prev->lru_next = reuse->lru_next;
1036 log_assert(reuse->lru_prev->lru_next != reuse->lru_prev);
1037 } else {
1038 log_assert(!reuse->lru_next || reuse->lru_next->pending);
1039 outnet->tcp_reuse_first = reuse->lru_next;
1040 log_assert(!outnet->tcp_reuse_first ||
1041 (outnet->tcp_reuse_first !=
1042 outnet->tcp_reuse_first->lru_next &&
1043 outnet->tcp_reuse_first !=
1044 outnet->tcp_reuse_first->lru_prev));
1045 }
1046 if(reuse->lru_next) {
1047 /* assert that members of the lru list are waiting
1048 * and thus have a pending pointer to the struct */
1049 log_assert(reuse->lru_next->pending);
1050 reuse->lru_next->lru_prev = reuse->lru_prev;
1051 log_assert(reuse->lru_next->lru_prev != reuse->lru_next);
1052 } else {
1053 log_assert(!reuse->lru_prev || reuse->lru_prev->pending);
1054 outnet->tcp_reuse_last = reuse->lru_prev;
1055 log_assert(!outnet->tcp_reuse_last ||
1056 (outnet->tcp_reuse_last !=
1057 outnet->tcp_reuse_last->lru_next &&
1058 outnet->tcp_reuse_last !=
1059 outnet->tcp_reuse_last->lru_prev));
1060 }
1061 log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
1062 (outnet->tcp_reuse_first && outnet->tcp_reuse_last));
1063 reuse->item_on_lru_list = 0;
1064 reuse->lru_next = NULL;
1065 reuse->lru_prev = NULL;
1066 }
1067 reuse->pending = NULL;
1068}
1069
1070/** helper function that deletes an element from the tree of readwait
1071 * elements in tcp reuse structure */
1072static void reuse_del_readwait_elem(rbnode_type* node, void* ATTR_UNUSED(arg))
1073{
1074 struct waiting_tcp* w = (struct waiting_tcp*)node->key;
1075 waiting_tcp_delete(w);
1076}
1077
1078/** delete readwait waiting_tcp elements, deletes the elements in the list */
1079void reuse_del_readwait(rbtree_type* tree_by_id)
1080{
1081 if(tree_by_id->root == NULL ||
1082 tree_by_id->root == RBTREE_NULL)
1083 return;
1084 traverse_postorder(tree_by_id, &reuse_del_readwait_elem, NULL);
1085 rbtree_init(tree_by_id, reuse_id_cmp);
1086}
1087
1088/** decommission a tcp buffer, closes commpoint and frees waiting_tcp entry */
1089static void
1090decommission_pending_tcp(struct outside_network* outnet,
1091 struct pending_tcp* pend)
1092{
1093 verbose(VERB_CLIENT, "decommission_pending_tcp");
1094 /* A certain code path can lead here twice for the same pending_tcp
1095 * creating a loop in the free pending_tcp list. */
1096 if(outnet->tcp_free != pend) {
1097 pend->next_free = outnet->tcp_free;
1098 outnet->tcp_free = pend;
1099 }
1100 if(pend->reuse.node.key) {
1101 /* needs unlink from the reuse tree to get deleted */
1102 reuse_tcp_remove_tree_list(outnet, &pend->reuse);
1103 }
1104 /* free SSL structure after remove from outnet tcp reuse tree,
1105 * because the c->ssl null or not is used for sorting in the tree */
1106 if(pend->c->ssl) {
1107#ifdef HAVE_SSL
1108 SSL_shutdown(pend->c->ssl);
1109 SSL_free(pend->c->ssl);
1110 pend->c->ssl = NULL;
1111#endif
1112 }
1113 comm_point_close(pend->c);
1114 pend->reuse.cp_more_read_again = 0;
1115 pend->reuse.cp_more_write_again = 0;
1116 /* unlink the query and writewait list, it is part of the tree
1117 * nodes and is deleted */
1118 pend->query = NULL;
1119 pend->reuse.write_wait_first = NULL;
1120 pend->reuse.write_wait_last = NULL;
1121 reuse_del_readwait(&pend->reuse.tree_by_id);
1122}
1123
1124/** perform failure callbacks for waiting queries in reuse read rbtree */
1125static void reuse_cb_readwait_for_failure(rbtree_type* tree_by_id, int err)
1126{
1127 rbnode_type* node;
1128 if(tree_by_id->root == NULL ||
1129 tree_by_id->root == RBTREE_NULL)
1130 return;
1131 node = rbtree_first(tree_by_id);
1132 while(node && node != RBTREE_NULL) {
1133 struct waiting_tcp* w = (struct waiting_tcp*)node->key;
1134 waiting_tcp_callback(w, NULL, err, NULL);
1135 node = rbtree_next(node);
1136 }
1137}
1138
1139/** perform callbacks for failure and also decommission pending tcp.
1140 * the callbacks remove references in sq->pending to the waiting_tcp
1141 * members of the tree_by_id in the pending tcp. The pending_tcp is
1142 * removed before the callbacks, so that the callbacks do not modify
1143 * the pending_tcp due to its reference in the outside_network reuse tree */
1144static void reuse_cb_and_decommission(struct outside_network* outnet,
1145 struct pending_tcp* pend, int error)
1146{
1147 rbtree_type store;
1148 store = pend->reuse.tree_by_id;
1149 pend->query = NULL;
1150 rbtree_init(&pend->reuse.tree_by_id, reuse_id_cmp);
1151 pend->reuse.write_wait_first = NULL;
1152 pend->reuse.write_wait_last = NULL;
1153 decommission_pending_tcp(outnet, pend);
1154 reuse_cb_readwait_for_failure(&store, error);
1155 reuse_del_readwait(&store);
1156}
1157
1158/** set timeout on tcp fd and setup read event to catch incoming dns msgs */
1159static void
1160reuse_tcp_setup_timeout(struct pending_tcp* pend_tcp, int tcp_reuse_timeout)
1161{
1162 log_reuse_tcp(VERB_CLIENT, "reuse_tcp_setup_timeout", &pend_tcp->reuse);
1163 comm_point_start_listening(pend_tcp->c, -1, tcp_reuse_timeout);
1164}
1165
1166/** set timeout on tcp fd and setup read event to catch incoming dns msgs */
1167static void
1168reuse_tcp_setup_read_and_timeout(struct pending_tcp* pend_tcp, int tcp_reuse_timeout)
1169{
1170 log_reuse_tcp(VERB_CLIENT, "reuse_tcp_setup_readtimeout", &pend_tcp->reuse);
1171 sldns_buffer_clear(pend_tcp->c->buffer);
1172 pend_tcp->c->tcp_is_reading = 1;
1173 pend_tcp->c->tcp_byte_count = 0;
1174 comm_point_stop_listening(pend_tcp->c);
1175 comm_point_start_listening(pend_tcp->c, -1, tcp_reuse_timeout);
1176}
1177
1178int
1179outnet_tcp_cb(struct comm_point* c, void* arg, int error,
1180 struct comm_reply *reply_info)
1181{
1182 struct pending_tcp* pend = (struct pending_tcp*)arg;
1183 struct outside_network* outnet = pend->reuse.outnet;
1184 struct waiting_tcp* w = NULL;
1185 log_assert(pend->reuse.item_on_lru_list && pend->reuse.node.key);
1186 verbose(VERB_ALGO, "outnettcp cb");
1187 if(error == NETEVENT_TIMEOUT) {
1188 if(pend->c->tcp_write_and_read) {
1189 verbose(VERB_QUERY, "outnettcp got tcp timeout "
1190 "for read, ignored because write underway");
1191 /* if we are writing, ignore readtimer, wait for write timer
1192 * or write is done */
1193 return 0;
1194 } else {
1195 verbose(VERB_QUERY, "outnettcp got tcp timeout %s",
1196 (pend->reuse.tree_by_id.count?"for reading pkt":
1197 "for keepalive for reuse"));
1198 }
1199 /* must be timeout for reading or keepalive reuse,
1200 * close it. */
1201 reuse_tcp_remove_tree_list(outnet, &pend->reuse);
1202 } else if(error == NETEVENT_PKT_WRITTEN) {
1203 /* the packet we want to write has been written. */
1204 verbose(VERB_ALGO, "outnet tcp pkt was written event");
1205 log_assert(c == pend->c);
1206 log_assert(pend->query->pkt == pend->c->tcp_write_pkt);
1207 log_assert(pend->query->pkt_len == pend->c->tcp_write_pkt_len);
1208 pend->c->tcp_write_pkt = NULL;
1209 pend->c->tcp_write_pkt_len = 0;
1210 /* the pend.query is already in tree_by_id */
1211 log_assert(pend->query->id_node.key);
1212 pend->query = NULL;
1213 /* setup to write next packet or setup read timeout */
1214 if(pend->reuse.write_wait_first) {
1215 verbose(VERB_ALGO, "outnet tcp setup next pkt");
1216 /* we can write it straight away perhaps, set flag
1217 * because this callback called after a tcp write
1218 * succeeded and likely more buffer space is available
1219 * and we can write some more. */
1220 pend->reuse.cp_more_write_again = 1;
1221 pend->query = reuse_write_wait_pop(&pend->reuse);
1222 comm_point_stop_listening(pend->c);
1223 outnet_tcp_take_query_setup(pend->c->fd, pend,
1224 pend->query);
1225 } else {
1226 verbose(VERB_ALGO, "outnet tcp writes done, wait");
1227 pend->c->tcp_write_and_read = 0;
1228 pend->reuse.cp_more_read_again = 0;
1229 pend->reuse.cp_more_write_again = 0;
1230 pend->c->tcp_is_reading = 1;
1231 comm_point_stop_listening(pend->c);
1232 reuse_tcp_setup_timeout(pend, outnet->tcp_reuse_timeout);
1233 }
1234 return 0;
1235 } else if(error != NETEVENT_NOERROR) {
1236 verbose(VERB_QUERY, "outnettcp got tcp error %d", error);
1237 reuse_move_writewait_away(outnet, pend);
1238 /* pass error below and exit */
1239 } else {
1240 /* check ID */
1241 if(sldns_buffer_limit(c->buffer) < sizeof(uint16_t)) {
1242 log_addr(VERB_QUERY,
1243 "outnettcp: bad ID in reply, too short, from:",
1244 &pend->reuse.addr, pend->reuse.addrlen);
1245 error = NETEVENT_CLOSED;
1246 } else {
1247 uint16_t id = LDNS_ID_WIRE(sldns_buffer_begin(
1248 c->buffer));
1249 /* find the query the reply is for */
1250 w = reuse_tcp_by_id_find(&pend->reuse, id);
1251 }
1252 }
1253 if(error == NETEVENT_NOERROR && !w) {
1254 /* no struct waiting found in tree, no reply to call */
1255 log_addr(VERB_QUERY, "outnettcp: bad ID in reply, from:",
1256 &pend->reuse.addr, pend->reuse.addrlen);
1257 error = NETEVENT_CLOSED;
1258 }
1259 if(error == NETEVENT_NOERROR) {
1260 /* add to reuse tree so it can be reused, if not a failure.
1261 * This is possible if the state machine wants to make a tcp
1262 * query again to the same destination. */
1263 if(outnet->tcp_reuse.count < outnet->tcp_reuse_max) {
1264 (void)reuse_tcp_insert(outnet, pend);
1265 }
1266 }
1267 if(w) {
1268 reuse_tree_by_id_delete(&pend->reuse, w);
1269 verbose(VERB_CLIENT, "outnet tcp callback query err %d buflen %d",
1270 error, (int)sldns_buffer_limit(c->buffer));
1271 waiting_tcp_callback(w, c, error, reply_info);
1272 waiting_tcp_delete(w);
1273 }
1274 verbose(VERB_CLIENT, "outnet_tcp_cb reuse after cb");
1275 if(error == NETEVENT_NOERROR && pend->reuse.node.key) {
1276 verbose(VERB_CLIENT, "outnet_tcp_cb reuse after cb: keep it");
1277 /* it is in the reuse_tcp tree, with other queries, or
1278 * on the empty list. do not decommission it */
1279 /* if there are more outstanding queries, we could try to
1280 * read again, to see if it is on the input,
1281 * because this callback called after a successful read
1282 * and there could be more bytes to read on the input */
1283 if(pend->reuse.tree_by_id.count != 0)
1284 pend->reuse.cp_more_read_again = 1;
1285 reuse_tcp_setup_read_and_timeout(pend, outnet->tcp_reuse_timeout);
1286 return 0;
1287 }
1288 verbose(VERB_CLIENT, "outnet_tcp_cb reuse after cb: decommission it");
1289 /* no queries on it, no space to keep it. or timeout or closed due
1290 * to error. Close it */
1291 reuse_cb_and_decommission(outnet, pend, (error==NETEVENT_TIMEOUT?
1292 NETEVENT_TIMEOUT:NETEVENT_CLOSED));
1293 use_free_buffer(outnet);
1294 return 0;
1295}
1296
1297/** lower use count on pc, see if it can be closed */
1298static void
1299portcomm_loweruse(struct outside_network* outnet, struct port_comm* pc)
1300{
1301 struct port_if* pif;
1302 pc->num_outstanding--;
1303 if(pc->num_outstanding > 0) {
1304 return;
1305 }
1306 /* close it and replace in unused list */
1307 verbose(VERB_ALGO, "close of port %d", pc->number);
1308 comm_point_close(pc->cp);
1309 pif = pc->pif;
1310 log_assert(pif->inuse > 0);
1311#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
1312 pif->avail_ports[pif->avail_total - pif->inuse] = pc->number;
1313#endif
1314 pif->inuse--;
1315 pif->out[pc->index] = pif->out[pif->inuse];
1316 pif->out[pc->index]->index = pc->index;
1317 pc->next = outnet->unused_fds;
1318 outnet->unused_fds = pc;
1319}
1320
1321/** try to send waiting UDP queries */
1322static void
1323outnet_send_wait_udp(struct outside_network* outnet)
1324{
1325 struct pending* pend;
1326 /* process waiting queries */
1327 while(outnet->udp_wait_first && outnet->unused_fds
1328 && !outnet->want_to_quit) {
1329 pend = outnet->udp_wait_first;
1330 outnet->udp_wait_first = pend->next_waiting;
1331 if(!pend->next_waiting) outnet->udp_wait_last = NULL;
1332 sldns_buffer_clear(outnet->udp_buff);
1333 sldns_buffer_write(outnet->udp_buff, pend->pkt, pend->pkt_len);
1334 sldns_buffer_flip(outnet->udp_buff);
1335 free(pend->pkt); /* freeing now makes get_mem correct */
1336 pend->pkt = NULL;
1337 pend->pkt_len = 0;
1338 if(!randomize_and_send_udp(pend, outnet->udp_buff,
1339 pend->timeout)) {
1340 /* callback error on pending */
1341 if(pend->cb) {
1342 fptr_ok(fptr_whitelist_pending_udp(pend->cb));
1343 (void)(*pend->cb)(outnet->unused_fds->cp, pend->cb_arg,
1344 NETEVENT_CLOSED, NULL);
1345 }
1346 pending_delete(outnet, pend);
1347 }
1348 }
1349}
1350
1351int
1352outnet_udp_cb(struct comm_point* c, void* arg, int error,
1353 struct comm_reply *reply_info)
1354{
1355 struct outside_network* outnet = (struct outside_network*)arg;
1356 struct pending key;
1357 struct pending* p;
1358 verbose(VERB_ALGO, "answer cb");
1359
1360 if(error != NETEVENT_NOERROR) {
1361 verbose(VERB_QUERY, "outnetudp got udp error %d", error);
1362 return 0;
1363 }
1364 if(sldns_buffer_limit(c->buffer) < LDNS_HEADER_SIZE) {
1365 verbose(VERB_QUERY, "outnetudp udp too short");
1366 return 0;
1367 }
1368 log_assert(reply_info);
1369
1370 /* setup lookup key */
1371 key.id = (unsigned)LDNS_ID_WIRE(sldns_buffer_begin(c->buffer));
1372 memcpy(&key.addr, &reply_info->addr, reply_info->addrlen);
1373 key.addrlen = reply_info->addrlen;
1374 verbose(VERB_ALGO, "Incoming reply id = %4.4x", key.id);
1375 log_addr(VERB_ALGO, "Incoming reply addr =",
1376 &reply_info->addr, reply_info->addrlen);
1377
1378 /* find it, see if this thing is a valid query response */
1379 verbose(VERB_ALGO, "lookup size is %d entries", (int)outnet->pending->count);
1380 p = (struct pending*)rbtree_search(outnet->pending, &key);
1381 if(!p) {
1382 verbose(VERB_QUERY, "received unwanted or unsolicited udp reply dropped.");
1383 log_buf(VERB_ALGO, "dropped message", c->buffer);
1384 outnet->unwanted_replies++;
1385 if(outnet->unwanted_threshold && ++outnet->unwanted_total
1386 >= outnet->unwanted_threshold) {
1387 log_warn("unwanted reply total reached threshold (%u)"
1388 " you may be under attack."
1389 " defensive action: clearing the cache",
1390 (unsigned)outnet->unwanted_threshold);
1391 fptr_ok(fptr_whitelist_alloc_cleanup(
1392 outnet->unwanted_action));
1393 (*outnet->unwanted_action)(outnet->unwanted_param);
1394 outnet->unwanted_total = 0;
1395 }
1396 return 0;
1397 }
1398
1399 verbose(VERB_ALGO, "received udp reply.");
1400 log_buf(VERB_ALGO, "udp message", c->buffer);
1401 if(p->pc->cp != c) {
1402 verbose(VERB_QUERY, "received reply id,addr on wrong port. "
1403 "dropped.");
1404 outnet->unwanted_replies++;
1405 if(outnet->unwanted_threshold && ++outnet->unwanted_total
1406 >= outnet->unwanted_threshold) {
1407 log_warn("unwanted reply total reached threshold (%u)"
1408 " you may be under attack."
1409 " defensive action: clearing the cache",
1410 (unsigned)outnet->unwanted_threshold);
1411 fptr_ok(fptr_whitelist_alloc_cleanup(
1412 outnet->unwanted_action));
1413 (*outnet->unwanted_action)(outnet->unwanted_param);
1414 outnet->unwanted_total = 0;
1415 }
1416 return 0;
1417 }
1418 comm_timer_disable(p->timer);
1419 verbose(VERB_ALGO, "outnet handle udp reply");
1420 /* delete from tree first in case callback creates a retry */
1421 (void)rbtree_delete(outnet->pending, p->node.key);
1422 if(p->cb) {
1423 fptr_ok(fptr_whitelist_pending_udp(p->cb));
1424 (void)(*p->cb)(p->pc->cp, p->cb_arg, NETEVENT_NOERROR, reply_info);
1425 }
1426 portcomm_loweruse(outnet, p->pc);
1427 pending_delete(NULL, p);
1428 outnet_send_wait_udp(outnet);
1429 return 0;
1430}
1431
1432/** calculate number of ip4 and ip6 interfaces*/
1433static void
1434calc_num46(char** ifs, int num_ifs, int do_ip4, int do_ip6,
1435 int* num_ip4, int* num_ip6)
1436{
1437 int i;
1438 *num_ip4 = 0;
1439 *num_ip6 = 0;
1440 if(num_ifs <= 0) {
1441 if(do_ip4)
1442 *num_ip4 = 1;
1443 if(do_ip6)
1444 *num_ip6 = 1;
1445 return;
1446 }
1447 for(i=0; i<num_ifs; i++)
1448 {
1449 if(str_is_ip6(ifs[i])) {
1450 if(do_ip6)
1451 (*num_ip6)++;
1452 } else {
1453 if(do_ip4)
1454 (*num_ip4)++;
1455 }
1456 }
1457
1458}
1459
1460void
1461pending_udp_timer_delay_cb(void* arg)
1462{
1463 struct pending* p = (struct pending*)arg;
1464 struct outside_network* outnet = p->outnet;
1465 verbose(VERB_ALGO, "timeout udp with delay");
1466 portcomm_loweruse(outnet, p->pc);
1467 pending_delete(outnet, p);
1468 outnet_send_wait_udp(outnet);
1469}
1470
1471void
1472pending_udp_timer_cb(void *arg)
1473{
1474 struct pending* p = (struct pending*)arg;
1475 struct outside_network* outnet = p->outnet;
1476 /* it timed out */
1477 verbose(VERB_ALGO, "timeout udp");
1478 if(p->cb) {
1479 fptr_ok(fptr_whitelist_pending_udp(p->cb));
1480 (void)(*p->cb)(p->pc->cp, p->cb_arg, NETEVENT_TIMEOUT, NULL);
1481 }
1482 /* if delayclose, keep port open for a longer time.
1483 * But if the udpwaitlist exists, then we are struggling to
1484 * keep up with demand for sockets, so do not wait, but service
1485 * the customer (customer service more important than portICMPs) */
1486 if(outnet->delayclose && !outnet->udp_wait_first) {
1487 p->cb = NULL;
1488 p->timer->callback = &pending_udp_timer_delay_cb;
1489 comm_timer_set(p->timer, &outnet->delay_tv);
1490 return;
1491 }
1492 portcomm_loweruse(outnet, p->pc);
1493 pending_delete(outnet, p);
1494 outnet_send_wait_udp(outnet);
1495}
1496
1497/** create pending_tcp buffers */
1498static int
1499create_pending_tcp(struct outside_network* outnet, size_t bufsize)
1500{
1501 size_t i;
1502 if(outnet->num_tcp == 0)
1503 return 1; /* no tcp needed, nothing to do */
1504 if(!(outnet->tcp_conns = (struct pending_tcp **)calloc(
1505 outnet->num_tcp, sizeof(struct pending_tcp*))))
1506 return 0;
1507 for(i=0; i<outnet->num_tcp; i++) {
1508 if(!(outnet->tcp_conns[i] = (struct pending_tcp*)calloc(1,
1509 sizeof(struct pending_tcp))))
1510 return 0;
1511 outnet->tcp_conns[i]->next_free = outnet->tcp_free;
1512 outnet->tcp_free = outnet->tcp_conns[i];
1513 outnet->tcp_conns[i]->c = comm_point_create_tcp_out(
1514 outnet->base, bufsize, outnet_tcp_cb,
1515 outnet->tcp_conns[i]);
1516 if(!outnet->tcp_conns[i]->c)
1517 return 0;
1518 }
1519 return 1;
1520}
1521
1522/** setup an outgoing interface, ready address */
1523static int setup_if(struct port_if* pif, const char* addrstr,
1524 int* avail, int numavail, size_t numfd)
1525{
1526#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
1527 pif->avail_total = numavail;
1528 pif->avail_ports = (int*)memdup(avail, (size_t)numavail*sizeof(int));
1529 if(!pif->avail_ports)
1530 return 0;
1531#endif
1532 if(!ipstrtoaddr(addrstr, UNBOUND_DNS_PORT, &pif->addr, &pif->addrlen) &&
1533 !netblockstrtoaddr(addrstr, UNBOUND_DNS_PORT,
1534 &pif->addr, &pif->addrlen, &pif->pfxlen))
1535 return 0;
1536 pif->maxout = (int)numfd;
1537 pif->inuse = 0;
1538 pif->out = (struct port_comm**)calloc(numfd,
1539 sizeof(struct port_comm*));
1540 if(!pif->out)
1541 return 0;
1542 return 1;
1543}
1544
1545struct outside_network*
1546outside_network_create(struct comm_base *base, size_t bufsize,
1547 size_t num_ports, char** ifs, int num_ifs, int do_ip4,
1548 int do_ip6, size_t num_tcp, int dscp, struct infra_cache* infra,
1549 struct ub_randstate* rnd, int use_caps_for_id, int* availports,
1550 int numavailports, size_t unwanted_threshold, int tcp_mss,
1551 void (*unwanted_action)(void*), void* unwanted_param, int do_udp,
1552 void* sslctx, int delayclose, int tls_use_sni, struct dt_env* dtenv,
1553 int udp_connect, int max_reuse_tcp_queries, int tcp_reuse_timeout,
1554 int tcp_auth_query_timeout)
1555{
1556 struct outside_network* outnet = (struct outside_network*)
1557 calloc(1, sizeof(struct outside_network));
1558 size_t k;
1559 if(!outnet) {
1560 log_err("malloc failed");
1561 return NULL;
1562 }
1563 comm_base_timept(base, &outnet->now_secs, &outnet->now_tv);
1564 outnet->base = base;
1565 outnet->num_tcp = num_tcp;
1566 outnet->max_reuse_tcp_queries = max_reuse_tcp_queries;
1567 outnet->tcp_reuse_timeout= tcp_reuse_timeout;
1568 outnet->tcp_auth_query_timeout = tcp_auth_query_timeout;
1569 outnet->num_tcp_outgoing = 0;
1570 outnet->infra = infra;
1571 outnet->rnd = rnd;
1572 outnet->sslctx = sslctx;
1573 outnet->tls_use_sni = tls_use_sni;
1574#ifdef USE_DNSTAP
1575 outnet->dtenv = dtenv;
1576#else
1577 (void)dtenv;
1578#endif
1579 outnet->svcd_overhead = 0;
1580 outnet->want_to_quit = 0;
1581 outnet->unwanted_threshold = unwanted_threshold;
1582 outnet->unwanted_action = unwanted_action;
1583 outnet->unwanted_param = unwanted_param;
1584 outnet->use_caps_for_id = use_caps_for_id;
1585 outnet->do_udp = do_udp;
1586 outnet->tcp_mss = tcp_mss;
1587 outnet->ip_dscp = dscp;
1588#ifndef S_SPLINT_S
1589 if(delayclose) {
1590 outnet->delayclose = 1;
1591 outnet->delay_tv.tv_sec = delayclose/1000;
1592 outnet->delay_tv.tv_usec = (delayclose%1000)*1000;
1593 }
1594#endif
1595 if(udp_connect) {
1596 outnet->udp_connect = 1;
1597 }
1598 if(numavailports == 0 || num_ports == 0) {
1599 log_err("no outgoing ports available");
1600 outside_network_delete(outnet);
1601 return NULL;
1602 }
1603#ifndef INET6
1604 do_ip6 = 0;
1605#endif
1606 calc_num46(ifs, num_ifs, do_ip4, do_ip6,
1607 &outnet->num_ip4, &outnet->num_ip6);
1608 if(outnet->num_ip4 != 0) {
1609 if(!(outnet->ip4_ifs = (struct port_if*)calloc(
1610 (size_t)outnet->num_ip4, sizeof(struct port_if)))) {
1611 log_err("malloc failed");
1612 outside_network_delete(outnet);
1613 return NULL;
1614 }
1615 }
1616 if(outnet->num_ip6 != 0) {
1617 if(!(outnet->ip6_ifs = (struct port_if*)calloc(
1618 (size_t)outnet->num_ip6, sizeof(struct port_if)))) {
1619 log_err("malloc failed");
1620 outside_network_delete(outnet);
1621 return NULL;
1622 }
1623 }
1624 if( !(outnet->udp_buff = sldns_buffer_new(bufsize)) ||
1625 !(outnet->pending = rbtree_create(pending_cmp)) ||
1626 !(outnet->serviced = rbtree_create(serviced_cmp)) ||
1627 !create_pending_tcp(outnet, bufsize)) {
1628 log_err("malloc failed");
1629 outside_network_delete(outnet);
1630 return NULL;
1631 }
1632 rbtree_init(&outnet->tcp_reuse, reuse_cmp);
1633 outnet->tcp_reuse_max = num_tcp;
1634
1635 /* allocate commpoints */
1636 for(k=0; k<num_ports; k++) {
1637 struct port_comm* pc;
1638 pc = (struct port_comm*)calloc(1, sizeof(*pc));
1639 if(!pc) {
1640 log_err("malloc failed");
1641 outside_network_delete(outnet);
1642 return NULL;
1643 }
1644 pc->cp = comm_point_create_udp(outnet->base, -1,
1645 outnet->udp_buff, outnet_udp_cb, outnet, NULL);
1646 if(!pc->cp) {
1647 log_err("malloc failed");
1648 free(pc);
1649 outside_network_delete(outnet);
1650 return NULL;
1651 }
1652 pc->next = outnet->unused_fds;
1653 outnet->unused_fds = pc;
1654 }
1655
1656 /* allocate interfaces */
1657 if(num_ifs == 0) {
1658 if(do_ip4 && !setup_if(&outnet->ip4_ifs[0], "0.0.0.0",
1659 availports, numavailports, num_ports)) {
1660 log_err("malloc failed");
1661 outside_network_delete(outnet);
1662 return NULL;
1663 }
1664 if(do_ip6 && !setup_if(&outnet->ip6_ifs[0], "::",
1665 availports, numavailports, num_ports)) {
1666 log_err("malloc failed");
1667 outside_network_delete(outnet);
1668 return NULL;
1669 }
1670 } else {
1671 size_t done_4 = 0, done_6 = 0;
1672 int i;
1673 for(i=0; i<num_ifs; i++) {
1674 if(str_is_ip6(ifs[i]) && do_ip6) {
1675 if(!setup_if(&outnet->ip6_ifs[done_6], ifs[i],
1676 availports, numavailports, num_ports)){
1677 log_err("malloc failed");
1678 outside_network_delete(outnet);
1679 return NULL;
1680 }
1681 done_6++;
1682 }
1683 if(!str_is_ip6(ifs[i]) && do_ip4) {
1684 if(!setup_if(&outnet->ip4_ifs[done_4], ifs[i],
1685 availports, numavailports, num_ports)){
1686 log_err("malloc failed");
1687 outside_network_delete(outnet);
1688 return NULL;
1689 }
1690 done_4++;
1691 }
1692 }
1693 }
1694 return outnet;
1695}
1696
1697/** helper pending delete */
1698static void
1699pending_node_del(rbnode_type* node, void* arg)
1700{
1701 struct pending* pend = (struct pending*)node;
1702 struct outside_network* outnet = (struct outside_network*)arg;
1703 pending_delete(outnet, pend);
1704}
1705
1706/** helper serviced delete */
1707static void
1708serviced_node_del(rbnode_type* node, void* ATTR_UNUSED(arg))
1709{
1710 struct serviced_query* sq = (struct serviced_query*)node;
1711 struct service_callback* p = sq->cblist, *np;
1712 free(sq->qbuf);
1713 free(sq->zone);
1714 free(sq->tls_auth_name);
1715 edns_opt_list_free(sq->opt_list);
1716 while(p) {
1717 np = p->next;
1718 free(p);
1719 p = np;
1720 }
1721 free(sq);
1722}
1723
1724void
1725outside_network_quit_prepare(struct outside_network* outnet)
1726{
1727 if(!outnet)
1728 return;
1729 /* prevent queued items from being sent */
1730 outnet->want_to_quit = 1;
1731}
1732
1733void
1734outside_network_delete(struct outside_network* outnet)
1735{
1736 if(!outnet)
1737 return;
1738 outnet->want_to_quit = 1;
1739 /* check every element, since we can be called on malloc error */
1740 if(outnet->pending) {
1741 /* free pending elements, but do no unlink from tree. */
1742 traverse_postorder(outnet->pending, pending_node_del, NULL);
1743 free(outnet->pending);
1744 }
1745 if(outnet->serviced) {
1746 traverse_postorder(outnet->serviced, serviced_node_del, NULL);
1747 free(outnet->serviced);
1748 }
1749 if(outnet->udp_buff)
1750 sldns_buffer_free(outnet->udp_buff);
1751 if(outnet->unused_fds) {
1752 struct port_comm* p = outnet->unused_fds, *np;
1753 while(p) {
1754 np = p->next;
1755 comm_point_delete(p->cp);
1756 free(p);
1757 p = np;
1758 }
1759 outnet->unused_fds = NULL;
1760 }
1761 if(outnet->ip4_ifs) {
1762 int i, k;
1763 for(i=0; i<outnet->num_ip4; i++) {
1764 for(k=0; k<outnet->ip4_ifs[i].inuse; k++) {
1765 struct port_comm* pc = outnet->ip4_ifs[i].
1766 out[k];
1767 comm_point_delete(pc->cp);
1768 free(pc);
1769 }
1770#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
1771 free(outnet->ip4_ifs[i].avail_ports);
1772#endif
1773 free(outnet->ip4_ifs[i].out);
1774 }
1775 free(outnet->ip4_ifs);
1776 }
1777 if(outnet->ip6_ifs) {
1778 int i, k;
1779 for(i=0; i<outnet->num_ip6; i++) {
1780 for(k=0; k<outnet->ip6_ifs[i].inuse; k++) {
1781 struct port_comm* pc = outnet->ip6_ifs[i].
1782 out[k];
1783 comm_point_delete(pc->cp);
1784 free(pc);
1785 }
1786#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
1787 free(outnet->ip6_ifs[i].avail_ports);
1788#endif
1789 free(outnet->ip6_ifs[i].out);
1790 }
1791 free(outnet->ip6_ifs);
1792 }
1793 if(outnet->tcp_conns) {
1794 size_t i;
1795 for(i=0; i<outnet->num_tcp; i++)
1796 if(outnet->tcp_conns[i]) {
1797 struct pending_tcp* pend;
1798 pend = outnet->tcp_conns[i];
1799 if(pend->reuse.item_on_lru_list) {
1800 /* delete waiting_tcp elements that
1801 * the tcp conn is working on */
1802 decommission_pending_tcp(outnet, pend);
1803 }
1804 comm_point_delete(outnet->tcp_conns[i]->c);
1805 free(outnet->tcp_conns[i]);
1806 outnet->tcp_conns[i] = NULL;
1807 }
1808 free(outnet->tcp_conns);
1809 outnet->tcp_conns = NULL;
1810 }
1811 if(outnet->tcp_wait_first) {
1812 struct waiting_tcp* p = outnet->tcp_wait_first, *np;
1813 while(p) {
1814 np = p->next_waiting;
1815 waiting_tcp_delete(p);
1816 p = np;
1817 }
1818 }
1819 /* was allocated in struct pending that was deleted above */
1820 rbtree_init(&outnet->tcp_reuse, reuse_cmp);
1821 outnet->tcp_reuse_first = NULL;
1822 outnet->tcp_reuse_last = NULL;
1823 if(outnet->udp_wait_first) {
1824 struct pending* p = outnet->udp_wait_first, *np;
1825 while(p) {
1826 np = p->next_waiting;
1827 pending_delete(NULL, p);
1828 p = np;
1829 }
1830 }
1831 free(outnet);
1832}
1833
1834void
1835pending_delete(struct outside_network* outnet, struct pending* p)
1836{
1837 if(!p)
1838 return;
1839 if(outnet && outnet->udp_wait_first &&
1840 (p->next_waiting || p == outnet->udp_wait_last) ) {
1841 /* delete from waiting list, if it is in the waiting list */
1842 struct pending* prev = NULL, *x = outnet->udp_wait_first;
1843 while(x && x != p) {
1844 prev = x;
1845 x = x->next_waiting;
1846 }
1847 if(x) {
1848 log_assert(x == p);
1849 if(prev)
1850 prev->next_waiting = p->next_waiting;
1851 else outnet->udp_wait_first = p->next_waiting;
1852 if(outnet->udp_wait_last == p)
1853 outnet->udp_wait_last = prev;
1854 }
1855 }
1856 if(outnet) {
1857 (void)rbtree_delete(outnet->pending, p->node.key);
1858 }
1859 if(p->timer)
1860 comm_timer_delete(p->timer);
1861 free(p->pkt);
1862 free(p);
1863}
1864
1865static void
1866sai6_putrandom(struct sockaddr_in6 *sa, int pfxlen, struct ub_randstate *rnd)
1867{
1868 int i, last;
1869 if(!(pfxlen > 0 && pfxlen < 128))
1870 return;
1871 for(i = 0; i < (128 - pfxlen) / 8; i++) {
1872 sa->sin6_addr.s6_addr[15-i] = (uint8_t)ub_random_max(rnd, 256);
1873 }
1874 last = pfxlen & 7;
1875 if(last != 0) {
1876 sa->sin6_addr.s6_addr[15-i] |=
1877 ((0xFF >> last) & ub_random_max(rnd, 256));
1878 }
1879}
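
[Editor's note] sai6_putrandom() randomises only the host part of the outgoing IPv6 source address: whole bytes below the prefix are overwritten from the low end, and the byte that straddles the prefix boundary is ORed with random bits masked by 0xFF >> (pfxlen & 7), so the prefix bits are never disturbed. A minimal standalone sketch of the same bit arithmetic, against a plain 16-byte array and with libc rand() standing in for unbound's ub_randstate (the function name is made up for illustration):

#include <stdint.h>
#include <stdlib.h>

/* illustrative only: same bit handling as sai6_putrandom() above */
static void
putrandom_sketch(uint8_t addr[16], int pfxlen)
{
    int i, last;
    if(!(pfxlen > 0 && pfxlen < 128))
        return;
    /* whole bytes outside the prefix, filled from the low end */
    for(i = 0; i < (128 - pfxlen) / 8; i++)
        addr[15 - i] = (uint8_t)(rand() & 0xff);
    /* partial byte: keep the top pfxlen&7 prefix bits, OR in random low bits */
    last = pfxlen & 7;
    if(last != 0)
        addr[15 - i] |= (uint8_t)((0xFF >> last) & rand());
}
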
1880
1881/**
1882 * Try to open a UDP socket for outgoing communication.
1883 * Sets sockets options as needed.
1884 * @param addr: socket address.
1885 * @param addrlen: length of address.
1886 * @param pfxlen: length of network prefix (for address randomisation).
1887 * @param port: port override for addr.
1888 * @param inuse: if -1 is returned, this bool means the port was in use.
1889 * @param rnd: random state (for address randomisation).
1890 * @param dscp: DSCP to use.
1891 * @return fd or -1
1892 */
1893static int
1894udp_sockport(struct sockaddr_storage* addr, socklen_t addrlen, int pfxlen,
1895 int port, int* inuse, struct ub_randstate* rnd, int dscp)
1896{
1897 int fd, noproto;
1898 if(addr_is_ip6(addr, addrlen)) {
1899 int freebind = 0;
1900 struct sockaddr_in6 sa = *(struct sockaddr_in6*)addr;
1901 sa.sin6_port = (in_port_t)htons((uint16_t)port);
1902 sa.sin6_flowinfo = 0;
1903 sa.sin6_scope_id = 0;
1904 if(pfxlen != 0) {
1905 freebind = 1;
1906 sai6_putrandom(&sa, pfxlen, rnd);
1907 }
1908 fd = create_udp_sock(AF_INET6, SOCK_DGRAM,
1909 (struct sockaddr*)&sa, addrlen, 1, inuse, &noproto,
1910 0, 0, 0, NULL, 0, freebind, 0, dscp);
1911 } else {
1912 struct sockaddr_in* sa = (struct sockaddr_in*)addr;
1913 sa->sin_port = (in_port_t)htons((uint16_t)port);
1914 fd = create_udp_sock(AF_INET, SOCK_DGRAM,
1915 (struct sockaddr*)addr, addrlen, 1, inuse, &noproto,
1916 0, 0, 0, NULL, 0, 0, 0, dscp);
1917 }
1918 return fd;
1919}
1920
1921/** Select random ID */
1922static int
1923select_id(struct outside_network* outnet, struct pending* pend,
1924 sldns_buffer* packet)
1925{
1926 int id_tries = 0;
1927 pend->id = GET_RANDOM_ID(outnet->rnd);
1928 LDNS_ID_SET(sldns_buffer_begin(packet), pend->id);
1929
1930 /* insert in tree */
1931 pend->node.key = pend;
1932 while(!rbtree_insert(outnet->pending, &pend->node)) {
1933 /* change ID to avoid collision */
1934 pend->id = GET_RANDOM_ID(outnet->rnd);
1935 LDNS_ID_SET(sldns_buffer_begin(packet), pend->id);
1936 id_tries++;
1937 if(id_tries == MAX_ID_RETRY) {
1938 pend->id=99999; /* nonexistent ID */
1939 log_err("failed to generate unique ID, drop msg");
1940 return 0;
1941 }
1942 }
1943 verbose(VERB_ALGO, "inserted new pending reply id=%4.4x", pend->id);
1944 return 1;
1945}
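
[Editor's note] select_id() detects collisions through rbtree_insert() on the pending tree: it keeps drawing fresh random 16-bit IDs until insertion succeeds and gives up after MAX_ID_RETRY attempts. A hedged standalone sketch of the same retry-until-unique pattern, using a 64K bitmap of in-flight IDs instead of the rbtree (names and the bitmap are illustrative, not unbound APIs):

#include <stdint.h>
#include <stdlib.h>

#define SKETCH_MAX_ID_RETRY 1000

/* returns 1 and stores a free ID in *out, or 0 after too many collisions */
static int
pick_unused_id_sketch(const uint8_t inflight[8192], uint16_t* out)
{
    int tries;
    for(tries = 0; tries < SKETCH_MAX_ID_RETRY; tries++) {
        /* rand() stands in for a real 16-bit random source */
        uint16_t id = (uint16_t)(((unsigned)rand() << 8) ^ (unsigned)rand());
        if(!(inflight[id >> 3] & (1 << (id & 7)))) {
            *out = id;  /* not in flight: take it */
            return 1;
        }
    }
    return 0;  /* give up, as select_id() drops the message */
}
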
1946
1947/** return true if UDP connect error needs to be logged */
1948static int udp_connect_needs_log(int err)
1949{
1950 switch(err) {
1951 case ECONNREFUSED:
1952# ifdef ENETUNREACH
1953 case ENETUNREACH:
1954# endif
1955# ifdef EHOSTDOWN
1956 case EHOSTDOWN:
1957# endif
1958# ifdef EHOSTUNREACH
1959 case EHOSTUNREACH:
1960# endif
1961# ifdef ENETDOWN
1962 case ENETDOWN:
1963# endif
1964 case EPERM:
1965 if(verbosity >= VERB_ALGO)
1966 return 1;
1967 return 0;
1968 default:
1969 break;
1970 }
1971 return 1;
1972}
1973
1974
1975/** Select random interface and port */
1976static int
1977select_ifport(struct outside_network* outnet, struct pending* pend,
1978 int num_if, struct port_if* ifs)
1979{
1980 int my_if, my_port, fd, portno, inuse, tries=0;
1981 struct port_if* pif;
1982 /* randomly select interface and port */
1983 if(num_if == 0) {
1984 verbose(VERB_QUERY, "Need to send query but have no "
1985 "outgoing interfaces of that family");
1986 return 0;
1987 }
1988 log_assert(outnet->unused_fds);
1989 tries = 0;
1990 while(1) {
1991 my_if = ub_random_max(outnet->rnd, num_if);
1992 pif = &ifs[my_if];
1993#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
1994 if(outnet->udp_connect) {
1995 /* if we connect() we cannot reuse fds for a port */
1996 if(pif->inuse >= pif->avail_total) {
1997 tries++;
1998 if(tries < MAX_PORT_RETRY)
1999 continue;
2000 log_err("failed to find an open port, drop msg");
2001 return 0;
2002 }
2003 my_port = pif->inuse + ub_random_max(outnet->rnd,
2004 pif->avail_total - pif->inuse);
2005 } else {
2006 my_port = ub_random_max(outnet->rnd, pif->avail_total);
2007 if(my_port < pif->inuse) {
2008 /* port already open */
2009 pend->pc = pif->out[my_port];
2010 verbose(VERB_ALGO, "using UDP if=%d port=%d",
2011 my_if, pend->pc->number);
2012 break;
2013 }
2014 }
2015 /* try to open new port, if fails, loop to try again */
2016 log_assert(pif->inuse < pif->maxout);
2017 portno = pif->avail_ports[my_port - pif->inuse];
2018#else
2019 my_port = portno = 0;
2020#endif
2021 fd = udp_sockport(&pif->addr, pif->addrlen, pif->pfxlen,
2022 portno, &inuse, outnet->rnd, outnet->ip_dscp);
2023 if(fd == -1 && !inuse) {
2024 /* nonrecoverable error making socket */
2025 return 0;
2026 }
2027 if(fd != -1) {
2028 verbose(VERB_ALGO, "opened UDP if=%d port=%d",
2029 my_if, portno);
2030 if(outnet->udp_connect) {
2031 /* connect() to the destination */
2032 if(connect(fd, (struct sockaddr*)&pend->addr,
2033 pend->addrlen) < 0) {
2034 if(udp_connect_needs_log(errno)) {
2035 log_err_addr("udp connect failed",
2036 strerror(errno), &pend->addr,
2037 pend->addrlen);
2038 }
2039 sock_close(fd);
2040 return 0;
2041 }
2042 }
2043 /* grab fd */
2044 pend->pc = outnet->unused_fds;
2045 outnet->unused_fds = pend->pc->next;
2046
2047 /* setup portcomm */
2048 pend->pc->next = NULL;
2049 pend->pc->number = portno;
2050 pend->pc->pif = pif;
2051 pend->pc->index = pif->inuse;
2052 pend->pc->num_outstanding = 0;
2053 comm_point_start_listening(pend->pc->cp, fd, -1);
2054
2055 /* grab port in interface */
2056 pif->out[pif->inuse] = pend->pc;
2057#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
2058 pif->avail_ports[my_port - pif->inuse] =
2059 pif->avail_ports[pif->avail_total-pif->inuse-1];
2060#endif
2061 pif->inuse++;
2062 break;
2063 }
2064 /* failed, already in use */
2065 verbose(VERB_QUERY, "port %d in use, trying another", portno);
2066 tries++;
2067 if(tries == MAX_PORT_RETRY) {
2068 log_err("failed to find an open port, drop msg");
2069 return 0;
2070 }
2071 }
2072 log_assert(pend->pc);
2073 pend->pc->num_outstanding++;
2074
2075 return 1;
2076}
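
[Editor's note] the port bookkeeping in select_ifport() keeps pif->avail_ports dense: when a randomly chosen port is opened, its slot is overwritten with the last entry of the shrinking available region and pif->inuse grows by one, so removal is O(1) and the remaining ports stay uniformly selectable. A small standalone sketch of that swap-with-last pool (hypothetical names, plain int array):

/* take the entry at index k out of the live region pool[0..*n-1] */
static int
pool_take_sketch(int* pool, int* n, int k)
{
    int port = pool[k];       /* the port being taken into use */
    pool[k] = pool[*n - 1];   /* keep the available region dense */
    (*n)--;                   /* one fewer available port */
    return port;
}
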
2077
2078static int
2079randomize_and_send_udp(struct pending* pend, sldns_buffer* packet, int timeout)
2080{
2081 struct timeval tv;
2082 struct outside_network* outnet = pend->sq->outnet;
2083
2084 /* select id */
2085 if(!select_id(outnet, pend, packet)) {
2086 return 0;
2087 }
2088
2089 /* select src_if, port */
2090 if(addr_is_ip6(&pend->addr, pend->addrlen)) {
2091 if(!select_ifport(outnet, pend,
2092 outnet->num_ip6, outnet->ip6_ifs))
2093 return 0;
2094 } else {
2095 if(!select_ifport(outnet, pend,
2096 outnet->num_ip4, outnet->ip4_ifs))
2097 return 0;
2098 }
2099 log_assert(pend->pc && pend->pc->cp);
2100
2101 /* send it over the commlink */
2102 if(!comm_point_send_udp_msg(pend->pc->cp, packet,
2103 (struct sockaddr*)&pend->addr, pend->addrlen, outnet->udp_connect)) {
2104 portcomm_loweruse(outnet, pend->pc);
2105 return 0;
2106 }
2107
2108 /* system calls to set timeout after sending UDP to make roundtrip
2109 smaller. */
2110#ifndef S_SPLINT_S
2111 tv.tv_sec = timeout/1000;
2112 tv.tv_usec = (timeout%1000)*1000;
2113#endif
2114 comm_timer_set(pend->timer, &tv);
2115
2116#ifdef USE_DNSTAP
2117 /*
2118 * sending src (local service)/dst (upstream) addresses over DNSTAP
2119 * There are no chances to get the src (local service) addr if unbound
2120 * is not configured with specific outgoing IP-addresses. So we will
2121 * pass 0.0.0.0 (::) to argument for
2122 * dt_msg_send_outside_query()/dt_msg_send_outside_response() calls.
2123 */
2124 if(outnet->dtenv &&
2125 (outnet->dtenv->log_resolver_query_messages ||
2126 outnet->dtenv->log_forwarder_query_messages)) {
2127 log_addr(VERB_ALGO, "from local addr", &pend->pc->pif->addr, pend->pc->pif->addrlen);
2128 log_addr(VERB_ALGO, "request to upstream", &pend->addr, pend->addrlen);
2129 dt_msg_send_outside_query(outnet->dtenv, &pend->addr, &pend->pc->pif->addr, comm_udp,
2130 pend->sq->zone, pend->sq->zonelen, packet);
2131 }
2132#endif
2133 return 1;
2134}
2135
2136struct pending*
2137pending_udp_query(struct serviced_query* sq, struct sldns_buffer* packet,
2138 int timeout, comm_point_callback_type* cb, void* cb_arg)
2139{
2140 struct pending* pend = (struct pending*)calloc(1, sizeof(*pend));
2141 if(!pend) return NULL;
2142 pend->outnet = sq->outnet;
2143 pend->sq = sq;
2144 pend->addrlen = sq->addrlen;
2145 memmove(&pend->addr, &sq->addr, sq->addrlen);
2146 pend->cb = cb;
2147 pend->cb_arg = cb_arg;
2148 pend->node.key = pend;
2149 pend->timer = comm_timer_create(sq->outnet->base, pending_udp_timer_cb,
2150 pend);
2151 if(!pend->timer) {
2152 free(pend);
2153 return NULL;
2154 }
2155
2156 if(sq->outnet->unused_fds == NULL) {
2157 /* no unused fd, cannot create a new port (randomly) */
2158 verbose(VERB_ALGO, "no fds available, udp query waiting");
2159 pend->timeout = timeout;
2160 pend->pkt_len = sldns_buffer_limit(packet);
2161 pend->pkt = (uint8_t*)memdup(sldns_buffer_begin(packet),
2162 pend->pkt_len);
2163 if(!pend->pkt) {
2164 comm_timer_delete(pend->timer);
2165 free(pend);
2166 return NULL;
2167 }
2168 /* put at end of waiting list */
2169 if(sq->outnet->udp_wait_last)
2170 sq->outnet->udp_wait_last->next_waiting = pend;
2171 else
2172 sq->outnet->udp_wait_first = pend;
2173 sq->outnet->udp_wait_last = pend;
2174 return pend;
2175 }
2176 if(!randomize_and_send_udp(pend, packet, timeout)) {
2177 pending_delete(sq->outnet, pend);
2178 return NULL;
2179 }
2180 return pend;
2181}
2182
2183void
2184outnet_tcptimer(void* arg)
2185{
2186 struct waiting_tcp* w = (struct waiting_tcp*)arg;
2187 struct outside_network* outnet = w->outnet;
2188 verbose(VERB_CLIENT, "outnet_tcptimer");
2189 if(w->on_tcp_waiting_list) {
2190 /* it is on the waiting list */
2191 waiting_list_remove(outnet, w);
2192 waiting_tcp_callback(w, NULL, NETEVENT_TIMEOUT, NULL);
2193 waiting_tcp_delete(w);
2194 } else {
2195 /* it was in use */
2196 struct pending_tcp* pend=(struct pending_tcp*)w->next_waiting;
2197 reuse_cb_and_decommission(outnet, pend, NETEVENT_TIMEOUT);
2198 }
2199 use_free_buffer(outnet);
2200}
2201
2202/** close the oldest reuse_tcp connection to make a fd and struct pend
2203 * available for a new stream connection */
2204static void
2205reuse_tcp_close_oldest(struct outside_network* outnet)
2206{
2207 struct reuse_tcp* reuse;
2208 verbose(VERB_CLIENT, "reuse_tcp_close_oldest");
2209 reuse = reuse_tcp_lru_snip(outnet);
2210 if(!reuse) return;
2211 /* free up */
2212 reuse_cb_and_decommission(outnet, reuse->pending, NETEVENT_CLOSED);
2213}
2214
2215static uint16_t
2216tcp_select_id(struct outside_network* outnet, struct reuse_tcp* reuse)
2217{
2218 if(reuse)
2219 return reuse_tcp_select_id(reuse, outnet);
2220 return GET_RANDOM_ID(outnet->rnd);
2221}
2222
2223/** find spare ID value for reuse tcp stream. That is random and also does
2224 * not collide with an existing query ID that is in use or waiting */
2225uint16_t
2226reuse_tcp_select_id(struct reuse_tcp* reuse, struct outside_network* outnet)
2227{
2228 uint16_t id = 0, curid, nextid;
2229 const int try_random = 2000;
2230 int i;
2231 unsigned select, count, space;
2232 rbnode_type* node;
2233
2234 /* make really sure the tree is not empty */
2235 if(reuse->tree_by_id.count == 0) {
2236 id = GET_RANDOM_ID(outnet->rnd);
2237 return id;
2238 }
2239
2240 /* try to find random empty spots by picking them */
2241 for(i = 0; i<try_random; i++) {
2242 id = GET_RANDOM_ID(outnet->rnd);
2243 if(!reuse_tcp_by_id_find(reuse, id)) {
2244 return id;
2245 }
2246 }
2247
2248 /* equally pick a random unused element from the tree that is
2249 * not in use. Pick the n-th index of an unused number,
2250 * then loop over the empty spaces in the tree and find it */
2251 log_assert(reuse->tree_by_id.count < 0xffff);
2252 select = ub_random_max(outnet->rnd, 0xffff - reuse->tree_by_id.count);
2253 /* select value now in 0 .. num free - 1 */
2254
2255 count = 0; /* number of free spaces passed by */
2256 node = rbtree_first(&reuse->tree_by_id);
2257 log_assert(node && node != RBTREE_NULL); /* tree not empty */
2258 /* see if select is before first node */
2259 if(select < tree_by_id_get_id(node))
2260 return select;
2261 count += tree_by_id_get_id(node);
2262 /* perhaps select is between nodes */
2263 while(node && node != RBTREE_NULL) {
2264 rbnode_type* next = rbtree_next(node);
2265 if(next && next != RBTREE_NULL) {
2266 curid = tree_by_id_get_id(node);
2267 nextid = tree_by_id_get_id(next);
2268 log_assert(curid < nextid);
2269 if(curid != 0xffff && curid + 1 < nextid) {
2270 /* space between nodes */
2271 space = nextid - curid - 1;
2272 log_assert(select >= count);
2273 if(select < count + space) {
2274 /* here it is */
2275 return curid + 1 + (select - count);
2276 }
2277 count += space;
2278 }
2279 }
2280 node = next;
2281 }
2282
2283 /* select is after the last node */
2284 /* count is the number of free positions before the nodes in the
2285 * tree */
2286 node = rbtree_last(&reuse->tree_by_id);
2287 log_assert(node && node != RBTREE_NULL); /* tree not empty */
2288 curid = tree_by_id_get_id(node);
2289 log_assert(count + (0xffff-curid) + reuse->tree_by_id.count == 0xffff);
2290 return curid + 1 + (select - count);
2291}
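
[Editor's note] when the random probes all collide, reuse_tcp_select_id() falls back to picking the n-th free ID (the 'select' value) by walking the used IDs in order and counting the size of each gap between consecutive entries. A standalone sketch of that gap walk over a sorted, duplicate-free array instead of the tree_by_id rbtree; for used IDs {3, 7} and nth = 4 the free values run 0,1,2,4,5,6,8,... and the sketch returns 5 (names are illustrative):

#include <stddef.h>
#include <stdint.h>

/* used[] is sorted ascending without duplicates; nth counts from 0 */
static uint16_t
nth_free_id_sketch(const uint16_t* used, size_t n, unsigned nth)
{
    size_t i;
    unsigned count;               /* free values passed so far */
    if(n == 0 || nth < used[0])
        return (uint16_t)nth;     /* lands before the first used ID */
    count = used[0];              /* free values 0 .. used[0]-1 */
    for(i = 0; i + 1 < n; i++) {
        unsigned space = (unsigned)(used[i + 1] - used[i] - 1);
        if(nth < count + space)   /* lands in this gap */
            return (uint16_t)(used[i] + 1 + (nth - count));
        count += space;
    }
    /* lands after the last used ID */
    return (uint16_t)(used[n - 1] + 1 + (nth - count));
}
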
2292
2293struct waiting_tcp*
2294pending_tcp_query(struct serviced_query* sq, sldns_buffer* packet,
2295 int timeout, comm_point_callback_type* callback, void* callback_arg)
2296{
2297 struct pending_tcp* pend = sq->outnet->tcp_free;
2298 struct reuse_tcp* reuse = NULL;
2299 struct waiting_tcp* w;
2300
2301 verbose(VERB_CLIENT, "pending_tcp_query");
2302 if(sldns_buffer_limit(packet) < sizeof(uint16_t)) {
2303 verbose(VERB_ALGO, "pending tcp query with too short buffer < 2");
2304 return NULL;
2305 }
2306
2307 /* find out if a reused stream to the target exists */
2308 /* if so, take it into use */
2309 reuse = reuse_tcp_find(sq->outnet, &sq->addr, sq->addrlen,
2310 sq->ssl_upstream);
2311 if(reuse) {
2312 log_reuse_tcp(VERB_CLIENT, "pending_tcp_query: found reuse", reuse);
2313 log_assert(reuse->pending);
2314 pend = reuse->pending;
2315 reuse_tcp_lru_touch(sq->outnet, reuse);
2316 }
2317
2318 log_assert(!reuse || (reuse && pend));
2319 /* if !pend but we have reuse streams, close a reuse stream
2320 * to be able to open a new one to this target, no use waiting
2321 * to reuse a file descriptor while another query needs to use
2322 * that buffer and file descriptor now. */
2323 if(!pend) {
2324 reuse_tcp_close_oldest(sq->outnet);
2325 pend = sq->outnet->tcp_free;
2326 log_assert(!reuse || (pend == reuse->pending));
2327 }
2328
2329 /* allocate space to store query */
2330 w = (struct waiting_tcp*)malloc(sizeof(struct waiting_tcp)
2331 + sldns_buffer_limit(packet));
2332 if(!w) {
2333 return NULL;
2334 }
2335 if(!(w->timer = comm_timer_create(sq->outnet->base, outnet_tcptimer, w))) {
2336 free(w);
2337 return NULL;
2338 }
2339 w->pkt = (uint8_t*)w + sizeof(struct waiting_tcp);
2340 w->pkt_len = sldns_buffer_limit(packet);
2341 memmove(w->pkt, sldns_buffer_begin(packet), w->pkt_len);
2342 w->id = tcp_select_id(sq->outnet, reuse);
2343 LDNS_ID_SET(w->pkt, w->id);
2344 memcpy(&w->addr, &sq->addr, sq->addrlen);
2345 w->addrlen = sq->addrlen;
2346 w->outnet = sq->outnet;
2347 w->on_tcp_waiting_list = 0;
2348 w->next_waiting = NULL;
2349 w->cb = callback;
2350 w->cb_arg = callback_arg;
2351 w->ssl_upstream = sq->ssl_upstream;
2352 w->tls_auth_name = sq->tls_auth_name;
2353 w->timeout = timeout;
2354 w->id_node.key = NULL;
2355 w->write_wait_prev = NULL;
2356 w->write_wait_next = NULL;
2357 w->write_wait_queued = 0;
2358 w->error_count = 0;
2359#ifdef USE_DNSTAP
2360 w->sq = NULL;
2361#endif
2362 if(pend) {
2363 /* we have a buffer available right now */
2364 if(reuse) {
2365 log_assert(reuse == &pend->reuse);
2366 /* reuse existing fd, write query and continue */
2367 /* store query in tree by id */
2368 verbose(VERB_CLIENT, "pending_tcp_query: reuse, store");
2369 w->next_waiting = (void*)pend;
2370 reuse_tree_by_id_insert(&pend->reuse, w);
2371 /* can we write right now? */
2372 if(pend->query == NULL) {
2373 /* write straight away */
2374 /* stop the timer on read of the fd */
2375 comm_point_stop_listening(pend->c);
2376 pend->query = w;
2377 outnet_tcp_take_query_setup(pend->c->fd, pend,
2378 w);
2379 } else {
2380 /* put it in the waiting list for
2381 * this stream */
2382 reuse_write_wait_push_back(&pend->reuse, w);
2383 }
2384 } else {
2385 /* create new fd and connect to addr, setup to
2386 * write query */
2387 verbose(VERB_CLIENT, "pending_tcp_query: new fd, connect");
2388 rbtree_init(&pend->reuse.tree_by_id, reuse_id_cmp);
2389 pend->reuse.pending = pend;
2390 memcpy(&pend->reuse.addr, &sq->addr, sq->addrlen);
2391 pend->reuse.addrlen = sq->addrlen;
2392 if(!outnet_tcp_take_into_use(w)) {
2393 waiting_tcp_delete(w);
2394 return NULL;
2395 }
2396 }
2397#ifdef USE_DNSTAP
2398 if(sq->outnet->dtenv &&
2399 (sq->outnet->dtenv->log_resolver_query_messages ||
2400 sq->outnet->dtenv->log_forwarder_query_messages)) {
2401 /* use w->pkt, because it has the ID value */
2402 sldns_buffer tmp;
2403 sldns_buffer_init_frm_data(&tmp, w->pkt, w->pkt_len);
2404 dt_msg_send_outside_query(sq->outnet->dtenv, &sq->addr,
2405 &pend->pi->addr, comm_tcp, sq->zone,
2406 sq->zonelen, &tmp);
2407 }
2408#endif
2409 } else {
2410 /* queue up */
2411 /* waiting for a buffer on the outside network buffer wait
2412 * list */
2413 verbose(VERB_CLIENT, "pending_tcp_query: queue to wait");
2414#ifdef USE_DNSTAP
2415 w->sq = sq;
2416#endif
2417 outnet_add_tcp_waiting(sq->outnet, w);
2418 }
2419 return w;
2420}
2421
2422/** create query for serviced queries */
2423static void
2424serviced_gen_query(sldns_buffer* buff, uint8_t* qname, size_t qnamelen,
2425 uint16_t qtype, uint16_t qclass, uint16_t flags)
2426{
2427 sldns_buffer_clear(buff);
2428 /* skip id */
2429 sldns_buffer_write_u16(buff, flags);
2430 sldns_buffer_write_u16(buff, 1); /* qdcount */
2431 sldns_buffer_write_u16(buff, 0); /* ancount */
2432 sldns_buffer_write_u16(buff, 0); /* nscount */
2433 sldns_buffer_write_u16(buff, 0); /* arcount */
2434 sldns_buffer_write(buff, qname, qnamelen);
2435 sldns_buffer_write_u16(buff, qtype);
2436 sldns_buffer_write_u16(buff, qclass);
2437 sldns_buffer_flip(buff);
2438}
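
[Editor's note] serviced_gen_query() writes the header fields in wire order but deliberately leaves out the 2-byte ID; serviced_encode() later prepends an ID placeholder before copying sq->qbuf, which is why the qname is found at offset 10 of qbuf elsewhere in this file. The standalone sketch below lays out the same query, including the ID slot, into a plain byte array so the offsets are explicit (illustrative names; qname must already be wire-format length-prefixed labels ending in a zero byte):

#include <stdint.h>
#include <string.h>

/* returns the number of bytes written; out must be large enough */
static size_t
gen_query_sketch(uint8_t* out, const uint8_t* qname, size_t qnamelen,
    uint16_t qtype, uint16_t qclass, uint16_t flags)
{
    /* 12-byte header: ID (filled in later), flags, QD=1, AN/NS/AR=0 */
    uint16_t hdr[6] = { 0, flags, 1, 0, 0, 0 };
    size_t off = 0;
    int i;
    for(i = 0; i < 6; i++) {          /* big-endian 16-bit fields */
        out[off++] = (uint8_t)(hdr[i] >> 8);
        out[off++] = (uint8_t)(hdr[i] & 0xff);
    }
    memcpy(out + off, qname, qnamelen);   /* question name */
    off += qnamelen;
    out[off++] = (uint8_t)(qtype >> 8);   /* QTYPE */
    out[off++] = (uint8_t)(qtype & 0xff);
    out[off++] = (uint8_t)(qclass >> 8);  /* QCLASS */
    out[off++] = (uint8_t)(qclass & 0xff);
    return off;
}
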
2439
2440/** lookup serviced query in serviced query rbtree */
2441static struct serviced_query*
2442lookup_serviced(struct outside_network* outnet, sldns_buffer* buff, int dnssec,
2443 struct sockaddr_storage* addr, socklen_t addrlen,
2444 struct edns_option* opt_list)
2445{
2446 struct serviced_query key;
2447 key.node.key = &key;
2448 key.qbuf = sldns_buffer_begin(buff);
2449 key.qbuflen = sldns_buffer_limit(buff);
2450 key.dnssec = dnssec;
2451 memcpy(&key.addr, addr, addrlen);
2452 key.addrlen = addrlen;
2453 key.outnet = outnet;
2454 key.opt_list = opt_list;
2455 return (struct serviced_query*)rbtree_search(outnet->serviced, &key);
2456}
2457
2458/** Create new serviced entry */
2459static struct serviced_query*
2460serviced_create(struct outside_network* outnet, sldns_buffer* buff, int dnssec,
2461 int want_dnssec, int nocaps, int tcp_upstream, int ssl_upstream,
2462 char* tls_auth_name, struct sockaddr_storage* addr, socklen_t addrlen,
2463 uint8_t* zone, size_t zonelen, int qtype, struct edns_option* opt_list,
2464 size_t pad_queries_block_size)
2465{
2466 struct serviced_query* sq = (struct serviced_query*)malloc(sizeof(*sq));
2467#ifdef UNBOUND_DEBUG
2468 rbnode_type* ins;
2469#endif
2470 if(!sq)
2471 return NULL;
2472 sq->node.key = sq;
2473 sq->qbuf = memdup(sldns_buffer_begin(buff), sldns_buffer_limit(buff));
2474 if(!sq->qbuf) {
2475 free(sq);
2476 return NULL;
2477 }
2478 sq->qbuflen = sldns_buffer_limit(buff);
2479 sq->zone = memdup(zone, zonelen);
2480 if(!sq->zone) {
2481 free(sq->qbuf);
2482 free(sq);
2483 return NULL;
2484 }
2485 sq->zonelen = zonelen;
2486 sq->qtype = qtype;
2487 sq->dnssec = dnssec;
2488 sq->want_dnssec = want_dnssec;
2489 sq->nocaps = nocaps;
2490 sq->tcp_upstream = tcp_upstream;
2491 sq->ssl_upstream = ssl_upstream;
2492 if(tls_auth_name) {
2493 sq->tls_auth_name = strdup(tls_auth_name);
2494 if(!sq->tls_auth_name) {
2495 free(sq->zone);
2496 free(sq->qbuf);
2497 free(sq);
2498 return NULL;
2499 }
2500 } else {
2501 sq->tls_auth_name = NULL;
2502 }
2503 memcpy(&sq->addr, addr, addrlen);
2504 sq->addrlen = addrlen;
2505 sq->opt_list = NULL;
2506 if(opt_list) {
2507 sq->opt_list = edns_opt_copy_alloc(opt_list);
2508 if(!sq->opt_list) {
2509 free(sq->tls_auth_name);
2510 free(sq->zone);
2511 free(sq->qbuf);
2512 free(sq);
2513 return NULL;
2514 }
2515 }
2516 sq->outnet = outnet;
2517 sq->cblist = NULL;
2518 sq->pending = NULL;
2519 sq->status = serviced_initial;
2520 sq->retry = 0;
2521 sq->to_be_deleted = 0;
2522 sq->padding_block_size = pad_queries_block_size;
2523#ifdef UNBOUND_DEBUG
2524 ins =
2525#else
2526 (void)
2527#endif
2528 rbtree_insert(outnet->serviced, &sq->node);
2529 log_assert(ins != NULL); /* must not be already present */
2530 return sq;
2531}
2532
2533/** remove waiting tcp from the outnet waiting list */
2534static void
2535waiting_list_remove(struct outside_network* outnet, struct waiting_tcp* w)
2536{
2537 struct waiting_tcp* p = outnet->tcp_wait_first, *prev = NULL;
2538 w->on_tcp_waiting_list = 0;
2539 while(p) {
2540 if(p == w) {
2541 /* remove w */
2542 if(prev)
2543 prev->next_waiting = w->next_waiting;
2544 else outnet->tcp_wait_first = w->next_waiting;
2545 if(outnet->tcp_wait_last == w)
2546 outnet->tcp_wait_last = prev;
2547 return;
2548 }
2549 prev = p;
2550 p = p->next_waiting;
2551 }
2552 /* waiting_list_remove is currently called only with items that are
2553 * already in the waiting list. */
2554 log_assert(0);
2555}
2556
2557/** reuse tcp stream, remove serviced query from stream,
2558 * return true if the stream is kept, false if it is to be closed */
2559static int
2560reuse_tcp_remove_serviced_keep(struct waiting_tcp* w,
2561 struct serviced_query* sq)
2562{
2563 struct pending_tcp* pend_tcp = (struct pending_tcp*)w->next_waiting;
2564 verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep");
2565 /* remove the callback. let query continue to write to not cancel
2566 * the stream itself. also keep it as an entry in the tree_by_id,
2567 * in case the answer returns (that we no longer want), but we cannot
2568 * pick the same ID number meanwhile */
2569 w->cb = NULL;
2570 /* see if can be entered in reuse tree
2571 * for that the FD has to be non-1 */
2572 if(pend_tcp->c->fd == -1) {
2573 verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep: -1 fd");
2574 return 0;
2575 }
2576 /* if in tree and used by other queries */
2577 if(pend_tcp->reuse.node.key) {
2578 verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep: in use by other queries");
2579 /* do not reset the keepalive timer, for that
2580 * we'd need traffic, and this is where the serviced is
2581 * removed due to state machine internal reasons,
2582 * eg. iterator no longer interested in this query */
2583 return 1;
2584 }
2585 /* if still open and want to keep it open */
2586 if(pend_tcp->c->fd != -1 && sq->outnet->tcp_reuse.count <
2587 sq->outnet->tcp_reuse_max) {
2588 verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep: keep open");
2589 /* set a keepalive timer on it */
2590 if(!reuse_tcp_insert(sq->outnet, pend_tcp)) {
2591 return 0;
2592 }
2593 reuse_tcp_setup_timeout(pend_tcp, sq->outnet->tcp_reuse_timeout);
2594 return 1;
2595 }
2596 return 0;
2597}
2598
2599/** cleanup serviced query entry */
2600static void
2601serviced_delete(struct serviced_query* sq)
2602{
2603 verbose(VERB_CLIENT, "serviced_delete");
2604 if(sq->pending) {
2605 /* clear up the pending query */
2606 if(sq->status == serviced_query_UDP_EDNS ||
2607 sq->status == serviced_query_UDP ||
2608 sq->status == serviced_query_UDP_EDNS_FRAG ||
2609 sq->status == serviced_query_UDP_EDNS_fallback) {
2610 struct pending* p = (struct pending*)sq->pending;
2611 verbose(VERB_CLIENT, "serviced_delete: UDP");
2612 if(p->pc)
2613 portcomm_loweruse(sq->outnet, p->pc);
2614 pending_delete(sq->outnet, p);
2615 /* this call can cause reentrant calls back into the
2616 * mesh */
2617 outnet_send_wait_udp(sq->outnet);
2618 } else {
2619 struct waiting_tcp* w = (struct waiting_tcp*)
2620 sq->pending;
2621 verbose(VERB_CLIENT, "serviced_delete: TCP");
2622 /* if on stream-write-waiting list then
2623 * remove from waiting list and waiting_tcp_delete */
2624 if(w->write_wait_queued) {
2625 struct pending_tcp* pend =
2626 (struct pending_tcp*)w->next_waiting;
2627 verbose(VERB_CLIENT, "serviced_delete: writewait");
2628 reuse_tree_by_id_delete(&pend->reuse, w);
2629 reuse_write_wait_remove(&pend->reuse, w);
2630 waiting_tcp_delete(w);
2631 } else if(!w->on_tcp_waiting_list) {
2632 struct pending_tcp* pend =
2633 (struct pending_tcp*)w->next_waiting;
2634 verbose(VERB_CLIENT, "serviced_delete: tcpreusekeep");
2635 if(!reuse_tcp_remove_serviced_keep(w, sq)) {
2636 reuse_cb_and_decommission(sq->outnet,
2637 pend, NETEVENT_CLOSED);
2638 use_free_buffer(sq->outnet);
2639 }
2640 sq->pending = NULL;
2641 } else {
2642 verbose(VERB_CLIENT, "serviced_delete: tcpwait");
2643 waiting_list_remove(sq->outnet, w);
2644 waiting_tcp_delete(w);
2645 }
2646 }
2647 }
2648 /* does not delete from tree, caller has to do that */
2649 serviced_node_del(&sq->node, NULL);
2650}
2651
2652/** perturb a dname capitalization randomly */
2653static void
2654serviced_perturb_qname(struct ub_randstate* rnd, uint8_t* qbuf, size_t len)
2655{
2656 uint8_t lablen;
2657 uint8_t* d = qbuf + 10;
2658 long int random = 0;
2659 int bits = 0;
2660 log_assert(len >= 10 + 5 /* offset qname, root, qtype, qclass */);
2661 (void)len;
2662 lablen = *d++;
2663 while(lablen) {
2664 while(lablen--) {
2665 /* only perturb A-Z, a-z */
2666 if(isalpha((unsigned char)*d)) {
2667 /* get a random bit */
2668 if(bits == 0) {
2669 random = ub_random(rnd);
2670 bits = 30;
2671 }
2672 if(random & 0x1) {
2673 *d = (uint8_t)toupper((unsigned char)*d);
2674 } else {
2675 *d = (uint8_t)tolower((unsigned char)*d);
2676 }
2677 random >>= 1;
2678 bits--;
2679 }
2680 d++;
2681 }
2682 lablen = *d++;
2683 }
2684 if(verbosity >= VERB_ALGO) {
2685 char buf[LDNS_MAX_DOMAINLEN+1];
2686 dname_str(qbuf+10, buf);
2687 verbose(VERB_ALGO, "qname perturbed to %s", buf);
2688 }
2689}
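
[Editor's note] the 0x20 scheme above gives every letter of the qname a random case; since a well-behaved upstream echoes the question name byte-for-byte, the case pattern adds entropy on top of the 16-bit message ID, e.g. "www.example.com" might go out as "wWw.exAMple.Com". A standalone sketch on a dot-separated string rather than wire-format labels, with rand() standing in for unbound's random state (illustrative only):

#include <ctype.h>
#include <stdlib.h>

/* randomise the case of every letter in a NUL-terminated name */
static void
perturb_case_sketch(char* name)
{
    for(; *name; name++) {
        if(!isalpha((unsigned char)*name))
            continue;
        *name = (rand() & 1)
            ? (char)toupper((unsigned char)*name)
            : (char)tolower((unsigned char)*name);
    }
}
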
2690
2691/** put serviced query into a buffer */
2692static void
2693serviced_encode(struct serviced_query* sq, sldns_buffer* buff, int with_edns)
2694{
2695 /* if we are using 0x20 bits for ID randomness, perturb them */
2696 if(sq->outnet->use_caps_for_id && !sq->nocaps) {
2697 serviced_perturb_qname(sq->outnet->rnd, sq->qbuf, sq->qbuflen);
2698 }
2699 /* generate query */
2700 sldns_buffer_clear(buff);
2701 sldns_buffer_write_u16(buff, 0); /* id placeholder */
2702 sldns_buffer_write(buff, sq->qbuf, sq->qbuflen);
2703 sldns_buffer_flip(buff);
2704 if(with_edns) {
2705 /* add edns section */
2706 struct edns_data edns;
2707 struct edns_option padding_option;
2708 edns.edns_present = 1;
2709 edns.ext_rcode = 0;
2710 edns.edns_version = EDNS_ADVERTISED_VERSION;
2711 edns.opt_list = sq->opt_list;
2712 if(sq->status == serviced_query_UDP_EDNS_FRAG) {
2713 if(addr_is_ip6(&sq->addr, sq->addrlen)) {
2714 if(EDNS_FRAG_SIZE_IP6 < EDNS_ADVERTISED_SIZE)
2715 edns.udp_size = EDNS_FRAG_SIZE_IP6;
2716 else edns.udp_size = EDNS_ADVERTISED_SIZE;
2717 } else {
2718 if(EDNS_FRAG_SIZE_IP4 < EDNS_ADVERTISED_SIZE)
2719 edns.udp_size = EDNS_FRAG_SIZE_IP4;
2720 else edns.udp_size = EDNS_ADVERTISED_SIZE;
2721 }
2722 } else {
2723 edns.udp_size = EDNS_ADVERTISED_SIZE;
2724 }
2725 edns.bits = 0;
2726 if(sq->dnssec & EDNS_DO)
2727 edns.bits = EDNS_DO;
2728 if(sq->dnssec & BIT_CD)
2729 LDNS_CD_SET(sldns_buffer_begin(buff));
2730 if (sq->ssl_upstream && sq->padding_block_size) {
2731 padding_option.opt_code = LDNS_EDNS_PADDING;
2732 padding_option.opt_len = 0;
2733 padding_option.opt_data = NULL;
2734 padding_option.next = edns.opt_list;
2735 edns.opt_list = &padding_option;
2736 edns.padding_block_size = sq->padding_block_size;
2737 }
2738 attach_edns_record(buff, &edns);
2739 }
2740}
2741
2742/**
2743 * Perform serviced query UDP sending operation.
2744 * Sends UDP with EDNS, unless infra host marked non EDNS.
2745 * @param sq: query to send.
2746 * @param buff: buffer scratch space.
2747 * @return 0 on error.
2748 */
2749static int
2750serviced_udp_send(struct serviced_query* sq, sldns_buffer* buff)
2751{
2752 int rtt, vs;
2753 uint8_t edns_lame_known;
2754 time_t now = *sq->outnet->now_secs;
2755
2756 if(!infra_host(sq->outnet->infra, &sq->addr, sq->addrlen, sq->zone,
2757 sq->zonelen, now, &vs, &edns_lame_known, &rtt))
2758 return 0;
2759 sq->last_rtt = rtt;
2760 verbose(VERB_ALGO, "EDNS lookup known=%d vs=%d", edns_lame_known, vs);
2761 if(sq->status == serviced_initial) {
2762 if(vs != -1) {
2763 sq->status = serviced_query_UDP_EDNS;
2764 } else {
2765 sq->status = serviced_query_UDP;
2766 }
2767 }
2768 serviced_encode(sq, buff, (sq->status == serviced_query_UDP_EDNS) ||
2769 (sq->status == serviced_query_UDP_EDNS_FRAG));
2770 sq->last_sent_time = *sq->outnet->now_tv;
2771 sq->edns_lame_known = (int)edns_lame_known;
2772 verbose(VERB_ALGO, "serviced query UDP timeout=%d msec", rtt);
2773 sq->pending = pending_udp_query(sq, buff, rtt,
2774 serviced_udp_callback, sq);
2775 if(!sq->pending)
2776 return 0;
2777 return 1;
2778}
2779
2780/** check that perturbed qname is identical */
2781static int
2782serviced_check_qname(sldns_buffer* pkt, uint8_t* qbuf, size_t qbuflen)
2783{
2784 uint8_t* d1 = sldns_buffer_begin(pkt)+12;
2785 uint8_t* d2 = qbuf+10;
2786 uint8_t len1, len2;
2787 int count = 0;
2788 if(sldns_buffer_limit(pkt) < 12+1+4) /* packet too small for qname */
2789 return 0;
2790 log_assert(qbuflen >= 15 /* 10 header, root, type, class */);
2791 len1 = *d1++;
2792 len2 = *d2++;
2793 while(len1 != 0 || len2 != 0) {
2794 if(LABEL_IS_PTR(len1)) {
2795 /* check if we can read *d1 with compression ptr rest */
2796 if(d1 >= sldns_buffer_at(pkt, sldns_buffer_limit(pkt)))
2797 return 0;
2798 d1 = sldns_buffer_begin(pkt)+PTR_OFFSET(len1, *d1);
2799 /* check if we can read the destination *d1 */
2800 if(d1 >= sldns_buffer_at(pkt, sldns_buffer_limit(pkt)))
2801 return 0;
2802 len1 = *d1++;
2803 if(count++ > MAX_COMPRESS_PTRS)
2804 return 0;
2805 continue;
2806 }
2807 if(d2 > qbuf+qbuflen)
2808 return 0;
2809 if(len1 != len2)
2810 return 0;
2811 if(len1 > LDNS_MAX_LABELLEN)
2812 return 0;
2813 /* check len1 + 1(next length) are okay to read */
2814 if(d1+len1 >= sldns_buffer_at(pkt, sldns_buffer_limit(pkt)))
2815 return 0;
2816 log_assert(len1 <= LDNS_MAX_LABELLEN);
2817 log_assert(len2 <= LDNS_MAX_LABELLEN);
2818 log_assert(len1 == len2 && len1 != 0);
2819 /* compare the labels - bitwise identical */
2820 if(memcmp(d1, d2, len1) != 0)
2821 return 0;
2822 d1 += len1;
2823 d2 += len2;
2824 len1 = *d1++;
2825 len2 = *d2++;
2826 }
2827 return 1;
2828}
2829
2830/** call the callbacks for a serviced query */
2831static void
2832serviced_callbacks(struct serviced_query* sq, int error, struct comm_point* c,
2833 struct comm_reply* rep)
2834{
2835 struct service_callback* p;
2836 int dobackup = (sq->cblist && sq->cblist->next); /* >1 cb*/
2837 uint8_t *backup_p = NULL;
2838 size_t backlen = 0;
2839#ifdef UNBOUND_DEBUG
2840 rbnode_type* rem =
2841#else
2842 (void)
2843#endif
2844 /* remove from tree, and schedule for deletion, so that callbacks
2845 * can safely deregister themselves and even create new serviced
2846 * queries that are identical to this one. */
2847 rbtree_delete(sq->outnet->serviced, sq);
2848 log_assert(rem); /* should have been present */
2849 sq->to_be_deleted = 1;
2850 verbose(VERB_ALGO, "svcd callbacks start");
2851 if(sq->outnet->use_caps_for_id && error == NETEVENT_NOERROR && c &&
2852 !sq->nocaps && sq->qtype != LDNS_RR_TYPE_PTR) {
2853 /* for type PTR do not check perturbed name in answer,
2854 * compatibility with cisco dns guard boxes that mess up
2855 * reverse queries 0x20 contents */
2856 /* noerror and nxdomain must have a qname in reply */
2857 if(sldns_buffer_read_u16_at(c->buffer, 4) == 0 &&
2858 (LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer))
2859 == LDNS_RCODE_NOERROR ||
2860 LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer))
2861 == LDNS_RCODE_NXDOMAIN)) {
2862 verbose(VERB_DETAIL, "no qname in reply to check 0x20ID");
2863 log_addr(VERB_DETAIL, "from server",
2864 &sq->addr, sq->addrlen);
2865 log_buf(VERB_DETAIL, "for packet", c->buffer);
2866 error = NETEVENT_CLOSED;
2867 c = NULL;
2868 } else if(sldns_buffer_read_u16_at(c->buffer, 4) > 0 &&
2869 !serviced_check_qname(c->buffer, sq->qbuf,
2870 sq->qbuflen)) {
2871 verbose(VERB_DETAIL, "wrong 0x20-ID in reply qname");
2872 log_addr(VERB_DETAIL, "from server",
2873 &sq->addr, sq->addrlen);
2874 log_buf(VERB_DETAIL, "for packet", c->buffer);
2875 error = NETEVENT_CAPSFAIL;
2876 /* and cleanup too */
2877 pkt_dname_tolower(c->buffer,
2878 sldns_buffer_at(c->buffer, 12));
2879 } else {
2880 verbose(VERB_ALGO, "good 0x20-ID in reply qname");
2881 /* cleanup caps, prettier cache contents. */
2882 pkt_dname_tolower(c->buffer,
2883 sldns_buffer_at(c->buffer, 12));
2884 }
2885 }
2886 if(dobackup && c) {
2887 /* make a backup of the query, since the querystate processing
2888 * may send outgoing queries that overwrite the buffer.
2889 * use secondary buffer to store the query.
2890 * This is a data copy, but faster than packet to server */
2891 backlen = sldns_buffer_limit(c->buffer);
2892 backup_p = memdup(sldns_buffer_begin(c->buffer), backlen);
2893 if(!backup_p) {
2894 log_err("malloc failure in serviced query callbacks");
2895 error = NETEVENT_CLOSED;
2896 c = NULL;
2897 }
2898 sq->outnet->svcd_overhead = backlen;
2899 }
2900 /* test the actual sq->cblist, because the next elem could be deleted*/
2901 while((p=sq->cblist) != NULL) {
2902 sq->cblist = p->next; /* remove this element */
2903 if(dobackup && c) {
2904 sldns_buffer_clear(c->buffer);
2905 sldns_buffer_write(c->buffer, backup_p, backlen);
2906 sldns_buffer_flip(c->buffer);
2907 }
2908 fptr_ok(fptr_whitelist_serviced_query(p->cb));
2909 (void)(*p->cb)(c, p->cb_arg, error, rep);
2910 free(p);
2911 }
2912 if(backup_p) {
2913 free(backup_p);
2914 sq->outnet->svcd_overhead = 0;
2915 }
2916 verbose(VERB_ALGO, "svcd callbacks end");
2917 log_assert(sq->cblist == NULL);
2918 serviced_delete(sq);
2919}
2920
2921int
2922serviced_tcp_callback(struct comm_point* c, void* arg, int error,
2923 struct comm_reply* rep)
2924{
2925 struct serviced_query* sq = (struct serviced_query*)arg;
2926 struct comm_reply r2;
2927#ifdef USE_DNSTAP
2928 struct waiting_tcp* w = (struct waiting_tcp*)sq->pending;
2929 struct pending_tcp* pend_tcp = NULL;
2930 struct port_if* pi = NULL;
2931 if(!w->on_tcp_waiting_list && w->next_waiting) {
2932 pend_tcp = (struct pending_tcp*)w->next_waiting;
2933 pi = pend_tcp->pi;
2934 }
2935#endif
2936 sq->pending = NULL; /* removed after this callback */
2937 if(error != NETEVENT_NOERROR)
2938 log_addr(VERB_QUERY, "tcp error for address",
2939 &sq->addr, sq->addrlen);
2940 if(error==NETEVENT_NOERROR)
2941 infra_update_tcp_works(sq->outnet->infra, &sq->addr,
2942 sq->addrlen, sq->zone, sq->zonelen);
2943#ifdef USE_DNSTAP
2944 /*
2945 * sending src (local service)/dst (upstream) addresses over DNSTAP
2946 */
2947 if(error==NETEVENT_NOERROR && pi && sq->outnet->dtenv &&
2948 (sq->outnet->dtenv->log_resolver_response_messages ||
2949 sq->outnet->dtenv->log_forwarder_response_messages)) {
2950 log_addr(VERB_ALGO, "response from upstream", &sq->addr, sq->addrlen);
2951 log_addr(VERB_ALGO, "to local addr", &pi->addr, pi->addrlen);
2952 dt_msg_send_outside_response(sq->outnet->dtenv, &sq->addr,
2953 &pi->addr, c->type, sq->zone, sq->zonelen, sq->qbuf,
2954 sq->qbuflen, &sq->last_sent_time, sq->outnet->now_tv,
2955 c->buffer);
2956 }
2957#endif
2958 if(error==NETEVENT_NOERROR && sq->status == serviced_query_TCP_EDNS &&
2959 (LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) ==
2960 LDNS_RCODE_FORMERR || LDNS_RCODE_WIRE(sldns_buffer_begin(
2961 c->buffer)) == LDNS_RCODE_NOTIMPL) ) {
2962 /* attempt to fallback to nonEDNS */
2963 sq->status = serviced_query_TCP_EDNS_fallback;
2964 serviced_tcp_initiate(sq, c->buffer);
2965 return 0;
2966 } else if(error==NETEVENT_NOERROR &&
2967 sq->status == serviced_query_TCP_EDNS_fallback &&
2968 (LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) ==
2969 LDNS_RCODE_NOERROR || LDNS_RCODE_WIRE(
2970 sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NXDOMAIN
2971 || LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer))
2972 == LDNS_RCODE_YXDOMAIN)) {
2973 /* the fallback produced a result that looks promising, note
2974 * that this server should be approached without EDNS */
2975 /* only store noEDNS in cache if domain is noDNSSEC */
2976 if(!sq->want_dnssec)
2977 if(!infra_edns_update(sq->outnet->infra, &sq->addr,
2978 sq->addrlen, sq->zone, sq->zonelen, -1,
2979 *sq->outnet->now_secs))
2980 log_err("Out of memory caching no edns for host");
2981 sq->status = serviced_query_TCP;
2982 }
2983 if(sq->tcp_upstream || sq->ssl_upstream) {
2984 struct timeval now = *sq->outnet->now_tv;
2985 if(error!=NETEVENT_NOERROR) {
2986 if(!infra_rtt_update(sq->outnet->infra, &sq->addr,
2987 sq->addrlen, sq->zone, sq->zonelen, sq->qtype,
2988 -1, sq->last_rtt, (time_t)now.tv_sec))
2989 log_err("out of memory in TCP exponential backoff.");
2990 } else if(now.tv_sec > sq->last_sent_time.tv_sec ||
2991 (now.tv_sec == sq->last_sent_time.tv_sec &&
2992 now.tv_usec > sq->last_sent_time.tv_usec)) {
2993 /* convert from microseconds to milliseconds */
2994 int roundtime = ((int)(now.tv_sec - sq->last_sent_time.tv_sec))*1000
2995 + ((int)now.tv_usec - (int)sq->last_sent_time.tv_usec)/1000;
2996 verbose(VERB_ALGO, "measured TCP-time at %d msec", roundtime);
2997 log_assert(roundtime >= 0);
2998 /* only store if less than AUTH_TIMEOUT seconds, it could be
2999 * huge due to system-hibernated and we woke up */
3000 if(roundtime < 60000) {
3001 if(!infra_rtt_update(sq->outnet->infra, &sq->addr,
3002 sq->addrlen, sq->zone, sq->zonelen, sq->qtype,
3003 roundtime, sq->last_rtt, (time_t)now.tv_sec))
3004 log_err("out of memory noting rtt.");
3005 }
3006 }
3007 }
3008 /* insert address into reply info */
3009 if(!rep) {
3010 /* create one if there isn't (on errors) */
3011 rep = &r2;
3012 r2.c = c;
3013 }
3014 memcpy(&rep->addr, &sq->addr, sq->addrlen);
3015 rep->addrlen = sq->addrlen;
3016 serviced_callbacks(sq, error, c, rep);
3017 return 0;
3018}
3019
3020static void
3021serviced_tcp_initiate(struct serviced_query* sq, sldns_buffer* buff)
3022{
3023 verbose(VERB_ALGO, "initiate TCP query %s",
3024 sq->status==serviced_query_TCP_EDNS?"EDNS":"");
3025 serviced_encode(sq, buff, sq->status == serviced_query_TCP_EDNS);
3026 sq->last_sent_time = *sq->outnet->now_tv;
3027 sq->pending = pending_tcp_query(sq, buff, sq->outnet->tcp_auth_query_timeout,
3028 serviced_tcp_callback, sq);
3029 if(!sq->pending) {
3030 /* delete from tree so that a retry by above layer does not
3031 * clash with this entry */
3032 verbose(VERB_ALGO, "serviced_tcp_initiate: failed to send tcp query");
3033 serviced_callbacks(sq, NETEVENT_CLOSED, NULL, NULL);
3034 }
3035}
3036
3037/** Send serviced query over TCP return false on initial failure */
3038static int
3039serviced_tcp_send(struct serviced_query* sq, sldns_buffer* buff)
3040{
3041 int vs, rtt, timeout;
3042 uint8_t edns_lame_known;
3043 if(!infra_host(sq->outnet->infra, &sq->addr, sq->addrlen, sq->zone,
3044 sq->zonelen, *sq->outnet->now_secs, &vs, &edns_lame_known,
3045 &rtt))
3046 return 0;
3047 sq->last_rtt = rtt;
3048 if(vs != -1)
3049 sq->status = serviced_query_TCP_EDNS;
3050 else sq->status = serviced_query_TCP;
3051 serviced_encode(sq, buff, sq->status == serviced_query_TCP_EDNS);
3052 sq->last_sent_time = *sq->outnet->now_tv;
3053 if(sq->tcp_upstream || sq->ssl_upstream) {
3054 timeout = rtt;
3055 if(rtt >= UNKNOWN_SERVER_NICENESS && rtt < sq->outnet->tcp_auth_query_timeout)
3056 timeout = sq->outnet->tcp_auth_query_timeout;
3057 } else {
3058 timeout = sq->outnet->tcp_auth_query_timeout;
3059 }
3060 sq->pending = pending_tcp_query(sq, buff, timeout,
3061 serviced_tcp_callback, sq);
3062 return sq->pending != NULL;
3063}
3064
3065/* see if packet is edns malformed; got zeroes at start.
3066 * This is from servers that return malformed packets to EDNS0 queries,
3067 * but they return good packets for nonEDNS0 queries.
3068 * We try to detect their output; without resorting to a full parse or
3069 * check for too many bytes after the end of the packet. */
3070static int
3071packet_edns_malformed(struct sldns_buffer* buf, int qtype)
3072{
3073 size_t len;
3074 if(sldns_buffer_limit(buf) < LDNS_HEADER_SIZE)
3075 return 1; /* malformed */
3076 /* they have NOERROR rcode, 1 answer. */
3077 if(LDNS_RCODE_WIRE(sldns_buffer_begin(buf)) != LDNS_RCODE_NOERROR)
3078 return 0;
3079 /* one query (to skip) and answer records */
3080 if(LDNS_QDCOUNT(sldns_buffer_begin(buf)) != 1 ||
3081 LDNS_ANCOUNT(sldns_buffer_begin(buf)) == 0)
3082 return 0;
3083 /* skip qname */
3084 len = dname_valid(sldns_buffer_at(buf, LDNS_HEADER_SIZE),
3085 sldns_buffer_limit(buf)-LDNS_HEADER_SIZE);
3086 if(len == 0)
3087 return 0;
3088 if(len == 1 && qtype == 0)
3089 return 0; /* we asked for '.' and type 0 */
3090 /* and then 4 bytes (type and class of query) */
3091 if(sldns_buffer_limit(buf) < LDNS_HEADER_SIZE + len + 4 + 3)
3092 return 0;
3093
3094 /* and start with 11 zeroes as the answer RR */
3095 /* so check the qtype of the answer record, qname=0, type=0 */
3096 if(sldns_buffer_at(buf, LDNS_HEADER_SIZE+len+4)[0] == 0 &&
3097 sldns_buffer_at(buf, LDNS_HEADER_SIZE+len+4)[1] == 0 &&
3098 sldns_buffer_at(buf, LDNS_HEADER_SIZE+len+4)[2] == 0)
3099 return 1;
3100 return 0;
3101}
3102
3103int
3104serviced_udp_callback(struct comm_point* c, void* arg, int error,
3105 struct comm_reply* rep)
3106{
3107 struct serviced_query* sq = (struct serviced_query*)arg;
3108 struct outside_network* outnet = sq->outnet;
3109 struct timeval now = *sq->outnet->now_tv;
3110#ifdef USE_DNSTAP
3111 struct pending* p = (struct pending*)sq->pending;
3112 struct port_if* pi = p->pc->pif;
3113#endif
3114
3115 sq->pending = NULL; /* removed after callback */
3116 if(error == NETEVENT_TIMEOUT) {
3117 if(sq->status == serviced_query_UDP_EDNS && sq->last_rtt < 5000) {
3118 /* fallback to 1480/1280 */
3119 sq->status = serviced_query_UDP_EDNS_FRAG;
3120 log_name_addr(VERB_ALGO, "try edns1xx0", sq->qbuf+10,
3121 &sq->addr, sq->addrlen);
3122 if(!serviced_udp_send(sq, c->buffer)) {
3123 serviced_callbacks(sq, NETEVENT_CLOSED, c, rep);
3124 }
3125 return 0;
3126 }
3127 if(sq->status == serviced_query_UDP_EDNS_FRAG) {
3128 /* fragmentation size did not fix it */
3129 sq->status = serviced_query_UDP_EDNS;
3130 }
3131 sq->retry++;
3132 if(!infra_rtt_update(outnet->infra, &sq->addr, sq->addrlen,
3133 sq->zone, sq->zonelen, sq->qtype, -1, sq->last_rtt,
3134 (time_t)now.tv_sec))
3135 log_err("out of memory in UDP exponential backoff");
3136 if(sq->retry < OUTBOUND_UDP_RETRY) {
3137 log_name_addr(VERB_ALGO, "retry query", sq->qbuf+10,
3138 &sq->addr, sq->addrlen);
3139 if(!serviced_udp_send(sq, c->buffer)) {
3140 serviced_callbacks(sq, NETEVENT_CLOSED, c, rep);
3141 }
3142 return 0;
3143 }
3144 }
3145 if(error != NETEVENT_NOERROR) {
3146 /* udp returns error (due to no ID or interface available) */
3147 serviced_callbacks(sq, error, c, rep);
3148 return 0;
3149 }
3150#ifdef USE_DNSTAP
3151 /*
3152 * sending src (local service)/dst (upstream) addresses over DNSTAP
3153 */
3154 if(error == NETEVENT_NOERROR && outnet->dtenv &&
3155 (outnet->dtenv->log_resolver_response_messages ||
3156 outnet->dtenv->log_forwarder_response_messages)) {
3157 log_addr(VERB_ALGO, "response from upstream", &sq->addr, sq->addrlen);
3158 log_addr(VERB_ALGO, "to local addr", &pi->addr, pi->addrlen);
3159 dt_msg_send_outside_response(outnet->dtenv, &sq->addr, &pi->addr, c->type,
3160 sq->zone, sq->zonelen, sq->qbuf, sq->qbuflen,
3161 &sq->last_sent_time, sq->outnet->now_tv, c->buffer);
3162 }
3163#endif
3164 if( (sq->status == serviced_query_UDP_EDNS
3165 ||sq->status == serviced_query_UDP_EDNS_FRAG)
3166 && (LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer))
3167 == LDNS_RCODE_FORMERR || LDNS_RCODE_WIRE(
3168 sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NOTIMPL
3169 || packet_edns_malformed(c->buffer, sq->qtype)
3170 )) {
3171 /* try to get an answer by falling back without EDNS */
3172 verbose(VERB_ALGO, "serviced query: attempt without EDNS");
3173 sq->status = serviced_query_UDP_EDNS_fallback;
3174 sq->retry = 0;
3175 if(!serviced_udp_send(sq, c->buffer)) {
3176 serviced_callbacks(sq, NETEVENT_CLOSED, c, rep);
3177 }
3178 return 0;
3179 } else if(sq->status == serviced_query_UDP_EDNS &&
3180 !sq->edns_lame_known) {
3181 /* now we know that edns queries received answers store that */
3182 log_addr(VERB_ALGO, "serviced query: EDNS works for",
3183 &sq->addr, sq->addrlen);
3184 if(!infra_edns_update(outnet->infra, &sq->addr, sq->addrlen,
3185 sq->zone, sq->zonelen, 0, (time_t)now.tv_sec)) {
3186 log_err("Out of memory caching edns works");
3187 }
3188 sq->edns_lame_known = 1;
3189 } else if(sq->status == serviced_query_UDP_EDNS_fallback &&
3190 !sq->edns_lame_known && (LDNS_RCODE_WIRE(
3191 sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NOERROR ||
3192 LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) ==
3193 LDNS_RCODE_NXDOMAIN || LDNS_RCODE_WIRE(sldns_buffer_begin(
3194 c->buffer)) == LDNS_RCODE_YXDOMAIN)) {
3195 /* the fallback produced a result that looks promising, note
3196 * that this server should be approached without EDNS */
3197 /* only store noEDNS in cache if domain is noDNSSEC */
3198 if(!sq->want_dnssec) {
3199 log_addr(VERB_ALGO, "serviced query: EDNS fails for",
3200 &sq->addr, sq->addrlen);
3201 if(!infra_edns_update(outnet->infra, &sq->addr, sq->addrlen,
3202 sq->zone, sq->zonelen, -1, (time_t)now.tv_sec)) {
3203 log_err("Out of memory caching no edns for host");
3204 }
3205 } else {
3206 log_addr(VERB_ALGO, "serviced query: EDNS fails, but "
3207 "not stored because need DNSSEC for", &sq->addr,
3208 sq->addrlen);
3209 }
3210 sq->status = serviced_query_UDP;
3211 }
3212 if(now.tv_sec > sq->last_sent_time.tv_sec ||
3213 (now.tv_sec == sq->last_sent_time.tv_sec &&
3214 now.tv_usec > sq->last_sent_time.tv_usec)) {
3215 /* convert from microseconds to milliseconds */
3216 int roundtime = ((int)(now.tv_sec - sq->last_sent_time.tv_sec))*1000
3217 + ((int)now.tv_usec - (int)sq->last_sent_time.tv_usec)/1000;
3218 verbose(VERB_ALGO, "measured roundtrip at %d msec", roundtime);
3219 log_assert(roundtime >= 0);
3220 /* in case the system hibernated, do not enter a huge value,
3221 * above this value gives trouble with server selection */
3222 if(roundtime < 60000) {
3223 if(!infra_rtt_update(outnet->infra, &sq->addr, sq->addrlen,
3224 sq->zone, sq->zonelen, sq->qtype, roundtime,
3225 sq->last_rtt, (time_t)now.tv_sec))
3226 log_err("out of memory noting rtt.");
3227 }
3228 }
3229 /* perform TC flag check and TCP fallback after updating our
3230 * cache entries for EDNS status and RTT times */
3231 if(LDNS_TC_WIRE(sldns_buffer_begin(c->buffer))) {
3232 /* fallback to TCP */
3233 /* this discards partial UDP contents */
3234 if(sq->status == serviced_query_UDP_EDNS ||
3235 sq->status == serviced_query_UDP_EDNS_FRAG ||
3236 sq->status == serviced_query_UDP_EDNS_fallback)
3237 /* if we have unfinished EDNS_fallback, start again */
3238 sq->status = serviced_query_TCP_EDNS;
3239 else sq->status = serviced_query_TCP;
3240 serviced_tcp_initiate(sq, c->buffer);
3241 return 0;
3242 }
3243 /* yay! an answer */
3244 serviced_callbacks(sq, error, c, rep);
3245 return 0;
3246}
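The round-trip measurement near the end of serviced_udp_callback converts the difference of two struct timeval stamps to milliseconds and discards values of 60000 and above, so a hibernated system cannot feed a huge RTT into server selection. A standalone sketch of that conversion (sketch_roundtrip_msec is an invented name, not an unbound function):

/* sketch only: timeval difference in milliseconds, mirroring the
 * arithmetic at source lines 3216-3217 above */
#include <sys/time.h>

static int sketch_roundtrip_msec(const struct timeval* sent,
	const struct timeval* now)
{
	return ((int)(now->tv_sec - sent->tv_sec))*1000
		+ ((int)now->tv_usec - (int)sent->tv_usec)/1000;
}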
3247
3248struct serviced_query*
3249outnet_serviced_query(struct outside_network* outnet,
3250 struct query_info* qinfo, uint16_t flags, int dnssec, int want_dnssec,
3251 int nocaps, int tcp_upstream, int ssl_upstream, char* tls_auth_name,
3252 struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* zone,
3253 size_t zonelen, struct module_qstate* qstate,
3254 comm_point_callback_type* callback, void* callback_arg, sldns_buffer* buff,
3255 struct module_env* env)
3256{
3257 struct serviced_query* sq;
3258 struct service_callback* cb;
3259 struct edns_string_addr* client_string_addr;
3260
3261 if(!inplace_cb_query_call(env, qinfo, flags, addr, addrlen, zone, zonelen,
3262 qstate, qstate->region))
3263 return NULL;
3264
3265 if((client_string_addr = edns_string_addr_lookup(
3266 &env->edns_strings->client_strings, addr, addrlen))) {
3267 edns_opt_list_append(&qstate->edns_opts_back_out,
3268 env->edns_strings->client_string_opcode,
3269 client_string_addr->string_len,
3270 client_string_addr->string, qstate->region);
3271 }
3272
3273 serviced_gen_query(buff, qinfo->qname, qinfo->qname_len, qinfo->qtype,
3274 qinfo->qclass, flags);
3275 sq = lookup_serviced(outnet, buff, dnssec, addr, addrlen,
3276 qstate->edns_opts_back_out);
3277 /* duplicate entries are included in the callback list, because
3278 * there is a counterpart registration by our caller that needs to
3279 * be doubly-removed (with callbacks perhaps). */
3280 if(!(cb = (struct service_callback*)malloc(sizeof(*cb))))
3281 return NULL;
3282 if(!sq) {
3283 /* make new serviced query entry */
3284 sq = serviced_create(outnet, buff, dnssec, want_dnssec, nocaps,
3285 tcp_upstream, ssl_upstream, tls_auth_name, addr,
3286 addrlen, zone, zonelen, (int)qinfo->qtype,
3287 qstate->edns_opts_back_out,
3288 ( ssl_upstream && env->cfg->pad_queries
3289 ? env->cfg->pad_queries_block_size : 0 ));
3290 if(!sq) {
3291 free(cb);
3292 return NULL;
3293 }
3294 /* perform first network action */
3295 if(outnet->do_udp && !(tcp_upstream || ssl_upstream)) {
3296 if(!serviced_udp_send(sq, buff)) {
3297 (void)rbtree_delete(outnet->serviced, sq);
3298 serviced_node_del(&sq->node, NULL);
3299 free(cb);
3300 return NULL;
3301 }
3302 } else {
3303 if(!serviced_tcp_send(sq, buff)) {
3304 (void)rbtree_delete(outnet->serviced, sq);
3305 serviced_node_del(&sq->node, NULL);
3306 free(cb);
3307 return NULL;
3308 }
3309 }
3310 }
3311 /* add callback to list of callbacks */
3312 cb->cb = callback;
3313 cb->cb_arg = callback_arg;
3314 cb->next = sq->cblist;
3315 sq->cblist = cb;
3316 return sq;
3317}
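outnet_serviced_query registers a comm_point_callback_type function plus an opaque callback_arg on the query's callback list; outnet_serviced_query_stop later uses the same callback_arg to locate and free that entry. A minimal sketch of what a caller-side callback could look like, assuming only the signature visible in serviced_udp_callback above (my_query_done and struct my_state are invented names):

/* sketch only: a callback with the same shape as serviced_udp_callback */
struct comm_point;
struct comm_reply;

struct my_state {
	int answered; /* set once the serviced query reports back */
};

static int my_query_done(struct comm_point* c, void* arg, int error,
	struct comm_reply* rep)
{
	struct my_state* st = (struct my_state*)arg;
	(void)c; (void)rep;
	st->answered = (error == 0); /* assumes NETEVENT_NOERROR == 0 */
	return 0;
}

A caller would pass my_query_done as callback and &my_state as callback_arg, and later call outnet_serviced_query_stop(sq, &my_state) to drop the registration.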
3318
3319/** remove callback from list */
3320static void
3321callback_list_remove(struct serviced_query* sq, void* cb_arg)
3322{
3323 struct service_callback** pp = &sq->cblist;
3324 while(*pp) {
3325 if((*pp)->cb_arg == cb_arg) {
3326 struct service_callback* del = *pp;
3327 *pp = del->next;
3328 free(del);
3329 return;
3330 }
3331 pp = &(*pp)->next;
3332 }
3333}
3334
3335void outnet_serviced_query_stop(struct serviced_query* sq, void* cb_arg)
3336{
3337 if(!sq)
3338 return;
3339 callback_list_remove(sq, cb_arg);
3340 /* if callbacks() routine scheduled deletion, let it do that */
3341 if(!sq->cblist && !sq->to_be_deleted) {
3342 (void)rbtree_delete(sq->outnet->serviced, sq);
3343 serviced_delete(sq);
3344 }
3345}
3346
3347/** create fd to send to this destination */
3348static int
3349fd_for_dest(struct outside_network* outnet, struct sockaddr_storage* to_addr,
3350 socklen_t to_addrlen)
3351{
3352 struct sockaddr_storage* addr;
3353 socklen_t addrlen;
3354 int i, try, pnum, dscp;
3355 struct port_if* pif;
3356
3357 /* create fd */
3358 dscp = outnet->ip_dscp;
3359 for(try = 0; try<1000; try++) {
3360 int port = 0;
3361 int freebind = 0;
3362 int noproto = 0;
3363 int inuse = 0;
3364 int fd = -1;
3365
3366 /* select interface */
3367 if(addr_is_ip6(to_addr, to_addrlen)) {
3368 if(outnet->num_ip6 == 0) {
3369 char to[64];
3370 addr_to_str(to_addr, to_addrlen, to, sizeof(to));
3371 verbose(VERB_QUERY, "need ipv6 to send, but no ipv6 outgoing interfaces, for %s", to);
3372 return -1;
3373 }
3374 i = ub_random_max(outnet->rnd, outnet->num_ip6);
3375 pif = &outnet->ip6_ifs[i];
3376 } else {
3377 if(outnet->num_ip4 == 0) {
3378 char to[64];
3379 addr_to_str(to_addr, to_addrlen, to, sizeof(to));
3380 verbose(VERB_QUERY, "need ipv4 to send, but no ipv4 outgoing interfaces, for %s", to);
3381 return -1;
3382 }
3383 i = ub_random_max(outnet->rnd, outnet->num_ip4);
3384 pif = &outnet->ip4_ifs[i];
3385 }
3386 addr = &pif->addr;
3387 addrlen = pif->addrlen;
3388#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
3389 pnum = ub_random_max(outnet->rnd, pif->avail_total);
3390 if(pnum < pif->inuse) {
3391 /* port already open */
3392 port = pif->out[pnum]->number;
3393 } else {
3394 /* unused ports in start part of array */
3395 port = pif->avail_ports[pnum - pif->inuse];
3396 }
3397#else
3398 pnum = port = 0;
Value stored to 'pnum' is never read
3399#endif
3400 if(addr_is_ip6(to_addr, to_addrlen)) {
3401 struct sockaddr_in6 sa = *(struct sockaddr_in6*)addr;
3402 sa.sin6_port = (in_port_t)htons((uint16_t)port);
3403 fd = create_udp_sock(AF_INET6, SOCK_DGRAM,
3404 (struct sockaddr*)&sa, addrlen, 1, &inuse, &noproto,
3405 0, 0, 0, NULL, 0, freebind, 0, dscp);
3406 } else {
3407 struct sockaddr_in* sa = (struct sockaddr_in*)addr;
3408 sa->sin_port = (in_port_t)htons((uint16_t)port);
3409 fd = create_udp_sock(AF_INET, SOCK_DGRAM,
3410 (struct sockaddr*)addr, addrlen, 1, &inuse, &noproto,
3411 0, 0, 0, NULL, 0, freebind, 0, dscp);
3412 }
3413 if(fd != -1) {
3414 return fd;
3415 }
3416 if(!inuse) {
3417 return -1;
3418 }
3419 }
3420 /* too many tries */
3421 log_err("cannot send probe, ports are in use");
3422 return -1;
3423}
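The dead store the analyzer reports at line 3398 sits in the #else branch above: when DISABLE_EXPLICIT_PORT_RANDOMISATION is defined, pnum is written but never read, since only the #ifndef branch uses it to pick a port. One possible way to quiet the warning, sketched here as a fragment of that block rather than an official patch, is to drop the pnum assignment (or to declare pnum only inside the #ifndef branch):

/* sketch of a fix for the dead store at line 3398; not an upstream patch */
#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
		pnum = ub_random_max(outnet->rnd, pif->avail_total);
		if(pnum < pif->inuse) {
			/* port already open */
			port = pif->out[pnum]->number;
		} else {
			/* unused ports in start part of array */
			port = pif->avail_ports[pnum - pif->inuse];
		}
#else
		port = 0; /* pnum is never read in this configuration */
#endif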
3424
3425struct comm_point*
3426outnet_comm_point_for_udp(struct outside_network* outnet,
3427 comm_point_callback_type* cb, void* cb_arg,
3428 struct sockaddr_storage* to_addr, socklen_t to_addrlen)
3429{
3430 struct comm_point* cp;
3431 int fd = fd_for_dest(outnet, to_addr, to_addrlen);
3432 if(fd == -1) {
3433 return NULL;
3434 }
3435 cp = comm_point_create_udp(outnet->base, fd, outnet->udp_buff,
3436 cb, cb_arg, NULL);
3437 if(!cp) {
3438 log_err("malloc failure");
3439 close(fd);
3440 return NULL;
3441 }
3442 return cp;
3443}
3444
3445/** setup SSL for comm point */
3446static int
3447setup_comm_ssl(struct comm_point* cp, struct outside_network* outnet,
3448 int fd, char* host)
3449{
3450 cp->ssl = outgoing_ssl_fd(outnet->sslctx, fd);
3451 if(!cp->ssl) {
3452 log_err("cannot create SSL object");
3453 return 0;
3454 }
3455#ifdef USE_WINSOCK
3456 comm_point_tcp_win_bio_cb(cp, cp->ssl);
3457#endif
3458 cp->ssl_shake_state = comm_ssl_shake_write;
3459 /* https verification */
3460#ifdef HAVE_SSL
3461 if(outnet->tls_use_sni) {
3462 (void)SSL_set_tlsext_host_name(cp->ssl, host);
3463 }
3464#endif
3465#ifdef HAVE_SSL_SET1_HOST
3466 if((SSL_CTX_get_verify_mode(outnet->sslctx)&SSL_VERIFY_PEER)) {
3467 /* because we set SSL_VERIFY_PEER, in netevent in
3468 * ssl_handshake, it'll check if the certificate
3469 * verification has succeeded */
3470 /* SSL_VERIFY_PEER is set on the sslctx */
3471 /* and the certificates to verify with are loaded into
3472 * it with SSL_load_verify_locations or
3473 * SSL_CTX_set_default_verify_paths */
3474 /* setting the hostname makes openssl verify the
3475 * host name in the x509 certificate in the
3476 * SSL connection*/
3477 if(!SSL_set1_host(cp->ssl, host)) {
3478 log_err("SSL_set1_host failed");
3479 return 0;
3480 }
3481 }
3482#elif defined(HAVE_X509_VERIFY_PARAM_SET1_HOST)
3483 /* openssl 1.0.2 has this function that can be used for
3484 * set1_host like verification */
3485 if((SSL_CTX_get_verify_mode(outnet->sslctx)&SSL_VERIFY_PEER)) {
3486 X509_VERIFY_PARAM* param = SSL_get0_param(cp->ssl);
3487# ifdef X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS
3488 X509_VERIFY_PARAM_set_hostflags(param, X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS);
3489# endif
3490 if(!X509_VERIFY_PARAM_set1_host(param, host, strlen(host))) {
3491 log_err("X509_VERIFY_PARAM_set1_host failed");
3492 return 0;
3493 }
3494 }
3495#else
3496 (void)host;
3497#endif /* HAVE_SSL_SET1_HOST */
3498 return 1;
3499}
3500
3501struct comm_point*
3502outnet_comm_point_for_tcp(struct outside_network* outnet,
3503 comm_point_callback_type* cb, void* cb_arg,
3504 struct sockaddr_storage* to_addr, socklen_t to_addrlen,
3505 sldns_buffer* query, int timeout, int ssl, char* host)
3506{
3507 struct comm_point* cp;
3508 int fd = outnet_get_tcp_fd(to_addr, to_addrlen, outnet->tcp_mss, outnet->ip_dscp);
3509 if(fd == -1) {
3510 return 0;
3511 }
3512 fd_set_nonblock(fd);
3513 if(!outnet_tcp_connect(fd, to_addr, to_addrlen)) {
3514 /* outnet_tcp_connect has closed fd on error for us */
3515 return 0;
3516 }
3517 cp = comm_point_create_tcp_out(outnet->base, 65552, cb, cb_arg);
3518 if(!cp) {
3519 log_err("malloc failure");
3520 close(fd);
3521 return 0;
3522 }
3523 cp->repinfo.addrlen = to_addrlen;
3524 memcpy(&cp->repinfo.addr, to_addr, to_addrlen);
3525
3526 /* setup for SSL (if needed) */
3527 if(ssl) {
3528 if(!setup_comm_ssl(cp, outnet, fd, host)) {
3529 log_err("cannot setup XoT");
3530 comm_point_delete(cp);
3531 return NULL;
3532 }
3533 }
3534
3535 /* set timeout on TCP connection */
3536 comm_point_start_listening(cp, fd, timeout);
3537 /* copy scratch buffer to cp->buffer */
3538 sldns_buffer_copy(cp->buffer, query);
3539 return cp;
3540}
3541
3542/** setup the User-Agent HTTP header based on http-user-agent configuration */
3543static void
3544setup_http_user_agent(sldns_buffer* buf, struct config_file* cfg)
3545{
3546 if(cfg->hide_http_user_agent) return;
3547 if(cfg->http_user_agent==NULL || cfg->http_user_agent[0] == 0) {
3548 sldns_buffer_printf(buf, "User-Agent: %s/%s\r\n", PACKAGE_NAME,
3549 PACKAGE_VERSION);
3550 } else {
3551 sldns_buffer_printf(buf, "User-Agent: %s\r\n", cfg->http_user_agent);
3552 }
3553}
3554
3555/** setup http request headers in buffer for sending query to destination */
3556static int
3557setup_http_request(sldns_buffer* buf, char* host, char* path,
3558 struct config_file* cfg)
3559{
3560 sldns_buffer_clear(buf);
3561 sldns_buffer_printf(buf, "GET /%s HTTP/1.1\r\n", path);
3562 sldns_buffer_printf(buf, "Host: %s\r\n", host);
3563 setup_http_user_agent(buf, cfg);
3564 /* We do not really do multiple queries per connection,
3565 * but this header setting is also not needed.
3566 * sldns_buffer_printf(buf, "Connection: close\r\n") */
3567 sldns_buffer_printf(buf, "\r\n");
3568 if(sldns_buffer_position(buf)+10 > sldns_buffer_capacity(buf))
3569 return 0; /* somehow buffer too short, but it is about 60K
3570 and the request is only a couple bytes long. */
3571 sldns_buffer_flip(buf);
3572 return 1;
3573}
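For illustration, the request that setup_http_request assembles can be reproduced with plain snprintf. The host and path below ("doh.example.net", "dns-query") are made-up values, and the User-Agent line uses the PACKAGE_NAME/PACKAGE_VERSION values shown by the analyzer's macro expansion for this build; with the http-user-agent setting the agent string would differ, and with hide-http-user-agent the header is omitted entirely:

/* sketch only: build the same headers setup_http_request writes */
#include <stdio.h>

int main(void)
{
	char req[512];
	snprintf(req, sizeof(req),
		"GET /%s HTTP/1.1\r\n"
		"Host: %s\r\n"
		"User-Agent: %s/%s\r\n"
		"\r\n",
		"dns-query", "doh.example.net",
		"unbound", "1.13.2" /* PACKAGE_NAME / PACKAGE_VERSION */);
	fputs(req, stdout);
	return 0;
}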
3574
3575struct comm_point*
3576outnet_comm_point_for_http(struct outside_network* outnet,
3577 comm_point_callback_type* cb, void* cb_arg,
3578 struct sockaddr_storage* to_addr, socklen_t to_addrlen, int timeout,
3579 int ssl, char* host, char* path, struct config_file* cfg)
3580{
3581 /* cp calls cb with err=NETEVENT_DONE when transfer is done */
3582 struct comm_point* cp;
3583 int fd = outnet_get_tcp_fd(to_addr, to_addrlen, outnet->tcp_mss, outnet->ip_dscp);
3584 if(fd == -1) {
3585 return 0;
3586 }
3587 fd_set_nonblock(fd);
3588 if(!outnet_tcp_connect(fd, to_addr, to_addrlen)) {
3589 /* outnet_tcp_connect has closed fd on error for us */
3590 return 0;
3591 }
3592 cp = comm_point_create_http_out(outnet->base, 65552, cb, cb_arg,
3593 outnet->udp_buff);
3594 if(!cp) {
3595 log_err("malloc failure");
3596 close(fd);
3597 return 0;
3598 }
3599 cp->repinfo.addrlen = to_addrlen;
3600 memcpy(&cp->repinfo.addr, to_addr, to_addrlen);
3601
3602 /* setup for SSL (if needed) */
3603 if(ssl) {
3604 if(!setup_comm_ssl(cp, outnet, fd, host)) {
3605 log_err("cannot setup https");
3606 comm_point_delete(cp);
3607 return NULL;
3608 }
3609 }
3610
3611 /* set timeout on TCP connection */
3612 comm_point_start_listening(cp, fd, timeout);
3613
3614 /* setup http request in cp->buffer */
3615 if(!setup_http_request(cp->buffer, host, path, cfg)) {
3616 log_err("error setting up http request");
3617 comm_point_delete(cp);
3618 return NULL;
3619 }
3620 return cp;
3621}
3622
3623/** get memory used by waiting tcp entry (in use or not) */
3624static size_t
3625waiting_tcp_get_mem(struct waiting_tcp* w)
3626{
3627 size_t s;
3628 if(!w) return 0;
3629 s = sizeof(*w) + w->pkt_len;
3630 if(w->timer)
3631 s += comm_timer_get_mem(w->timer);
3632 return s;
3633}
3634
3635/** get memory used by port if */
3636static size_t
3637if_get_mem(struct port_if* pif)
3638{
3639 size_t s;
3640 int i;
3641 s = sizeof(*pif) +
3642#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
3643 sizeof(int)*pif->avail_total +
3644#endif
3645 sizeof(struct port_comm*)*pif->maxout;
3646 for(i=0; i<pif->inuse; i++)
3647 s += sizeof(*pif->out[i]) +
3648 comm_point_get_mem(pif->out[i]->cp);
3649 return s;
3650}
3651
3652/** get memory used by waiting udp */
3653static size_t
3654waiting_udp_get_mem(struct pending* w)
3655{
3656 size_t s;
3657 s = sizeof(*w) + comm_timer_get_mem(w->timer) + w->pkt_len;
3658 return s;
3659}
3660
3661size_t outnet_get_mem(struct outside_network* outnet)
3662{
3663 size_t i;
3664 int k;
3665 struct waiting_tcp* w;
3666 struct pending* u;
3667 struct serviced_query* sq;
3668 struct service_callback* sb;
3669 struct port_comm* pc;
3670 size_t s = sizeof(*outnet) + sizeof(*outnet->base) +
3671 sizeof(*outnet->udp_buff) +
3672 sldns_buffer_capacity(outnet->udp_buff);
3673 /* second buffer is not ours */
3674 for(pc = outnet->unused_fds; pc; pc = pc->next) {
3675 s += sizeof(*pc) + comm_point_get_mem(pc->cp);
3676 }
3677 for(k=0; k<outnet->num_ip4; k++)
3678 s += if_get_mem(&outnet->ip4_ifs[k]);
3679 for(k=0; k<outnet->num_ip6; k++)
3680 s += if_get_mem(&outnet->ip6_ifs[k]);
3681 for(u=outnet->udp_wait_first; u; u=u->next_waiting)
3682 s += waiting_udp_get_mem(u);
3683
3684 s += sizeof(struct pending_tcp*)*outnet->num_tcp;
3685 for(i=0; i<outnet->num_tcp; i++) {
3686 s += sizeof(struct pending_tcp);
3687 s += comm_point_get_mem(outnet->tcp_conns[i]->c);
3688 if(outnet->tcp_conns[i]->query)
3689 s += waiting_tcp_get_mem(outnet->tcp_conns[i]->query);
3690 }
3691 for(w=outnet->tcp_wait_first; w; w = w->next_waiting)
3692 s += waiting_tcp_get_mem(w);
3693 s += sizeof(*outnet->pending);
3694 s += (sizeof(struct pending) + comm_timer_get_mem(NULL)) *
3695 outnet->pending->count;
3696 s += sizeof(*outnet->serviced);
3697 s += outnet->svcd_overhead;
3698 RBTREE_FOR(sq, struct serviced_query*, outnet->serviced) {
3699 s += sizeof(*sq) + sq->qbuflen;
3700 for(sb = sq->cblist; sb; sb = sb->next)
3701 s += sizeof(*sb);
3702 }
3703 return s;
3704}
3705
3706size_t
3707serviced_get_mem(struct serviced_query* sq)
3708{
3709 struct service_callback* sb;
3710 size_t s;
3711 s = sizeof(*sq) + sq->qbuflen;
3712 for(sb = sq->cblist; sb; sb = sb->next)
3713 s += sizeof(*sb);
3714 if(sq->status == serviced_query_UDP_EDNS ||
3715 sq->status == serviced_query_UDP ||
3716 sq->status == serviced_query_UDP_EDNS_FRAG ||
3717 sq->status == serviced_query_UDP_EDNS_fallback) {
3718 s += sizeof(struct pending);
3719 s += comm_timer_get_mem(NULL);
3720 } else {
3721 /* does not have size of the pkt pointer */
3722 /* always has a timer except on malloc failures */
3723
3724 /* these sizes are part of the main outside network mem */
3725 /*
3726 s += sizeof(struct waiting_tcp);
3727 s += comm_timer_get_mem(NULL);
3728 */
3729 }
3730 return s;
3731}
3732