Bug Summary

File: src/sbin/unwind/libunbound/services/outside_network.c
Warning: line 2129, column 3
Value stored to 'my_port' is never read
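This is clang's dead-store check (the deadcode checker group enabled via -analyzer-checker=deadcode in the invocation below): a value is assigned to 'my_port', but the variable is never read again before it is overwritten or goes out of scope. Line 2129 lies past the end of this excerpt, so the snippet below is only a minimal, hypothetical sketch of the pattern being reported; the function and variable usage are invented for illustration and are not the actual code in outside_network.c.

	/* hypothetical illustration of a "Value stored to 'x' is never read" report */
	static int pick_port_example(int preferred, int fallback)
	{
		int my_port;
		my_port = preferred; /* dead store: overwritten before any read */
		my_port = fallback;  /* only this assignment is ever observed */
		return my_port;
	}

The usual fix is to drop the redundant assignment, or the variable entirely if nothing else reads it.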

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name outside_network.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 1 -pic-is-pie -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sbin/unwind/obj -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sbin/unwind -I /usr/src/sbin/unwind -I /usr/src/sbin/unwind/libunbound/libunbound -I /usr/src/sbin/unwind/libunbound -internal-isystem /usr/local/llvm16/lib/clang/16/include -internal-externc-isystem /usr/include -O2 -fdebug-compilation-dir=/usr/src/sbin/unwind/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fno-jump-tables -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/scan/2024-01-11-140451-98009-1 -x c /usr/src/sbin/unwind/libunbound/services/outside_network.c
1/*
2 * services/outside_network.c - implement sending of queries and wait answer.
3 *
4 * Copyright (c) 2007, NLnet Labs. All rights reserved.
5 *
6 * This software is open source.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * Redistributions of source code must retain the above copyright notice,
13 * this list of conditions and the following disclaimer.
14 *
15 * Redistributions in binary form must reproduce the above copyright notice,
16 * this list of conditions and the following disclaimer in the documentation
17 * and/or other materials provided with the distribution.
18 *
19 * Neither the name of the NLNET LABS nor the names of its contributors may
20 * be used to endorse or promote products derived from this software without
21 * specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 */
35
36/**
37 * \file
38 *
39 * This file has functions to send queries to authoritative servers and
40 * wait for the pending answer events.
41 */
42#include "config.h"
43#include <ctype.h>
44#ifdef HAVE_SYS_TYPES_H
45# include <sys/types.h>
46#endif
47#include <sys/time.h>
48#include "services/outside_network.h"
49#include "services/listen_dnsport.h"
50#include "services/cache/infra.h"
51#include "iterator/iterator.h"
52#include "util/data/msgparse.h"
53#include "util/data/msgreply.h"
54#include "util/data/msgencode.h"
55#include "util/data/dname.h"
56#include "util/netevent.h"
57#include "util/log.h"
58#include "util/net_help.h"
59#include "util/random.h"
60#include "util/fptr_wlist.h"
61#include "util/edns.h"
62#include "sldns/sbuffer.h"
63#include "dnstap/dnstap.h"
64#ifdef HAVE_OPENSSL_SSL_H
65#include <openssl/ssl.h>
66#endif
67#ifdef HAVE_X509_VERIFY_PARAM_SET1_HOST
68#include <openssl/x509v3.h>
69#endif
70
71#ifdef HAVE_NETDB_H
72#include <netdb.h>
73#endif
74#include <fcntl.h>
75
76/** number of times to retry making a random ID that is unique. */
77#define MAX_ID_RETRY 1000
78/** number of times to retry finding interface, port that can be opened. */
79#define MAX_PORT_RETRY 10000
80/** number of retries on outgoing UDP queries */
81#define OUTBOUND_UDP_RETRY 1
82
83/** initiate TCP transaction for serviced query */
84static void serviced_tcp_initiate(struct serviced_query* sq, sldns_buffer* buff);
85/** with a fd available, randomize and send UDP */
86static int randomize_and_send_udp(struct pending* pend, sldns_buffer* packet,
87 int timeout);
88
89/** select a DNS ID for a TCP stream */
90static uint16_t tcp_select_id(struct outside_network* outnet,
91 struct reuse_tcp* reuse);
92
93/** Perform serviced query UDP sending operation */
94static int serviced_udp_send(struct serviced_query* sq, sldns_buffer* buff);
95
96/** Send serviced query over TCP return false on initial failure */
97static int serviced_tcp_send(struct serviced_query* sq, sldns_buffer* buff);
98
99/** call the callbacks for a serviced query */
100static void serviced_callbacks(struct serviced_query* sq, int error,
101 struct comm_point* c, struct comm_reply* rep);
102
103int
104pending_cmp(const void* key1, const void* key2)
105{
106 struct pending *p1 = (struct pending*)key1;
107 struct pending *p2 = (struct pending*)key2;
108 if(p1->id < p2->id)
109 return -1;
110 if(p1->id > p2->id)
111 return 1;
112 log_assert(p1->id == p2->id);
113 return sockaddr_cmp(&p1->addr, p1->addrlen, &p2->addr, p2->addrlen);
114}
115
116int
117serviced_cmp(const void* key1, const void* key2)
118{
119 struct serviced_query* q1 = (struct serviced_query*)key1;
120 struct serviced_query* q2 = (struct serviced_query*)key2;
121 int r;
122 if(q1->qbuflen < q2->qbuflen)
123 return -1;
124 if(q1->qbuflen > q2->qbuflen)
125 return 1;
126 log_assert(q1->qbuflen == q2->qbuflen);
127 log_assert(q1->qbuflen >= 15 /* 10 header, root, type, class */);
128 /* alternate casing of qname is still the same query */
129 if((r = memcmp(q1->qbuf, q2->qbuf, 10)) != 0)
130 return r;
131 if((r = memcmp(q1->qbuf+q1->qbuflen-4, q2->qbuf+q2->qbuflen-4, 4)) != 0)
132 return r;
133 if(q1->dnssec != q2->dnssec) {
134 if(q1->dnssec < q2->dnssec)
135 return -1;
136 return 1;
137 }
138 if((r = query_dname_compare(q1->qbuf+10, q2->qbuf+10)) != 0)
139 return r;
140 if((r = edns_opt_list_compare(q1->opt_list, q2->opt_list)) != 0)
141 return r;
142 return sockaddr_cmp(&q1->addr, q1->addrlen, &q2->addr, q2->addrlen);
143}
144
145/** compare if the reuse element has the same address, port and same ssl-is
146 * used-for-it characteristic */
147static int
148reuse_cmp_addrportssl(const void* key1, const void* key2)
149{
150 struct reuse_tcp* r1 = (struct reuse_tcp*)key1;
151 struct reuse_tcp* r2 = (struct reuse_tcp*)key2;
152 int r;
153 /* compare address and port */
154 r = sockaddr_cmp(&r1->addr, r1->addrlen, &r2->addr, r2->addrlen);
155 if(r != 0)
156 return r;
157
158 /* compare if SSL-enabled */
159 if(r1->is_ssl && !r2->is_ssl)
160 return 1;
161 if(!r1->is_ssl && r2->is_ssl)
162 return -1;
163 return 0;
164}
165
166int
167reuse_cmp(const void* key1, const void* key2)
168{
169 int r;
170 r = reuse_cmp_addrportssl(key1, key2);
171 if(r != 0)
172 return r;
173
174 /* compare ptr value */
175 if(key1 < key2) return -1;
176 if(key1 > key2) return 1;
177 return 0;
178}
179
180int reuse_id_cmp(const void* key1, const void* key2)
181{
182 struct waiting_tcp* w1 = (struct waiting_tcp*)key1;
183 struct waiting_tcp* w2 = (struct waiting_tcp*)key2;
184 if(w1->id < w2->id)
185 return -1;
186 if(w1->id > w2->id)
187 return 1;
188 return 0;
189}
190
191/** delete waiting_tcp entry. Does not unlink from waiting list.
192 * @param w: to delete.
193 */
194static void
195waiting_tcp_delete(struct waiting_tcp* w)
196{
197 if(!w) return;
198 if(w->timer)
199 comm_timer_delete(w->timer);
200 free(w);
201}
202
203/**
204 * Pick random outgoing-interface of that family, and bind it.
205 * port set to 0 so OS picks a port number for us.
206 * if it is the ANY address, do not bind.
207 * @param pend: pending tcp structure, for storing the local address choice.
208 * @param w: tcp structure with destination address.
209 * @param s: socket fd.
210 * @return false on error, socket closed.
211 */
212static int
213pick_outgoing_tcp(struct pending_tcp* pend, struct waiting_tcp* w, int s)
214{
215 struct port_if* pi = NULL;
216 int num;
217 pend->pi = NULL;
218#ifdef INET6
219 if(addr_is_ip6(&w->addr, w->addrlen))
220 num = w->outnet->num_ip6;
221 else
222#endif
223 num = w->outnet->num_ip4;
224 if(num == 0) {
225 log_err("no TCP outgoing interfaces of family");
226 log_addr(VERB_OPS, "for addr", &w->addr, w->addrlen);
227 sock_close(s);
228 return 0;
229 }
230#ifdef INET6
231 if(addr_is_ip6(&w->addr, w->addrlen))
232 pi = &w->outnet->ip6_ifs[ub_random_max(w->outnet->rnd, num)];
233 else
234#endif
235 pi = &w->outnet->ip4_ifs[ub_random_max(w->outnet->rnd, num)];
236 log_assert(pi);
237 pend->pi = pi;
238 if(addr_is_any(&pi->addr, pi->addrlen)) {
239 /* binding to the ANY interface is for listening sockets */
240 return 1;
241 }
242 /* set port to 0 */
243 if(addr_is_ip6(&pi->addr, pi->addrlen))
244 ((struct sockaddr_in6*)&pi->addr)->sin6_port = 0;
245 else ((struct sockaddr_in*)&pi->addr)->sin_port = 0;
246 if(bind(s, (struct sockaddr*)&pi->addr, pi->addrlen) != 0) {
247#ifndef USE_WINSOCK
248#ifdef EADDRNOTAVAIL
249 if(!(verbosity < 4 && errno == EADDRNOTAVAIL))
250#endif
251#else /* USE_WINSOCK */
252 if(!(verbosity < 4 && WSAGetLastError() == WSAEADDRNOTAVAIL))
253#endif
254 log_err("outgoing tcp: bind: %s", sock_strerror(errno));
255 sock_close(s);
256 return 0;
257 }
258 log_addr(VERB_ALGO, "tcp bound to src", &pi->addr, pi->addrlen);
259 return 1;
260}
261
262/** get TCP file descriptor for address, returns -1 on failure,
263 * tcp_mss is 0 or maxseg size to set for TCP packets. */
264int
265outnet_get_tcp_fd(struct sockaddr_storage* addr, socklen_t addrlen, int tcp_mss, int dscp)
266{
267 int s;
268 int af;
269 char* err;
270#if defined(SO_REUSEADDR) || defined(IP_BIND_ADDRESS_NO_PORT)
271 int on = 1;
272#endif
273#ifdef INET6
274 if(addr_is_ip6(addr, addrlen)){
275 s = socket(PF_INET6, SOCK_STREAM, IPPROTO_TCP);
276 af = AF_INET6;
277 } else {
278#else
279 {
280#endif
281 af = AF_INET;
282 s = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
283 }
284 if(s == -1) {
285 log_err_addr("outgoing tcp: socket", sock_strerror(errno),
286 addr, addrlen);
287 return -1;
288 }
289
290#ifdef SO_REUSEADDR
291 if(setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (void*)&on,
292 (socklen_t)sizeof(on)) < 0) {
293 verbose(VERB_ALGO, "outgoing tcp:"
294 " setsockopt(.. SO_REUSEADDR ..) failed");
295 }
296#endif
297
298 err = set_ip_dscp(s, af, dscp);
299 if(err != NULL) {
300 verbose(VERB_ALGO, "outgoing tcp:"
301 "error setting IP DiffServ codepoint on socket");
302 }
303
304 if(tcp_mss > 0) {
305#if defined(IPPROTO_TCP) && defined(TCP_MAXSEG)
306 if(setsockopt(s, IPPROTO_TCP, TCP_MAXSEG,
307 (void*)&tcp_mss, (socklen_t)sizeof(tcp_mss)) < 0) {
308 verbose(VERB_ALGO, "outgoing tcp:"
309 " setsockopt(.. TCP_MAXSEG ..) failed");
310 }
311#else
312 verbose(VERB_ALGO, "outgoing tcp:"
313 " setsockopt(TCP_MAXSEG) unsupported");
314#endif /* defined(IPPROTO_TCP) && defined(TCP_MAXSEG) */
315 }
316#ifdef IP_BIND_ADDRESS_NO_PORT
317 if(setsockopt(s, IPPROTO_IP, IP_BIND_ADDRESS_NO_PORT, (void*)&on,
318 (socklen_t)sizeof(on)) < 0) {
319 verbose(VERB_ALGO, "outgoing tcp:"
320 " setsockopt(.. IP_BIND_ADDRESS_NO_PORT ..) failed");
321 }
322#endif /* IP_BIND_ADDRESS_NO_PORT */
323 return s;
324}
325
326/** connect tcp connection to addr, 0 on failure */
327int
328outnet_tcp_connect(int s, struct sockaddr_storage* addr, socklen_t addrlen)
329{
330 if(connect(s, (struct sockaddr*)addr, addrlen) == -1) {
331#ifndef USE_WINSOCK
332#ifdef EINPROGRESS
333 if(errno != EINPROGRESS) {
334#endif
335 if(tcp_connect_errno_needs_log(
336 (struct sockaddr*)addr, addrlen))
337 log_err_addr("outgoing tcp: connect",
338 strerror(errno), addr, addrlen);
339 close(s);
340 return 0;
341#ifdef EINPROGRESS
342 }
343#endif
344#else /* USE_WINSOCK */
345 if(WSAGetLastError() != WSAEINPROGRESS &&
346 WSAGetLastError() != WSAEWOULDBLOCK) {
347 closesocket(s);
348 return 0;
349 }
350#endif
351 }
352 return 1;
353}
354
355/** log reuse item addr and ptr with message */
356static void
357log_reuse_tcp(enum verbosity_value v, const char* msg, struct reuse_tcp* reuse)
358{
359 uint16_t port;
360 char addrbuf[128];
361 if(verbosity < v) return;
362 if(!reuse || !reuse->pending || !reuse->pending->c)
363 return;
364 addr_to_str(&reuse->addr, reuse->addrlen, addrbuf, sizeof(addrbuf));
365 port = ntohs(((struct sockaddr_in*)&reuse->addr)->sin_port);
366 verbose(v, "%s %s#%u fd %d", msg, addrbuf, (unsigned)port,
367 reuse->pending->c->fd);
368}
369
370/** pop the first element from the writewait list */
371struct waiting_tcp*
372reuse_write_wait_pop(struct reuse_tcp* reuse)
373{
374 struct waiting_tcp* w = reuse->write_wait_first;
375 if(!w)
376 return NULL;
377 log_assert(w->write_wait_queued);
378 log_assert(!w->write_wait_prev);
379 reuse->write_wait_first = w->write_wait_next;
380 if(w->write_wait_next)
381 w->write_wait_next->write_wait_prev = NULL;
382 else reuse->write_wait_last = NULL;
383 w->write_wait_queued = 0;
384 w->write_wait_next = NULL;
385 w->write_wait_prev = NULL;
386 return w;
387}
388
389/** remove the element from the writewait list */
390void
391reuse_write_wait_remove(struct reuse_tcp* reuse, struct waiting_tcp* w)
392{
393 log_assert(w);
394 log_assert(w->write_wait_queued);
395 if(!w)
396 return;
397 if(!w->write_wait_queued)
398 return;
399 if(w->write_wait_prev)
400 w->write_wait_prev->write_wait_next = w->write_wait_next;
401 else reuse->write_wait_first = w->write_wait_next;
402 log_assert(!w->write_wait_prev ||
403 w->write_wait_prev->write_wait_next != w->write_wait_prev);
404 if(w->write_wait_next)
405 w->write_wait_next->write_wait_prev = w->write_wait_prev;
406 else reuse->write_wait_last = w->write_wait_prev;
407 log_assert(!w->write_wait_next
408 || w->write_wait_next->write_wait_prev != w->write_wait_next);
409 w->write_wait_queued = 0;
410 w->write_wait_next = NULL;
411 w->write_wait_prev = NULL;
412}
413
414/** push the element after the last on the writewait list */
415void
416reuse_write_wait_push_back(struct reuse_tcp* reuse, struct waiting_tcp* w)
417{
418 if(!w) return;
419 log_assert(!w->write_wait_queued);
420 if(reuse->write_wait_last) {
421 reuse->write_wait_last->write_wait_next = w;
422 log_assert(reuse->write_wait_last->write_wait_next !=
423 reuse->write_wait_last);
424 w->write_wait_prev = reuse->write_wait_last;
425 } else {
426 reuse->write_wait_first = w;
427 w->write_wait_prev = NULL;
428 }
429 w->write_wait_next = NULL;
430 reuse->write_wait_last = w;
431 w->write_wait_queued = 1;
432}
433
434/** insert element in tree by id */
435void
436reuse_tree_by_id_insert(struct reuse_tcp* reuse, struct waiting_tcp* w)
437{
438#ifdef UNBOUND_DEBUG
439 rbnode_type* added;
440#endif
441 log_assert(w->id_node.key == NULL);
442 w->id_node.key = w;
443#ifdef UNBOUND_DEBUG
444 added =
445#else
446 (void)
447#endif
448 rbtree_insert(&reuse->tree_by_id, &w->id_node);
449 log_assert(added); /* should have been added */
450}
451
452/** find element in tree by id */
453struct waiting_tcp*
454reuse_tcp_by_id_find(struct reuse_tcp* reuse, uint16_t id)
455{
456 struct waiting_tcp key_w;
457 rbnode_type* n;
458 memset(&key_w, 0, sizeof(key_w));
459 key_w.id_node.key = &key_w;
460 key_w.id = id;
461 n = rbtree_search(&reuse->tree_by_id, &key_w);
462 if(!n) return NULL;
463 return (struct waiting_tcp*)n->key;
464}
465
466/** return ID value of rbnode in tree_by_id */
467static uint16_t
468tree_by_id_get_id(rbnode_type* node)
469{
470 struct waiting_tcp* w = (struct waiting_tcp*)node->key;
471 return w->id;
472}
473
474/** insert into reuse tcp tree and LRU, false on failure (duplicate) */
475int
476reuse_tcp_insert(struct outside_network* outnet, struct pending_tcp* pend_tcp)
477{
478 log_reuse_tcp(VERB_CLIENT, "reuse_tcp_insert", &pend_tcp->reuse);
479 if(pend_tcp->reuse.item_on_lru_list) {
480 if(!pend_tcp->reuse.node.key)
481 log_err("internal error: reuse_tcp_insert: "
482 "in lru list without key");
483 return 1;
484 }
485 pend_tcp->reuse.node.key = &pend_tcp->reuse;
486 pend_tcp->reuse.pending = pend_tcp;
487 if(!rbtree_insert(&outnet->tcp_reuse, &pend_tcp->reuse.node)) {
488 /* We are not in the LRU list but we are already in the
489 * tcp_reuse tree, strange.
490 * Continue to add ourselves to the LRU list. */
491 log_err("internal error: reuse_tcp_insert: in lru list but "
492 "not in the tree");
493 }
494 /* insert into LRU, first is newest */
495 pend_tcp->reuse.lru_prev = NULL;
496 if(outnet->tcp_reuse_first) {
497 pend_tcp->reuse.lru_next = outnet->tcp_reuse_first;
498 log_assert(pend_tcp->reuse.lru_next != &pend_tcp->reuse);
499 outnet->tcp_reuse_first->lru_prev = &pend_tcp->reuse;
500 log_assert(outnet->tcp_reuse_first->lru_prev !=
501 outnet->tcp_reuse_first);
502 } else {
503 pend_tcp->reuse.lru_next = NULL;
504 outnet->tcp_reuse_last = &pend_tcp->reuse;
505 }
506 outnet->tcp_reuse_first = &pend_tcp->reuse;
507 pend_tcp->reuse.item_on_lru_list = 1;
508 log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
509 (outnet->tcp_reuse_first && outnet->tcp_reuse_last));
510 log_assert(outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_next &&
511 outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_prev);
512 log_assert(outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_next &&
513 outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_prev);
514 return 1;
515}
516
517/** find reuse tcp stream to destination for query, or NULL if none */
518static struct reuse_tcp*
519reuse_tcp_find(struct outside_network* outnet, struct sockaddr_storage* addr,
520 socklen_t addrlen, int use_ssl)
521{
522 struct waiting_tcp key_w;
523 struct pending_tcp key_p;
524 struct comm_point c;
525 rbnode_type* result = NULL, *prev;
526 verbose(VERB_CLIENT, "reuse_tcp_find");
527 memset(&key_w, 0, sizeof(key_w));
528 memset(&key_p, 0, sizeof(key_p));
529 memset(&c, 0, sizeof(c));
530 key_p.query = &key_w;
531 key_p.c = &c;
532 key_p.reuse.pending = &key_p;
533 key_p.reuse.node.key = &key_p.reuse;
534 if(use_ssl)
535 key_p.reuse.is_ssl = 1;
536 if(addrlen > (socklen_t)sizeof(key_p.reuse.addr))
537 return NULL;
538 memmove(&key_p.reuse.addr, addr, addrlen);
539 key_p.reuse.addrlen = addrlen;
540
541 verbose(VERB_CLIENT, "reuse_tcp_find: num reuse streams %u",
542 (unsigned)outnet->tcp_reuse.count);
543 if(outnet->tcp_reuse.root == NULL ||
544 outnet->tcp_reuse.root == RBTREE_NULL)
545 return NULL;
546 if(rbtree_find_less_equal(&outnet->tcp_reuse, &key_p.reuse,
547 &result)) {
548 /* exact match */
549 /* but the key is on stack, and ptr is compared, impossible */
550 log_assert(&key_p.reuse != (struct reuse_tcp*)result);
551 log_assert(&key_p != ((struct reuse_tcp*)result)->pending);
552 }
553 /* not found, return null */
554
555 /* It is possible that we search for something before the first element
556 * in the tree. Replace a null pointer with the first element.
557 */
558 if (!result) {
559 verbose(VERB_CLIENT, "reuse_tcp_find: taking first");
560 result = rbtree_first(&outnet->tcp_reuse);
561 }
562
563 if(!result || result == RBTREE_NULL)
564 return NULL;
565
566 /* It is possible that we got the previous address, but that the
567 * address we are looking for is in the tree. If the address we got
568 * is less than the address we are looking, then take the next entry.
569 */
570 if (reuse_cmp_addrportssl(result->key, &key_p.reuse) < 0) {
571 verbose(VERB_CLIENT, "reuse_tcp_find: key too low");
572 result = rbtree_next(result);
573 }
574
575 verbose(VERB_CLIENT, "reuse_tcp_find check inexact match");
576 /* inexact match, find one of possibly several connections to the
577 * same destination address, with the correct port, ssl, and
578 * also less than max number of open queries, or else, fail to open
579 * a new one */
580 /* rewind to start of sequence of same address,port,ssl */
581 prev = rbtree_previous(result);
582 while(prev && prev != RBTREE_NULL &&
583 reuse_cmp_addrportssl(prev->key, &key_p.reuse) == 0) {
584 result = prev;
585 prev = rbtree_previous(result);
586 }
587
588 /* loop to find first one that has correct characteristics */
589 while(result && result != RBTREE_NULL &&
590 reuse_cmp_addrportssl(result->key, &key_p.reuse) == 0) {
591 if(((struct reuse_tcp*)result)->tree_by_id.count <
592 outnet->max_reuse_tcp_queries) {
593 /* same address, port, ssl-yes-or-no, and has
594 * space for another query */
595 return (struct reuse_tcp*)result;
596 }
597 result = rbtree_next(result);
598 }
599 return NULL;
600}
601
602/** use the buffer to setup writing the query */
603static void
604outnet_tcp_take_query_setup(int s, struct pending_tcp* pend,
605 struct waiting_tcp* w)
606{
607 struct timeval tv;
608 verbose(VERB_CLIENT, "outnet_tcp_take_query_setup: setup packet to write "
609 "len %d timeout %d msec",
610 (int)w->pkt_len, w->timeout);
611 pend->c->tcp_write_pkt = w->pkt;
612 pend->c->tcp_write_pkt_len = w->pkt_len;
613 pend->c->tcp_write_and_read = 1;
614 pend->c->tcp_write_byte_count = 0;
615 pend->c->tcp_is_reading = 0;
616 comm_point_start_listening(pend->c, s, -1);
617 /* set timer on the waiting_tcp entry, this is the write timeout
618 * for the written packet. The timer on pend->c is the timer
619 * for when there is no written packet and we have readtimeouts */
620#ifndef S_SPLINT_S
621 tv.tv_sec = w->timeout/1000;
622 tv.tv_usec = (w->timeout%1000)*1000;
623#endif
624 /* if the waiting_tcp was previously waiting for a buffer in the
625 * outside_network.tcpwaitlist, then the timer is reset now that
626 * we start writing it */
627 comm_timer_set(w->timer, &tv);
628}
629
630/** use next free buffer to service a tcp query */
631static int
632outnet_tcp_take_into_use(struct waiting_tcp* w)
633{
634 struct pending_tcp* pend = w->outnet->tcp_free;
635 int s;
636 log_assert(pend);
637 log_assert(w->pkt);
638 log_assert(w->pkt_len > 0);
639 log_assert(w->addrlen > 0);
640 pend->c->tcp_do_toggle_rw = 0;
641 pend->c->tcp_do_close = 0;
642
643 /* Consistency check, if we have ssl_upstream but no sslctx, then
644 * log an error and return failure.
645 */
646 if (w->ssl_upstream && !w->outnet->sslctx) {
647 log_err("SSL upstream requested but no SSL context");
648 return 0;
649 }
650
651 /* open socket */
652 s = outnet_get_tcp_fd(&w->addr, w->addrlen, w->outnet->tcp_mss, w->outnet->ip_dscp);
653
654 if(s == -1)
655 return 0;
656
657 if(!pick_outgoing_tcp(pend, w, s))
658 return 0;
659
660 fd_set_nonblock(s);
661#ifdef USE_OSX_MSG_FASTOPEN
662 /* API for fast open is different here. We use a connectx() function and
663 then writes can happen as normal even using SSL.*/
664 /* connectx requires that the len be set in the sockaddr struct*/
665 struct sockaddr_in *addr_in = (struct sockaddr_in *)&w->addr;
666 addr_in->sin_len = w->addrlen;
667 sa_endpoints_t endpoints;
668 endpoints.sae_srcif = 0;
669 endpoints.sae_srcaddr = NULL;
670 endpoints.sae_srcaddrlen = 0;
671 endpoints.sae_dstaddr = (struct sockaddr *)&w->addr;
672 endpoints.sae_dstaddrlen = w->addrlen;
673 if (connectx(s, &endpoints, SAE_ASSOCID_ANY,
674 CONNECT_DATA_IDEMPOTENT | CONNECT_RESUME_ON_READ_WRITE,
675 NULL, 0, NULL, NULL) == -1) {
676 /* if fails, failover to connect for OSX 10.10 */
677#ifdef EINPROGRESS
678 if(errno != EINPROGRESS) {
679#else
680 if(1) {
681#endif
682 if(connect(s, (struct sockaddr*)&w->addr, w->addrlen) == -1) {
683#else /* USE_OSX_MSG_FASTOPEN*/
684#ifdef USE_MSG_FASTOPEN
685 pend->c->tcp_do_fastopen = 1;
686 /* Only do TFO for TCP in which case no connect() is required here.
687 Don't combine client TFO with SSL, since OpenSSL can't
688 currently support doing a handshake on fd that already isn't connected*/
689 if (w->outnet->sslctx && w->ssl_upstream) {
690 if(connect(s, (struct sockaddr*)&w->addr, w->addrlen) == -1) {
691#else /* USE_MSG_FASTOPEN*/
692 if(connect(s, (struct sockaddr*)&w->addr, w->addrlen) == -1) {
693#endif /* USE_MSG_FASTOPEN*/
694#endif /* USE_OSX_MSG_FASTOPEN*/
695#ifndef USE_WINSOCK
696#ifdef EINPROGRESS
697 if(errno != EINPROGRESS) {
698#else
699 if(1) {
700#endif
701 if(tcp_connect_errno_needs_log(
702 (struct sockaddr*)&w->addr, w->addrlen))
703 log_err_addr("outgoing tcp: connect",
704 strerror(errno), &w->addr, w->addrlen);
705 close(s);
706#else /* USE_WINSOCK */
707 if(WSAGetLastError() != WSAEINPROGRESS &&
708 WSAGetLastError() != WSAEWOULDBLOCK) {
709 closesocket(s);
710#endif
711 return 0;
712 }
713 }
714#ifdef USE_MSG_FASTOPEN
715 }
716#endif /* USE_MSG_FASTOPEN */
717#ifdef USE_OSX_MSG_FASTOPEN
718 }
719 }
720#endif /* USE_OSX_MSG_FASTOPEN */
721 if(w->outnet->sslctx && w->ssl_upstream) {
722 pend->c->ssl = outgoing_ssl_fd(w->outnet->sslctx, s);
723 if(!pend->c->ssl) {
724 pend->c->fd = s;
725 comm_point_close(pend->c);
726 return 0;
727 }
728 verbose(VERB_ALGO, "the query is using TLS encryption, for %s",
729 (w->tls_auth_name?w->tls_auth_name:"an unauthenticated connection"));
730#ifdef USE_WINSOCK
731 comm_point_tcp_win_bio_cb(pend->c, pend->c->ssl);
732#endif
733 pend->c->ssl_shake_state = comm_ssl_shake_write;
734 if(!set_auth_name_on_ssl(pend->c->ssl, w->tls_auth_name,
735 w->outnet->tls_use_sni)) {
736 pend->c->fd = s;
737#ifdef HAVE_SSL
738 SSL_free(pend->c->ssl);
739#endif
740 pend->c->ssl = NULL;
741 comm_point_close(pend->c);
742 return 0;
743 }
744 }
745 w->next_waiting = (void*)pend;
746 w->outnet->num_tcp_outgoing++;
747 w->outnet->tcp_free = pend->next_free;
748 pend->next_free = NULL;
749 pend->query = w;
750 pend->reuse.outnet = w->outnet;
751 pend->c->repinfo.remote_addrlen = w->addrlen;
752 pend->c->tcp_more_read_again = &pend->reuse.cp_more_read_again;
753 pend->c->tcp_more_write_again = &pend->reuse.cp_more_write_again;
754 pend->reuse.cp_more_read_again = 0;
755 pend->reuse.cp_more_write_again = 0;
756 memcpy(&pend->c->repinfo.remote_addr, &w->addr, w->addrlen);
757 pend->reuse.pending = pend;
758
759 /* Remove from tree in case the is_ssl will be different and causes the
760 * identity of the reuse_tcp to change; could result in nodes not being
761 * deleted from the tree (because the new identity does not match the
762 * previous node) but their ->key would be changed to NULL. */
763 if(pend->reuse.node.key)
764 reuse_tcp_remove_tree_list(w->outnet, &pend->reuse);
765
766 if(pend->c->ssl)
767 pend->reuse.is_ssl = 1;
768 else pend->reuse.is_ssl = 0;
769 /* insert in reuse by address tree if not already inserted there */
770 (void)reuse_tcp_insert(w->outnet, pend);
771 reuse_tree_by_id_insert(&pend->reuse, w);
772 outnet_tcp_take_query_setup(s, pend, w);
773 return 1;
774}
775
776/** Touch the lru of a reuse_tcp element, it is in use.
777 * This moves it to the front of the list, where it is not likely to
778 * be closed. Items at the back of the list are closed to make space. */
779void
780reuse_tcp_lru_touch(struct outside_network* outnet, struct reuse_tcp* reuse)
781{
782 if(!reuse->item_on_lru_list) {
783 log_err("internal error: we need to touch the lru_list but item not in list");
784 return; /* not on the list, no lru to modify */
785 }
786 log_assert(reuse->lru_prev ||
787 (!reuse->lru_prev && outnet->tcp_reuse_first == reuse));
788 if(!reuse->lru_prev)
789 return; /* already first in the list */
790 /* remove at current position */
791 /* since it is not first, there is a previous element */
792 reuse->lru_prev->lru_next = reuse->lru_next;
793 log_assert(reuse->lru_prev->lru_next != reuse->lru_prev);
794 if(reuse->lru_next)
795 reuse->lru_next->lru_prev = reuse->lru_prev;
796 else outnet->tcp_reuse_last = reuse->lru_prev;
797 log_assert(!reuse->lru_next || reuse->lru_next->lru_prev != reuse->lru_next);
798 log_assert(outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_next &&
799 outnet->tcp_reuse_last != outnet->tcp_reuse_last->lru_prev);
800 /* insert at the front */
801 reuse->lru_prev = NULL;
802 reuse->lru_next = outnet->tcp_reuse_first;
803 if(outnet->tcp_reuse_first) {
804 outnet->tcp_reuse_first->lru_prev = reuse;
805 }
806 log_assert(reuse->lru_next != reuse);
807 /* since it is not first, it is not the only element and
808 * lru_next is thus not NULL and thus reuse is now not the last in
809 * the list, so outnet->tcp_reuse_last does not need to be modified */
810 outnet->tcp_reuse_first = reuse;
811 log_assert(outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_next &&
812 outnet->tcp_reuse_first != outnet->tcp_reuse_first->lru_prev);
813 log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
814 (outnet->tcp_reuse_first && outnet->tcp_reuse_last));
815}
816
817/** Snip the last reuse_tcp element off of the LRU list */
818struct reuse_tcp*
819reuse_tcp_lru_snip(struct outside_network* outnet)
820{
821 struct reuse_tcp* reuse = outnet->tcp_reuse_last;
822 if(!reuse) return NULL;
823 /* snip off of LRU */
824 log_assert(reuse->lru_next == NULL);
825 if(reuse->lru_prev) {
826 outnet->tcp_reuse_last = reuse->lru_prev;
827 reuse->lru_prev->lru_next = NULL;
828 } else {
829 outnet->tcp_reuse_last = NULL;
830 outnet->tcp_reuse_first = NULL;
831 }
832 log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
833 (outnet->tcp_reuse_first && outnet->tcp_reuse_last));
834 reuse->item_on_lru_list = 0;
835 reuse->lru_next = NULL;
836 reuse->lru_prev = NULL;
837 return reuse;
838}
839
840/** remove waiting tcp from the outnet waiting list */
841void
842outnet_waiting_tcp_list_remove(struct outside_network* outnet, struct waiting_tcp* w)
843{
844 struct waiting_tcp* p = outnet->tcp_wait_first, *prev = NULL;
845 w->on_tcp_waiting_list = 0;
846 while(p) {
847 if(p == w) {
848 /* remove w */
849 if(prev)
850 prev->next_waiting = w->next_waiting;
851 else outnet->tcp_wait_first = w->next_waiting;
852 if(outnet->tcp_wait_last == w)
853 outnet->tcp_wait_last = prev;
854 w->next_waiting = NULL;
855 return;
856 }
857 prev = p;
858 p = p->next_waiting;
859 }
860 /* outnet_waiting_tcp_list_remove is currently called only with items
861 * that are already in the waiting list. */
862 log_assert(0);
863}
864
865/** pop the first waiting tcp from the outnet waiting list */
866struct waiting_tcp*
867outnet_waiting_tcp_list_pop(struct outside_network* outnet)
868{
869 struct waiting_tcp* w = outnet->tcp_wait_first;
870 if(!outnet->tcp_wait_first) return NULL;
871 log_assert(w->on_tcp_waiting_list);
872 outnet->tcp_wait_first = w->next_waiting;
873 if(outnet->tcp_wait_last == w)
874 outnet->tcp_wait_last = NULL;
875 w->on_tcp_waiting_list = 0;
876 w->next_waiting = NULL;
877 return w;
878}
879
880/** add waiting_tcp element to the outnet tcp waiting list */
881void
882outnet_waiting_tcp_list_add(struct outside_network* outnet,
883 struct waiting_tcp* w, int set_timer)
884{
885 struct timeval tv;
886 log_assert(!w->on_tcp_waiting_list);
887 if(w->on_tcp_waiting_list)
888 return;
889 w->next_waiting = NULL;
890 if(outnet->tcp_wait_last)
891 outnet->tcp_wait_last->next_waiting = w;
892 else outnet->tcp_wait_first = w;
893 outnet->tcp_wait_last = w;
894 w->on_tcp_waiting_list = 1;
895 if(set_timer) {
896#ifndef S_SPLINT_S
897 tv.tv_sec = w->timeout/1000;
898 tv.tv_usec = (w->timeout%1000)*1000;
899#endif
900 comm_timer_set(w->timer, &tv);
901 }
902}
903
904/** add waiting_tcp element as first to the outnet tcp waiting list */
905void
906outnet_waiting_tcp_list_add_first(struct outside_network* outnet,
907 struct waiting_tcp* w, int reset_timer)
908{
909 struct timeval tv;
910 log_assert(!w->on_tcp_waiting_list);
911 if(w->on_tcp_waiting_list)
912 return;
913 w->next_waiting = outnet->tcp_wait_first;
914 log_assert(w->next_waiting != w);
915 if(!outnet->tcp_wait_last)
916 outnet->tcp_wait_last = w;
917 outnet->tcp_wait_first = w;
918 w->on_tcp_waiting_list = 1;
919 if(reset_timer) {
920#ifndef S_SPLINT_S
921 tv.tv_sec = w->timeout/1000;
922 tv.tv_usec = (w->timeout%1000)*1000;
923#endif
924 comm_timer_set(w->timer, &tv);
925 }
926 log_assert(
927 (!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
928 (outnet->tcp_reuse_first && outnet->tcp_reuse_last));
929}
930
931/** call callback on waiting_tcp, if not NULL */
932static void
933waiting_tcp_callback(struct waiting_tcp* w, struct comm_point* c, int error,
934 struct comm_reply* reply_info)
935{
936 if(w && w->cb) {
937 fptr_ok(fptr_whitelist_pending_tcp(w->cb));
938 (void)(*w->cb)(c, w->cb_arg, error, reply_info);
939 }
940}
941
942/** see if buffers can be used to service TCP queries */
943static void
944use_free_buffer(struct outside_network* outnet)
945{
946 struct waiting_tcp* w;
947 while(outnet->tcp_wait_first && !outnet->want_to_quit) {
948#ifdef USE_DNSTAP
949 struct pending_tcp* pend_tcp = NULL;
950#endif
951 struct reuse_tcp* reuse = NULL;
952 w = outnet_waiting_tcp_list_pop(outnet);
953 log_assert(
954 (!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
955 (outnet->tcp_reuse_first && outnet->tcp_reuse_last));
956 reuse = reuse_tcp_find(outnet, &w->addr, w->addrlen,
957 w->ssl_upstream);
958 /* re-select an ID when moving to a new TCP buffer */
959 w->id = tcp_select_id(outnet, reuse);
960 LDNS_ID_SET(w->pkt, w->id);
961 if(reuse) {
962 log_reuse_tcp(VERB_CLIENT, "use free buffer for waiting tcp: "
963 "found reuse", reuse);
964#ifdef USE_DNSTAP
965 pend_tcp = reuse->pending;
966#endif
967 reuse_tcp_lru_touch(outnet, reuse);
968 comm_timer_disable(w->timer);
969 w->next_waiting = (void*)reuse->pending;
970 reuse_tree_by_id_insert(reuse, w);
971 if(reuse->pending->query) {
972 /* on the write wait list */
973 reuse_write_wait_push_back(reuse, w);
974 } else {
975 /* write straight away */
976 /* stop the timer on read of the fd */
977 comm_point_stop_listening(reuse->pending->c);
978 reuse->pending->query = w;
979 outnet_tcp_take_query_setup(
980 reuse->pending->c->fd, reuse->pending,
981 w);
982 }
983 } else if(outnet->tcp_free) {
984 struct pending_tcp* pend = w->outnet->tcp_free;
985 rbtree_init(&pend->reuse.tree_by_id, reuse_id_cmp);
986 pend->reuse.pending = pend;
987 memcpy(&pend->reuse.addr, &w->addr, w->addrlen);
988 pend->reuse.addrlen = w->addrlen;
989 if(!outnet_tcp_take_into_use(w)) {
990 waiting_tcp_callback(w, NULL, NETEVENT_CLOSED,
991 NULL);
992 waiting_tcp_delete(w);
993#ifdef USE_DNSTAP
994 w = NULL;
995#endif
996 }
997#ifdef USE_DNSTAP
998 pend_tcp = pend;
999#endif
1000 } else {
1001 /* no reuse and no free buffer, put back at the start */
1002 outnet_waiting_tcp_list_add_first(outnet, w, 0);
1003 break;
1004 }
1005#ifdef USE_DNSTAP
1006 if(outnet->dtenv && pend_tcp && w && w->sq &&
1007 (outnet->dtenv->log_resolver_query_messages ||
1008 outnet->dtenv->log_forwarder_query_messages)) {
1009 sldns_buffer tmp;
1010 sldns_buffer_init_frm_data(&tmp, w->pkt, w->pkt_len);
1011 dt_msg_send_outside_query(outnet->dtenv, &w->sq->addr,
1012 &pend_tcp->pi->addr, comm_tcp, w->sq->zone,
1013 w->sq->zonelen, &tmp);
1014 }
1015#endif
1016 }
1017}
1018
1019/** delete element from tree by id */
1020static void
1021reuse_tree_by_id_delete(struct reuse_tcp* reuse, struct waiting_tcp* w)
1022{
1023#ifdef UNBOUND_DEBUG
1024 rbnode_type* rem;
1025#endif
1026 log_assert(w->id_node.key != NULL);
1027#ifdef UNBOUND_DEBUG
1028 rem =
1029#else
1030 (void)
1031#endif
1032 rbtree_delete(&reuse->tree_by_id, w);
1033 log_assert(rem); /* should have been there */
1034 w->id_node.key = NULL;
1035}
1036
1037/** move writewait list to go for another connection. */
1038static void
1039reuse_move_writewait_away(struct outside_network* outnet,
1040 struct pending_tcp* pend)
1041{
1042 /* the writewait list has not been written yet, so if the
1043 * stream was closed, they have not actually been failed, only
1044 * the queries written. Other queries can get written to another
1045 * stream. For upstreams that do not support multiple queries
1046 * and answers, the stream can get closed, and then the queries
1047 * can get written on a new socket */
1048 struct waiting_tcp* w;
1049 if(pend->query && pend->query->error_count == 0 &&
1050 pend->c->tcp_write_pkt == pend->query->pkt &&
1051 pend->c->tcp_write_pkt_len == pend->query->pkt_len) {
1052 /* since the current query is not written, it can also
1053 * move to a free buffer */
1054 if(verbosity >= VERB_CLIENT && pend->query->pkt_len > 12+2+2 &&
1055 LDNS_QDCOUNT(pend->query->pkt) > 0 &&
1056 dname_valid(pend->query->pkt+12, pend->query->pkt_len-12)) {
1057 char buf[LDNS_MAX_DOMAINLEN+1];
1058 dname_str(pend->query->pkt+12, buf);
1059 verbose(VERB_CLIENT, "reuse_move_writewait_away current %s %d bytes were written",
1060 buf, (int)pend->c->tcp_write_byte_count);
1061 }
1062 pend->c->tcp_write_pkt = NULL;
1063 pend->c->tcp_write_pkt_len = 0;
1064 pend->c->tcp_write_and_read = 0;
1065 pend->reuse.cp_more_read_again = 0;
1066 pend->reuse.cp_more_write_again = 0;
1067 pend->c->tcp_is_reading = 1;
1068 w = pend->query;
1069 pend->query = NULL;
1070 /* increase error count, so that if the next socket fails too
1071 * the server selection is run again with this query failed
1072 * and it can select a different server (if possible), or
1073 * fail the query */
1074 w->error_count ++;
1075 reuse_tree_by_id_delete(&pend->reuse, w);
1076 outnet_waiting_tcp_list_add(outnet, w, 1);
1077 }
1078 while((w = reuse_write_wait_pop(&pend->reuse)) != NULL) {
1079 if(verbosity >= VERB_CLIENT && w->pkt_len > 12+2+2 &&
1080 LDNS_QDCOUNT(w->pkt) > 0 &&
1081 dname_valid(w->pkt+12, w->pkt_len-12)) {
1082 char buf[LDNS_MAX_DOMAINLEN+1];
1083 dname_str(w->pkt+12, buf);
1084 verbose(VERB_CLIENT, "reuse_move_writewait_away item %s", buf);
1085 }
1086 reuse_tree_by_id_delete(&pend->reuse, w);
1087 outnet_waiting_tcp_list_add(outnet, w, 1);
1088 }
1089}
1090
1091/** remove reused element from tree and lru list */
1092void
1093reuse_tcp_remove_tree_list(struct outside_network* outnet,
1094 struct reuse_tcp* reuse)
1095{
1096 verbose(VERB_CLIENT, "reuse_tcp_remove_tree_list");
1097 if(reuse->node.key) {
1098 /* delete it from reuse tree */
1099 if(!rbtree_delete(&outnet->tcp_reuse, reuse)) {
1100 /* should not be possible, it should be there */
1101 char buf[256];
1102 addr_to_str(&reuse->addr, reuse->addrlen, buf,
1103 sizeof(buf));
1104 log_err("reuse tcp delete: node not present, internal error, %s ssl %d lru %d", buf, reuse->is_ssl, reuse->item_on_lru_list);
1105 }
1106 reuse->node.key = NULL;
1107 /* defend against loops on broken tree by zeroing the
1108 * rbnode structure */
1109 memset(&reuse->node, 0, sizeof(reuse->node));
1110 }
1111 /* delete from reuse list */
1112 if(reuse->item_on_lru_list) {
1113 if(reuse->lru_prev) {
1114 /* assert that members of the lru list are waiting
1115 * and thus have a pending pointer to the struct */
1116 log_assert(reuse->lru_prev->pending);
1117 reuse->lru_prev->lru_next = reuse->lru_next;
1118 log_assert(reuse->lru_prev->lru_next != reuse->lru_prev);
1119 } else {
1120 log_assert(!reuse->lru_next || reuse->lru_next->pending);
1121 outnet->tcp_reuse_first = reuse->lru_next;
1122 log_assert(!outnet->tcp_reuse_first ||
1123 (outnet->tcp_reuse_first !=
1124 outnet->tcp_reuse_first->lru_next &&
1125 outnet->tcp_reuse_first !=
1126 outnet->tcp_reuse_first->lru_prev));
1127 }
1128 if(reuse->lru_next) {
1129 /* assert that members of the lru list are waiting
1130 * and thus have a pending pointer to the struct */
1131 log_assert(reuse->lru_next->pending);
1132 reuse->lru_next->lru_prev = reuse->lru_prev;
1133 log_assert(reuse->lru_next->lru_prev != reuse->lru_next);
1134 } else {
1135 log_assert(!reuse->lru_prev || reuse->lru_prev->pending);
1136 outnet->tcp_reuse_last = reuse->lru_prev;
1137 log_assert(!outnet->tcp_reuse_last ||
1138 (outnet->tcp_reuse_last !=
1139 outnet->tcp_reuse_last->lru_next &&
1140 outnet->tcp_reuse_last !=
1141 outnet->tcp_reuse_last->lru_prev));
1142 }
1143 log_assert((!outnet->tcp_reuse_first && !outnet->tcp_reuse_last) ||
1144 (outnet->tcp_reuse_first && outnet->tcp_reuse_last));
1145 reuse->item_on_lru_list = 0;
1146 reuse->lru_next = NULL;
1147 reuse->lru_prev = NULL;
1148 }
1149 reuse->pending = NULL;
1150}
1151
1152/** helper function that deletes an element from the tree of readwait
1153 * elements in tcp reuse structure */
1154static void reuse_del_readwait_elem(rbnode_type* node, void* ATTR_UNUSED(arg))
1155{
1156 struct waiting_tcp* w = (struct waiting_tcp*)node->key;
1157 waiting_tcp_delete(w);
1158}
1159
1160/** delete readwait waiting_tcp elements, deletes the elements in the list */
1161void reuse_del_readwait(rbtree_type* tree_by_id)
1162{
1163 if(tree_by_id->root == NULL ||
1164 tree_by_id->root == RBTREE_NULL)
1165 return;
1166 traverse_postorder(tree_by_id, &reuse_del_readwait_elem, NULL);
1167 rbtree_init(tree_by_id, reuse_id_cmp);
1168}
1169
1170/** decommission a tcp buffer, closes commpoint and frees waiting_tcp entry */
1171static void
1172decommission_pending_tcp(struct outside_network* outnet,
1173 struct pending_tcp* pend)
1174{
1175 verbose(VERB_CLIENT, "decommission_pending_tcp");
1176 /* A certain code path can lead here twice for the same pending_tcp
1177 * creating a loop in the free pending_tcp list. */
1178 if(outnet->tcp_free != pend) {
1179 pend->next_free = outnet->tcp_free;
1180 outnet->tcp_free = pend;
1181 }
1182 if(pend->reuse.node.key) {
1183 /* needs unlink from the reuse tree to get deleted */
1184 reuse_tcp_remove_tree_list(outnet, &pend->reuse);
1185 }
1186 /* free SSL structure after remove from outnet tcp reuse tree,
1187 * because the c->ssl null or not is used for sorting in the tree */
1188 if(pend->c->ssl) {
1189#ifdef HAVE_SSL
1190 SSL_shutdown(pend->c->ssl);
1191 SSL_free(pend->c->ssl);
1192 pend->c->ssl = NULL;
1193#endif
1194 }
1195 comm_point_close(pend->c);
1196 pend->reuse.cp_more_read_again = 0;
1197 pend->reuse.cp_more_write_again = 0;
1198 /* unlink the query and writewait list, it is part of the tree
1199 * nodes and is deleted */
1200 pend->query = NULL;
1201 pend->reuse.write_wait_first = NULL;
1202 pend->reuse.write_wait_last = NULL;
1203 reuse_del_readwait(&pend->reuse.tree_by_id);
1204}
1205
1206/** perform failure callbacks for waiting queries in reuse read rbtree */
1207static void reuse_cb_readwait_for_failure(rbtree_type* tree_by_id, int err)
1208{
1209 rbnode_type* node;
1210 if(tree_by_id->root == NULL ||
1211 tree_by_id->root == RBTREE_NULL)
1212 return;
1213 node = rbtree_first(tree_by_id);
1214 while(node && node != RBTREE_NULL) {
1215 struct waiting_tcp* w = (struct waiting_tcp*)node->key;
1216 waiting_tcp_callback(w, NULL, err, NULL);
1217 node = rbtree_next(node);
1218 }
1219}
1220
1221/** mark the entry for being in the cb_and_decommission stage */
1222static void mark_for_cb_and_decommission(rbnode_type* node,
1223 void* ATTR_UNUSED(arg))
1224{
1225 struct waiting_tcp* w = (struct waiting_tcp*)node->key;
1226 /* Mark the waiting_tcp to signal later code (serviced_delete) that
1227 * this item is part of the backed up tree_by_id and will be deleted
1228 * later. */
1229 w->in_cb_and_decommission = 1;
1230 /* Mark the serviced_query for deletion so that later code through
1231 * callbacks (iter_clear .. outnet_serviced_query_stop) won't
1232 * prematurely delete it. */
1233 if(w->cb)
1234 ((struct serviced_query*)w->cb_arg)->to_be_deleted = 1;
1235}
1236
1237/** perform callbacks for failure and also decommission pending tcp.
1238 * the callbacks remove references in sq->pending to the waiting_tcp
1239 * members of the tree_by_id in the pending tcp. The pending_tcp is
1240 * removed before the callbacks, so that the callbacks do not modify
1241 * the pending_tcp due to its reference in the outside_network reuse tree */
1242static void reuse_cb_and_decommission(struct outside_network* outnet,
1243 struct pending_tcp* pend, int error)
1244{
1245 rbtree_type store;
1246 store = pend->reuse.tree_by_id;
1247 pend->query = NULL;
1248 rbtree_init(&pend->reuse.tree_by_id, reuse_id_cmp);
1249 pend->reuse.write_wait_first = NULL;
1250 pend->reuse.write_wait_last = NULL;
1251 decommission_pending_tcp(outnet, pend);
1252 if(store.root != NULL && store.root != RBTREE_NULL) {
1253 traverse_postorder(&store, &mark_for_cb_and_decommission, NULL);
1254 }
1255 reuse_cb_readwait_for_failure(&store, error);
1256 reuse_del_readwait(&store);
1257}
1258
1259/** set timeout on tcp fd and setup read event to catch incoming dns msgs */
1260static void
1261reuse_tcp_setup_timeout(struct pending_tcp* pend_tcp, int tcp_reuse_timeout)
1262{
1263 log_reuse_tcp(VERB_CLIENT, "reuse_tcp_setup_timeout", &pend_tcp->reuse);
1264 comm_point_start_listening(pend_tcp->c, -1, tcp_reuse_timeout);
1265}
1266
1267/** set timeout on tcp fd and setup read event to catch incoming dns msgs */
1268static void
1269reuse_tcp_setup_read_and_timeout(struct pending_tcp* pend_tcp, int tcp_reuse_timeout)
1270{
1271 log_reuse_tcp(VERB_CLIENT, "reuse_tcp_setup_readtimeout", &pend_tcp->reuse);
1272 sldns_buffer_clear(pend_tcp->c->buffer);
1273 pend_tcp->c->tcp_is_reading = 1;
1274 pend_tcp->c->tcp_byte_count = 0;
1275 comm_point_stop_listening(pend_tcp->c);
1276 comm_point_start_listening(pend_tcp->c, -1, tcp_reuse_timeout);
1277}
1278
1279int
1280outnet_tcp_cb(struct comm_point* c, void* arg, int error,
1281 struct comm_reply *reply_info)
1282{
1283 struct pending_tcp* pend = (struct pending_tcp*)arg;
1284 struct outside_network* outnet = pend->reuse.outnet;
1285 struct waiting_tcp* w = NULL;
1286 log_assert(pend->reuse.item_on_lru_list && pend->reuse.node.key);
1287 verbose(VERB_ALGO, "outnettcp cb");
1288 if(error == NETEVENT_TIMEOUT) {
1289 if(pend->c->tcp_write_and_read) {
1290 verbose(VERB_QUERY, "outnettcp got tcp timeout "
1291 "for read, ignored because write underway");
1292 /* if we are writing, ignore readtimer, wait for write timer
1293 * or write is done */
1294 return 0;
1295 } else {
1296 verbose(VERB_QUERY, "outnettcp got tcp timeout %s",
1297 (pend->reuse.tree_by_id.count?"for reading pkt":
1298 "for keepalive for reuse"));
1299 }
1300 /* must be timeout for reading or keepalive reuse,
1301 * close it. */
1302 reuse_tcp_remove_tree_list(outnet, &pend->reuse);
1303 } else if(error == NETEVENT_PKT_WRITTEN) {
1304 /* the packet we want to write has been written. */
1305 verbose(VERB_ALGO, "outnet tcp pkt was written event");
1306 log_assert(c == pend->c);
1307 log_assert(pend->query->pkt == pend->c->tcp_write_pkt);
1308 log_assert(pend->query->pkt_len == pend->c->tcp_write_pkt_len);
1309 pend->c->tcp_write_pkt = NULL;
1310 pend->c->tcp_write_pkt_len = 0;
1311 /* the pend.query is already in tree_by_id */
1312 log_assert(pend->query->id_node.key);
1313 pend->query = NULL;
1314 /* setup to write next packet or setup read timeout */
1315 if(pend->reuse.write_wait_first) {
1316 verbose(VERB_ALGO, "outnet tcp setup next pkt");
1317 /* we can write it straight away perhaps, set flag
1318 * because this callback called after a tcp write
1319 * succeeded and likely more buffer space is available
1320 * and we can write some more. */
1321 pend->reuse.cp_more_write_again = 1;
1322 pend->query = reuse_write_wait_pop(&pend->reuse);
1323 comm_point_stop_listening(pend->c);
1324 outnet_tcp_take_query_setup(pend->c->fd, pend,
1325 pend->query);
1326 } else {
1327 verbose(VERB_ALGO, "outnet tcp writes done, wait");
1328 pend->c->tcp_write_and_read = 0;
1329 pend->reuse.cp_more_read_again = 0;
1330 pend->reuse.cp_more_write_again = 0;
1331 pend->c->tcp_is_reading = 1;
1332 comm_point_stop_listening(pend->c);
1333 reuse_tcp_setup_timeout(pend, outnet->tcp_reuse_timeout);
1334 }
1335 return 0;
1336 } else if(error != NETEVENT_NOERROR) {
1337 verbose(VERB_QUERY, "outnettcp got tcp error %d", error);
1338 reuse_move_writewait_away(outnet, pend);
1339 /* pass error below and exit */
1340 } else {
1341 /* check ID */
1342 if(sldns_buffer_limit(c->buffer) < sizeof(uint16_t)) {
1343 log_addr(VERB_QUERY,
1344 "outnettcp: bad ID in reply, too short, from:",
1345 &pend->reuse.addr, pend->reuse.addrlen);
1346 error = NETEVENT_CLOSED;
1347 } else {
1348 uint16_t id = LDNS_ID_WIRE(sldns_buffer_begin(
1349 c->buffer));
1350 /* find the query the reply is for */
1351 w = reuse_tcp_by_id_find(&pend->reuse, id);
1352 /* Make sure that the reply we got is at least for a
1353 * sent query with the same ID; the waiting_tcp that
1354 * gets a reply is assumed to not be waiting to be
1355 * sent. */
1356 if(w && (w->on_tcp_waiting_list || w->write_wait_queued))
1357 w = NULL;
1358 }
1359 }
1360 if(error == NETEVENT_NOERROR && !w) {
1361 /* no struct waiting found in tree, no reply to call */
1362 log_addr(VERB_QUERY, "outnettcp: bad ID in reply, from:",
1363 &pend->reuse.addr, pend->reuse.addrlen);
1364 error = NETEVENT_CLOSED;
1365 }
1366 if(error == NETEVENT_NOERROR) {
1367 /* add to reuse tree so it can be reused, if not a failure.
1368 * This is possible if the state machine wants to make a tcp
1369 * query again to the same destination. */
1370 if(outnet->tcp_reuse.count < outnet->tcp_reuse_max) {
1371 (void)reuse_tcp_insert(outnet, pend);
1372 }
1373 }
1374 if(w) {
1375 log_assert(!w->on_tcp_waiting_list);
1376 log_assert(!w->write_wait_queued);
1377 reuse_tree_by_id_delete(&pend->reuse, w);
1378 verbose(VERB_CLIENT, "outnet tcp callback query err %d buflen %d",
1379 error, (int)sldns_buffer_limit(c->buffer));
1380 waiting_tcp_callback(w, c, error, reply_info);
1381 waiting_tcp_delete(w);
1382 }
1383 verbose(VERB_CLIENT, "outnet_tcp_cb reuse after cb");
1384 if(error == NETEVENT_NOERROR && pend->reuse.node.key) {
1385 verbose(VERB_CLIENT, "outnet_tcp_cb reuse after cb: keep it");
1386 /* it is in the reuse_tcp tree, with other queries, or
1387 * on the empty list. do not decommission it */
1388 /* if there are more outstanding queries, we could try to
1389 * read again, to see if it is on the input,
1390 * because this callback called after a successful read
1391 * and there could be more bytes to read on the input */
1392 if(pend->reuse.tree_by_id.count != 0)
1393 pend->reuse.cp_more_read_again = 1;
1394 reuse_tcp_setup_read_and_timeout(pend, outnet->tcp_reuse_timeout);
1395 return 0;
1396 }
1397 verbose(VERB_CLIENT, "outnet_tcp_cb reuse after cb: decommission it");
1398 /* no queries on it, no space to keep it. or timeout or closed due
1399 * to error. Close it */
1400 reuse_cb_and_decommission(outnet, pend, (error==NETEVENT_TIMEOUT?
1401 NETEVENT_TIMEOUT:NETEVENT_CLOSED));
1402 use_free_buffer(outnet);
1403 return 0;
1404}
1405
1406/** lower use count on pc, see if it can be closed */
1407static void
1408portcomm_loweruse(struct outside_network* outnet, struct port_comm* pc)
1409{
1410 struct port_if* pif;
1411 pc->num_outstanding--;
1412 if(pc->num_outstanding > 0) {
1413 return;
1414 }
1415 /* close it and replace in unused list */
1416 verbose(VERB_ALGO, "close of port %d", pc->number);
1417 comm_point_close(pc->cp);
1418 pif = pc->pif;
1419 log_assert(pif->inuse > 0);
1420#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION
1421 pif->avail_ports[pif->avail_total - pif->inuse] = pc->number;
1422#endif
1423 pif->inuse--;
1424 pif->out[pc->index] = pif->out[pif->inuse];
1425 pif->out[pc->index]->index = pc->index;
1426 pc->next = outnet->unused_fds;
1427 outnet->unused_fds = pc;
1428}
1429
1430/** try to send waiting UDP queries */
1431static void
1432outnet_send_wait_udp(struct outside_network* outnet)
1433{
1434 struct pending* pend;
1435 /* process waiting queries */
1436 while(outnet->udp_wait_first && outnet->unused_fds
1437 && !outnet->want_to_quit) {
1438 pend = outnet->udp_wait_first;
1439 outnet->udp_wait_first = pend->next_waiting;
1440 if(!pend->next_waiting) outnet->udp_wait_last = NULL;
1441 sldns_buffer_clear(outnet->udp_buff);
1442 sldns_buffer_write(outnet->udp_buff, pend->pkt, pend->pkt_len);
1443 sldns_buffer_flip(outnet->udp_buff);
1444 free(pend->pkt); /* freeing now makes get_mem correct */
1445 pend->pkt = NULL;
1446 pend->pkt_len = 0;
1447 log_assert(!pend->sq->busy);
1448 pend->sq->busy = 1;
1449 if(!randomize_and_send_udp(pend, outnet->udp_buff,
1450 pend->timeout)) {
1451 /* callback error on pending */
1452 if(pend->cb) {
1453 fptr_ok(fptr_whitelist_pending_udp(pend->cb));
1454 (void)(*pend->cb)(outnet->unused_fds->cp, pend->cb_arg,
1455 NETEVENT_CLOSED, NULL);
1456 }
1457 pending_delete(outnet, pend);
1458 } else {
1459 pend->sq->busy = 0;
1460 }
1461 }
1462}
1463
1464int
1465outnet_udp_cb(struct comm_point* c, void* arg, int error,
1466 struct comm_reply *reply_info)
1467{
1468 struct outside_network* outnet = (struct outside_network*)arg;
1469 struct pending key;
1470 struct pending* p;
1471 verbose(VERB_ALGO, "answer cb");
1472
1473 if(error != NETEVENT_NOERROR) {
1474 verbose(VERB_QUERY, "outnetudp got udp error %d", error);
1475 return 0;
1476 }
1477 if(sldns_buffer_limit(c->buffer) < LDNS_HEADER_SIZE) {
1478 verbose(VERB_QUERY, "outnetudp udp too short");
1479 return 0;
1480 }
1481 log_assert(reply_info);
1482
1483 /* setup lookup key */
1484 key.id = (unsigned)LDNS_ID_WIRE(sldns_buffer_begin(c->buffer));
1485 memcpy(&key.addr, &reply_info->remote_addr, reply_info->remote_addrlen);
1486 key.addrlen = reply_info->remote_addrlen;
1487 verbose(VERB_ALGO, "Incoming reply id = %4.4x", key.id);
1488 log_addr(VERB_ALGO, "Incoming reply addr =",
1489 &reply_info->remote_addr, reply_info->remote_addrlen);
1490
1491 /* find it, see if this thing is a valid query response */
1492 verbose(VERB_ALGO, "lookup size is %d entries", (int)outnet->pending->count);
1493 p = (struct pending*)rbtree_search(outnet->pending, &key);
1494 if(!p) {
1495 verbose(VERB_QUERY, "received unwanted or unsolicited udp reply dropped.");
1496 log_buf(VERB_ALGO, "dropped message", c->buffer);
1497 outnet->unwanted_replies++;
1498 if(outnet->unwanted_threshold && ++outnet->unwanted_total
1499 >= outnet->unwanted_threshold) {
1500 log_warn("unwanted reply total reached threshold (%u)"
1501 " you may be under attack."
1502 " defensive action: clearing the cache",
1503 (unsigned)outnet->unwanted_threshold);
1504 fptr_ok(fptr_whitelist_alloc_cleanup(
1505 outnet->unwanted_action));
1506 (*outnet->unwanted_action)(outnet->unwanted_param);
1507 outnet->unwanted_total = 0;
1508 }
1509 return 0;
1510 }
1511
1512 verbose(VERB_ALGO, "received udp reply.");
1513 log_buf(VERB_ALGO, "udp message", c->buffer);
1514 if(p->pc->cp != c) {
1515 verbose(VERB_QUERY, "received reply id,addr on wrong port. "
1516 "dropped.");
1517 outnet->unwanted_replies++;
1518 if(outnet->unwanted_threshold && ++outnet->unwanted_total
1519 >= outnet->unwanted_threshold) {
1520 log_warn("unwanted reply total reached threshold (%u)"
1521 " you may be under attack."
1522 " defensive action: clearing the cache",
1523 (unsigned)outnet->unwanted_threshold);
1524 fptr_ok(fptr_whitelist_alloc_cleanup(
1525 outnet->unwanted_action));
1526 (*outnet->unwanted_action)(outnet->unwanted_param);
1527 outnet->unwanted_total = 0;
1528 }
1529 return 0;
1530 }
1531 comm_timer_disable(p->timer);
1532 verbose(VERB_ALGO, "outnet handle udp reply");
1533 /* delete from tree first in case callback creates a retry */
1534 (void)rbtree_delete(outnet->pending, p->node.key);
1535 if(p->cb) {
1536 fptr_ok(fptr_whitelist_pending_udp(p->cb));
1537 (void)(*p->cb)(p->pc->cp, p->cb_arg, NETEVENT_NOERROR0, reply_info);
1538 }
1539 portcomm_loweruse(outnet, p->pc);
1540 pending_delete(NULL((void *)0), p);
1541 outnet_send_wait_udp(outnet);
1542 return 0;
1543}
1544
1545/** calculate number of ip4 and ip6 interfaces*/
1546static void
1547calc_num46(char** ifs, int num_ifs, int do_ip4, int do_ip6,
1548 int* num_ip4, int* num_ip6)
1549{
1550 int i;
1551 *num_ip4 = 0;
1552 *num_ip6 = 0;
1553 if(num_ifs <= 0) {
1554 if(do_ip4)
1555 *num_ip4 = 1;
1556 if(do_ip6)
1557 *num_ip6 = 1;
1558 return;
1559 }
1560 for(i=0; i<num_ifs; i++)
1561 {
1562 if(str_is_ip6(ifs[i])) {
1563 if(do_ip6)
1564 (*num_ip6)++;
1565 } else {
1566 if(do_ip4)
1567 (*num_ip4)++;
1568 }
1569 }
1570}
1571
1572void
1573pending_udp_timer_delay_cb(void* arg)
1574{
1575 struct pending* p = (struct pending*)arg;
1576 struct outside_network* outnet = p->outnet;
1577 verbose(VERB_ALGO, "timeout udp with delay");
1578 portcomm_loweruse(outnet, p->pc);
1579 pending_delete(outnet, p);
1580 outnet_send_wait_udp(outnet);
1581}
1582
1583void
1584pending_udp_timer_cb(void *arg)
1585{
1586 struct pending* p = (struct pending*)arg;
1587 struct outside_network* outnet = p->outnet;
1588 /* it timed out */
1589 verbose(VERB_ALGO, "timeout udp");
1590 if(p->cb) {
1591 fptr_ok(fptr_whitelist_pending_udp(p->cb));
1592 (void)(*p->cb)(p->pc->cp, p->cb_arg, NETEVENT_TIMEOUT-2, NULL((void *)0));
1593 }
1594	/* if delayclose, keep the port open for a longer time.
1595	 * But if the udpwaitlist exists, we are struggling to keep
1596	 * up with demand for sockets, so do not wait; service the
1597	 * waiting query (that matters more than catching port ICMPs) */
1598 if(outnet->delayclose && !outnet->udp_wait_first) {
1599 p->cb = NULL((void *)0);
1600 p->timer->callback = &pending_udp_timer_delay_cb;
1601 comm_timer_set(p->timer, &outnet->delay_tv);
1602 return;
1603 }
1604 portcomm_loweruse(outnet, p->pc);
1605 pending_delete(outnet, p);
1606 outnet_send_wait_udp(outnet);
1607}
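Note: with delayclose configured and no queries waiting for a socket, the timed-out port is not released right away; the timer is re-armed with delay_tv and its callback switched to pending_udp_timer_delay_cb above, which later does the portcomm_loweruse/pending_delete. That way a late reply or a port-unreachable ICMP arrives on this now-ignored socket rather than on a socket a new query may have reused in the meantime.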
1608
1609/** create pending_tcp buffers */
1610static int
1611create_pending_tcp(struct outside_network* outnet, size_t bufsize)
1612{
1613 size_t i;
1614 if(outnet->num_tcp == 0)
1615 return 1; /* no tcp needed, nothing to do */
1616 if(!(outnet->tcp_conns = (struct pending_tcp **)calloc(
1617 outnet->num_tcp, sizeof(struct pending_tcp*))))
1618 return 0;
1619 for(i=0; i<outnet->num_tcp; i++) {
1620 if(!(outnet->tcp_conns[i] = (struct pending_tcp*)calloc(1,
1621 sizeof(struct pending_tcp))))
1622 return 0;
1623 outnet->tcp_conns[i]->next_free = outnet->tcp_free;
1624 outnet->tcp_free = outnet->tcp_conns[i];
1625 outnet->tcp_conns[i]->c = comm_point_create_tcp_out(
1626 outnet->base, bufsize, outnet_tcp_cb,
1627 outnet->tcp_conns[i]);
1628 if(!outnet->tcp_conns[i]->c)
1629 return 0;
1630 }
1631 return 1;
1632}
1633
1634/** setup an outgoing interface, ready address */
1635static int setup_if(struct port_if* pif, const char* addrstr,
1636 int* avail, int numavail, size_t numfd)
1637{
1638#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION1
1639 pif->avail_total = numavail;
1640 pif->avail_ports = (int*)memdup(avail, (size_t)numavail*sizeof(int));
1641 if(!pif->avail_ports)
1642 return 0;
1643#endif
1644 if(!ipstrtoaddr(addrstr, UNBOUND_DNS_PORT53, &pif->addr, &pif->addrlen) &&
1645 !netblockstrtoaddr(addrstr, UNBOUND_DNS_PORT53,
1646 &pif->addr, &pif->addrlen, &pif->pfxlen))
1647 return 0;
1648 pif->maxout = (int)numfd;
1649 pif->inuse = 0;
1650 pif->out = (struct port_comm**)calloc(numfd,
1651 sizeof(struct port_comm*));
1652 if(!pif->out)
1653 return 0;
1654 return 1;
1655}
1656
1657struct outside_network*
1658outside_network_create(struct comm_base *base, size_t bufsize,
1659 size_t num_ports, char** ifs, int num_ifs, int do_ip4,
1660 int do_ip6, size_t num_tcp, int dscp, struct infra_cache* infra,
1661 struct ub_randstate* rnd, int use_caps_for_id, int* availports,
1662 int numavailports, size_t unwanted_threshold, int tcp_mss,
1663 void (*unwanted_action)(void*), void* unwanted_param, int do_udp,
1664 void* sslctx, int delayclose, int tls_use_sni, struct dt_env* dtenv,
1665 int udp_connect, int max_reuse_tcp_queries, int tcp_reuse_timeout,
1666 int tcp_auth_query_timeout)
1667{
1668 struct outside_network* outnet = (struct outside_network*)
1669 calloc(1, sizeof(struct outside_network));
1670 size_t k;
1671 if(!outnet) {
1672 log_err("malloc failed");
1673 return NULL((void *)0);
1674 }
1675 comm_base_timept(base, &outnet->now_secs, &outnet->now_tv);
1676 outnet->base = base;
1677 outnet->num_tcp = num_tcp;
1678 outnet->max_reuse_tcp_queries = max_reuse_tcp_queries;
1679 outnet->tcp_reuse_timeout= tcp_reuse_timeout;
1680 outnet->tcp_auth_query_timeout = tcp_auth_query_timeout;
1681 outnet->num_tcp_outgoing = 0;
1682 outnet->num_udp_outgoing = 0;
1683 outnet->infra = infra;
1684 outnet->rnd = rnd;
1685 outnet->sslctx = sslctx;
1686 outnet->tls_use_sni = tls_use_sni;
1687#ifdef USE_DNSTAP
1688 outnet->dtenv = dtenv;
1689#else
1690 (void)dtenv;
1691#endif
1692 outnet->svcd_overhead = 0;
1693 outnet->want_to_quit = 0;
1694 outnet->unwanted_threshold = unwanted_threshold;
1695 outnet->unwanted_action = unwanted_action;
1696 outnet->unwanted_param = unwanted_param;
1697 outnet->use_caps_for_id = use_caps_for_id;
1698 outnet->do_udp = do_udp;
1699 outnet->tcp_mss = tcp_mss;
1700 outnet->ip_dscp = dscp;
1701#ifndef S_SPLINT_S
1702 if(delayclose) {
1703 outnet->delayclose = 1;
1704 outnet->delay_tv.tv_sec = delayclose/1000;
1705 outnet->delay_tv.tv_usec = (delayclose%1000)*1000;
1706 }
1707#endif
1708 if(udp_connect) {
1709 outnet->udp_connect = 1;
1710 }
1711 if(numavailports == 0 || num_ports == 0) {
1712 log_err("no outgoing ports available");
1713 outside_network_delete(outnet);
1714 return NULL((void *)0);
1715 }
1716#ifndef INET6
1717 do_ip6 = 0;
1718#endif
1719 calc_num46(ifs, num_ifs, do_ip4, do_ip6,
1720 &outnet->num_ip4, &outnet->num_ip6);
1721 if(outnet->num_ip4 != 0) {
1722 if(!(outnet->ip4_ifs = (struct port_if*)calloc(
1723 (size_t)outnet->num_ip4, sizeof(struct port_if)))) {
1724 log_err("malloc failed");
1725 outside_network_delete(outnet);
1726 return NULL((void *)0);
1727 }
1728 }
1729 if(outnet->num_ip6 != 0) {
1730 if(!(outnet->ip6_ifs = (struct port_if*)calloc(
1731 (size_t)outnet->num_ip6, sizeof(struct port_if)))) {
1732 log_err("malloc failed");
1733 outside_network_delete(outnet);
1734 return NULL((void *)0);
1735 }
1736 }
1737 if( !(outnet->udp_buff = sldns_buffer_new(bufsize)) ||
1738 !(outnet->pending = rbtree_create(pending_cmp)) ||
1739 !(outnet->serviced = rbtree_create(serviced_cmp)) ||
1740 !create_pending_tcp(outnet, bufsize)) {
1741 log_err("malloc failed");
1742 outside_network_delete(outnet);
1743 return NULL((void *)0);
1744 }
1745 rbtree_init(&outnet->tcp_reuse, reuse_cmp);
1746 outnet->tcp_reuse_max = num_tcp;
1747
1748 /* allocate commpoints */
1749 for(k=0; k<num_ports; k++) {
1750 struct port_comm* pc;
1751 pc = (struct port_comm*)calloc(1, sizeof(*pc));
1752 if(!pc) {
1753 log_err("malloc failed");
1754 outside_network_delete(outnet);
1755 return NULL((void *)0);
1756 }
1757 pc->cp = comm_point_create_udp(outnet->base, -1,
1758 outnet->udp_buff, 0, outnet_udp_cb, outnet, NULL((void *)0));
1759 if(!pc->cp) {
1760 log_err("malloc failed");
1761 free(pc);
1762 outside_network_delete(outnet);
1763 return NULL((void *)0);
1764 }
1765 pc->next = outnet->unused_fds;
1766 outnet->unused_fds = pc;
1767 }
1768
1769 /* allocate interfaces */
1770 if(num_ifs == 0) {
1771 if(do_ip4 && !setup_if(&outnet->ip4_ifs[0], "0.0.0.0",
1772 availports, numavailports, num_ports)) {
1773 log_err("malloc failed");
1774 outside_network_delete(outnet);
1775 return NULL((void *)0);
1776 }
1777 if(do_ip6 && !setup_if(&outnet->ip6_ifs[0], "::",
1778 availports, numavailports, num_ports)) {
1779 log_err("malloc failed");
1780 outside_network_delete(outnet);
1781 return NULL((void *)0);
1782 }
1783 } else {
1784 size_t done_4 = 0, done_6 = 0;
1785 int i;
1786 for(i=0; i<num_ifs; i++) {
1787 if(str_is_ip6(ifs[i]) && do_ip6) {
1788 if(!setup_if(&outnet->ip6_ifs[done_6], ifs[i],
1789 availports, numavailports, num_ports)){
1790 log_err("malloc failed");
1791 outside_network_delete(outnet);
1792 return NULL((void *)0);
1793 }
1794 done_6++;
1795 }
1796 if(!str_is_ip6(ifs[i]) && do_ip4) {
1797 if(!setup_if(&outnet->ip4_ifs[done_4], ifs[i],
1798 availports, numavailports, num_ports)){
1799 log_err("malloc failed");
1800 outside_network_delete(outnet);
1801 return NULL((void *)0);
1802 }
1803 done_4++;
1804 }
1805 }
1806 }
1807 return outnet;
1808}
1809
1810/** helper pending delete */
1811static void
1812pending_node_del(rbnode_type* node, void* arg)
1813{
1814 struct pending* pend = (struct pending*)node;
1815 struct outside_network* outnet = (struct outside_network*)arg;
1816 pending_delete(outnet, pend);
1817}
1818
1819/** helper serviced delete */
1820static void
1821serviced_node_del(rbnode_type* node, void* ATTR_UNUSED(arg)arg __attribute__((unused)))
1822{
1823 struct serviced_query* sq = (struct serviced_query*)node;
1824 alloc_reg_release(sq->alloc, sq->region);
1825 if(sq->timer)
1826 comm_timer_delete(sq->timer);
1827 free(sq);
1828}
1829
1830void
1831outside_network_quit_prepare(struct outside_network* outnet)
1832{
1833 if(!outnet)
1834 return;
1835 /* prevent queued items from being sent */
1836 outnet->want_to_quit = 1;
1837}
1838
1839void
1840outside_network_delete(struct outside_network* outnet)
1841{
1842 if(!outnet)
1843 return;
1844 outnet->want_to_quit = 1;
1845 /* check every element, since we can be called on malloc error */
1846 if(outnet->pending) {
1847		/* free pending elements, but do not unlink them from the tree. */
1848 traverse_postorder(outnet->pending, pending_node_del, NULL((void *)0));
1849 free(outnet->pending);
1850 }
1851 if(outnet->serviced) {
1852 traverse_postorder(outnet->serviced, serviced_node_del, NULL((void *)0));
1853 free(outnet->serviced);
1854 }
1855 if(outnet->udp_buff)
1856 sldns_buffer_free(outnet->udp_buff);
1857 if(outnet->unused_fds) {
1858 struct port_comm* p = outnet->unused_fds, *np;
1859 while(p) {
1860 np = p->next;
1861 comm_point_delete(p->cp);
1862 free(p);
1863 p = np;
1864 }
1865 outnet->unused_fds = NULL((void *)0);
1866 }
1867 if(outnet->ip4_ifs) {
1868 int i, k;
1869 for(i=0; i<outnet->num_ip4; i++) {
1870 for(k=0; k<outnet->ip4_ifs[i].inuse; k++) {
1871 struct port_comm* pc = outnet->ip4_ifs[i].
1872 out[k];
1873 comm_point_delete(pc->cp);
1874 free(pc);
1875 }
1876#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION1
1877 free(outnet->ip4_ifs[i].avail_ports);
1878#endif
1879 free(outnet->ip4_ifs[i].out);
1880 }
1881 free(outnet->ip4_ifs);
1882 }
1883 if(outnet->ip6_ifs) {
1884 int i, k;
1885 for(i=0; i<outnet->num_ip6; i++) {
1886 for(k=0; k<outnet->ip6_ifs[i].inuse; k++) {
1887 struct port_comm* pc = outnet->ip6_ifs[i].
1888 out[k];
1889 comm_point_delete(pc->cp);
1890 free(pc);
1891 }
1892#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION1
1893 free(outnet->ip6_ifs[i].avail_ports);
1894#endif
1895 free(outnet->ip6_ifs[i].out);
1896 }
1897 free(outnet->ip6_ifs);
1898 }
1899 if(outnet->tcp_conns) {
1900 size_t i;
1901 for(i=0; i<outnet->num_tcp; i++)
1902 if(outnet->tcp_conns[i]) {
1903 struct pending_tcp* pend;
1904 pend = outnet->tcp_conns[i];
1905 if(pend->reuse.item_on_lru_list) {
1906 /* delete waiting_tcp elements that
1907 * the tcp conn is working on */
1908 decommission_pending_tcp(outnet, pend);
1909 }
1910 comm_point_delete(outnet->tcp_conns[i]->c);
1911 free(outnet->tcp_conns[i]);
1912 outnet->tcp_conns[i] = NULL((void *)0);
1913 }
1914 free(outnet->tcp_conns);
1915 outnet->tcp_conns = NULL((void *)0);
1916 }
1917 if(outnet->tcp_wait_first) {
1918 struct waiting_tcp* p = outnet->tcp_wait_first, *np;
1919 while(p) {
1920 np = p->next_waiting;
1921 waiting_tcp_delete(p);
1922 p = np;
1923 }
1924 }
1925 /* was allocated in struct pending that was deleted above */
1926 rbtree_init(&outnet->tcp_reuse, reuse_cmp);
1927 outnet->tcp_reuse_first = NULL((void *)0);
1928 outnet->tcp_reuse_last = NULL((void *)0);
1929 if(outnet->udp_wait_first) {
1930 struct pending* p = outnet->udp_wait_first, *np;
1931 while(p) {
1932 np = p->next_waiting;
1933 pending_delete(NULL((void *)0), p);
1934 p = np;
1935 }
1936 }
1937 free(outnet);
1938}
1939
1940void
1941pending_delete(struct outside_network* outnet, struct pending* p)
1942{
1943 if(!p)
1944 return;
1945 if(outnet && outnet->udp_wait_first &&
1946 (p->next_waiting || p == outnet->udp_wait_last) ) {
1947 /* delete from waiting list, if it is in the waiting list */
1948 struct pending* prev = NULL((void *)0), *x = outnet->udp_wait_first;
1949 while(x && x != p) {
1950 prev = x;
1951 x = x->next_waiting;
1952 }
1953 if(x) {
1954 log_assert(x == p);
1955 if(prev)
1956 prev->next_waiting = p->next_waiting;
1957 else outnet->udp_wait_first = p->next_waiting;
1958 if(outnet->udp_wait_last == p)
1959 outnet->udp_wait_last = prev;
1960 }
1961 }
1962 if(outnet) {
1963 (void)rbtree_delete(outnet->pending, p->node.key);
1964 }
1965 if(p->timer)
1966 comm_timer_delete(p->timer);
1967 free(p->pkt);
1968 free(p);
1969}
1970
1971static void
1972sai6_putrandom(struct sockaddr_in6 *sa, int pfxlen, struct ub_randstate *rnd)
1973{
1974 int i, last;
1975 if(!(pfxlen > 0 && pfxlen < 128))
1976 return;
1977 for(i = 0; i < (128 - pfxlen) / 8; i++) {
1978 sa->sin6_addr.s6_addr__u6_addr.__u6_addr8[15-i] = (uint8_t)ub_random_max(rnd, 256);
1979 }
1980 last = pfxlen & 7;
1981 if(last != 0) {
1982 sa->sin6_addr.s6_addr__u6_addr.__u6_addr8[15-i] |=
1983 ((0xFF >> last) & ub_random_max(rnd, 256));
1984 }
1985}
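Worked example for sai6_putrandom: with pfxlen=52 the host part is the low 76 bits, so the loop randomises the nine whole bytes s6_addr[7..15], and the final step ORs random bits under mask 0xFF>>4 = 0x0f into the low nibble of s6_addr[6]; the high nibble of that byte, and everything above it, keeps the configured netblock value.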
1986
1987/**
1988 * Try to open a UDP socket for outgoing communication.
1989 * Sets sockets options as needed.
1990 * @param addr: socket address.
1991 * @param addrlen: length of address.
1992 * @param pfxlen: length of network prefix (for address randomisation).
1993 * @param port: port override for addr.
1994 * @param inuse: if -1 is returned, this bool means the port was in use.
1995 * @param rnd: random state (for address randomisation).
1996 * @param dscp: DSCP to use.
1997 * @return fd or -1
1998 */
1999static int
2000udp_sockport(struct sockaddr_storage* addr, socklen_t addrlen, int pfxlen,
2001 int port, int* inuse, struct ub_randstate* rnd, int dscp)
2002{
2003 int fd, noproto;
2004 if(addr_is_ip6(addr, addrlen)) {
2005 int freebind = 0;
2006 struct sockaddr_in6 sa = *(struct sockaddr_in6*)addr;
2007		sa.sin6_port = (in_port_t)htons((uint16_t)port);
2008 sa.sin6_flowinfo = 0;
2009 sa.sin6_scope_id = 0;
2010 if(pfxlen != 0) {
2011 freebind = 1;
2012 sai6_putrandom(&sa, pfxlen, rnd);
2013 }
2014 fd = create_udp_sock(AF_INET624, SOCK_DGRAM2,
2015 (struct sockaddr*)&sa, addrlen, 1, inuse, &noproto,
2016 0, 0, 0, NULL((void *)0), 0, freebind, 0, dscp);
2017 } else {
2018 struct sockaddr_in* sa = (struct sockaddr_in*)addr;
2019		sa->sin_port = (in_port_t)htons((uint16_t)port);
2020 fd = create_udp_sock(AF_INET2, SOCK_DGRAM2,
2021 (struct sockaddr*)addr, addrlen, 1, inuse, &noproto,
2022 0, 0, 0, NULL((void *)0), 0, 0, 0, dscp);
2023 }
2024 return fd;
2025}
2026
2027/** Select random ID */
2028static int
2029select_id(struct outside_network* outnet, struct pending* pend,
2030 sldns_buffer* packet)
2031{
2032 int id_tries = 0;
2033 pend->id = GET_RANDOM_ID(outnet->rnd)(((unsigned)ub_random(outnet->rnd)>>8) & 0xffff);
2034 LDNS_ID_SET(sldns_buffer_begin(packet), pend->id)(sldns_write_uint16(sldns_buffer_begin(packet), pend->id));
2035
2036 /* insert in tree */
2037 pend->node.key = pend;
2038 while(!rbtree_insert(outnet->pending, &pend->node)) {
2039 /* change ID to avoid collision */
2040 pend->id = GET_RANDOM_ID(outnet->rnd)(((unsigned)ub_random(outnet->rnd)>>8) & 0xffff);
2041 LDNS_ID_SET(sldns_buffer_begin(packet), pend->id)(sldns_write_uint16(sldns_buffer_begin(packet), pend->id));
2042 id_tries++;
2043 if(id_tries == MAX_ID_RETRY1000) {
2044 pend->id=99999; /* non existent ID */
2045 log_err("failed to generate unique ID, drop msg");
2046 return 0;
2047 }
2048 }
2049 verbose(VERB_ALGO, "inserted new pending reply id=%4.4x", pend->id);
2050 return 1;
2051}
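Note: rbtree_insert() fails when a node with an equal key is already present, and the lookup key built in outnet_udp_cb above uses only the query ID and the remote address, so this loop keeps drawing fresh 16-bit IDs until the (ID, destination) pair is unique among outstanding queries, giving up after MAX_ID_RETRY (1000) attempts.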
2052
2053/** return true if a UDP connect error needs to be logged */
2054static int udp_connect_needs_log(int err)
2055{
2056 switch(err) {
2057 case ECONNREFUSED61:
2058# ifdef ENETUNREACH51
2059 case ENETUNREACH51:
2060# endif
2061# ifdef EHOSTDOWN64
2062 case EHOSTDOWN64:
2063# endif
2064# ifdef EHOSTUNREACH65
2065 case EHOSTUNREACH65:
2066# endif
2067# ifdef ENETDOWN50
2068 case ENETDOWN50:
2069# endif
2070# ifdef EADDRNOTAVAIL49
2071 case EADDRNOTAVAIL49:
2072# endif
2073 case EPERM1:
2074 case EACCES13:
2075 if(verbosity >= VERB_ALGO)
2076 return 1;
2077 return 0;
2078 default:
2079 break;
2080 }
2081 return 1;
2082}
2083
2084
2085/** Select random interface and port */
2086static int
2087select_ifport(struct outside_network* outnet, struct pending* pend,
2088 int num_if, struct port_if* ifs)
2089{
2090 int my_if, my_port, fd, portno, inuse, tries=0;
2091 struct port_if* pif;
2092 /* randomly select interface and port */
2093 if(num_if == 0) {
2094 verbose(VERB_QUERY, "Need to send query but have no "
2095 "outgoing interfaces of that family");
2096 return 0;
2097 }
2098 log_assert(outnet->unused_fds);
2099 tries = 0;
2100 while(1) {
2101 my_if = ub_random_max(outnet->rnd, num_if);
2102 pif = &ifs[my_if];
2103#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION1
2104 if(outnet->udp_connect) {
2105 /* if we connect() we cannot reuse fds for a port */
2106 if(pif->inuse >= pif->avail_total) {
2107 tries++;
2108 if(tries < MAX_PORT_RETRY10000)
2109 continue;
2110 log_err("failed to find an open port, drop msg");
2111 return 0;
2112 }
2113 my_port = pif->inuse + ub_random_max(outnet->rnd,
2114 pif->avail_total - pif->inuse);
2115 } else {
2116 my_port = ub_random_max(outnet->rnd, pif->avail_total);
2117 if(my_port < pif->inuse) {
2118 /* port already open */
2119 pend->pc = pif->out[my_port];
2120 verbose(VERB_ALGO, "using UDP if=%d port=%d",
2121 my_if, pend->pc->number);
2122 break;
2123 }
2124 }
2125 /* try to open new port, if fails, loop to try again */
2126 log_assert(pif->inuse < pif->maxout);
2127 portno = pif->avail_ports[my_port - pif->inuse];
2128#else
2129 my_port = portno = 0;
Value stored to 'my_port' is never read
2130#endif
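About the finding above: when DISABLE_EXPLICIT_PORT_RANDOMISATION is defined (this build appears to define it to 1, which is why the expansion shows a 1 fused after the macro name and why the analyzer follows the #else branch), line 2129 stores 0 into both my_port and portno, but every later read of my_port in this function sits inside the #ifndef blocks, so the store to my_port is dead. A minimal sketch of a change that would silence the warning, shown only for illustration and not necessarily the fix the project would choose:

	#else
		portno = 0;	/* my_port is only meaningful with port randomisation */
	#endif

optionally also moving the declaration of my_port under the same #ifndef so the variable disappears from this configuration entirely.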
2131 fd = udp_sockport(&pif->addr, pif->addrlen, pif->pfxlen,
2132 portno, &inuse, outnet->rnd, outnet->ip_dscp);
2133 if(fd == -1 && !inuse) {
2134 /* nonrecoverable error making socket */
2135 return 0;
2136 }
2137 if(fd != -1) {
2138 verbose(VERB_ALGO, "opened UDP if=%d port=%d",
2139 my_if, portno);
2140 if(outnet->udp_connect) {
2141 /* connect() to the destination */
2142 if(connect(fd, (struct sockaddr*)&pend->addr,
2143 pend->addrlen) < 0) {
2144 if(udp_connect_needs_log(errno(*__errno()))) {
2145 log_err_addr("udp connect failed",
2146 strerror(errno(*__errno())), &pend->addr,
2147 pend->addrlen);
2148 }
2149 sock_close(fd);
2150 return 0;
2151 }
2152 }
2153 /* grab fd */
2154 pend->pc = outnet->unused_fds;
2155 outnet->unused_fds = pend->pc->next;
2156
2157 /* setup portcomm */
2158 pend->pc->next = NULL((void *)0);
2159 pend->pc->number = portno;
2160 pend->pc->pif = pif;
2161 pend->pc->index = pif->inuse;
2162 pend->pc->num_outstanding = 0;
2163 comm_point_start_listening(pend->pc->cp, fd, -1);
2164
2165 /* grab port in interface */
2166 pif->out[pif->inuse] = pend->pc;
2167#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION1
2168 pif->avail_ports[my_port - pif->inuse] =
2169 pif->avail_ports[pif->avail_total-pif->inuse-1];
2170#endif
2171 pif->inuse++;
2172 break;
2173 }
2174 /* failed, already in use */
2175 verbose(VERB_QUERY, "port %d in use, trying another", portno);
2176 tries++;
2177 if(tries == MAX_PORT_RETRY10000) {
2178 log_err("failed to find an open port, drop msg");
2179 return 0;
2180 }
2181 }
2182 log_assert(pend->pc);
2183 pend->pc->num_outstanding++;
2184
2185 return 1;
2186}
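Summary of the selection above when explicit port randomisation is compiled in: without udp_connect, a single draw my_port over [0, avail_total) either falls below inuse, reusing the already-open socket pif->out[my_port], or names the still-closed port avail_ports[my_port - inuse], which is opened and then swap-removed from the available array. With udp_connect a socket cannot be shared between destinations, so the draw is confined to the not-yet-open range [inuse, avail_total).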
2187
2188static int
2189randomize_and_send_udp(struct pending* pend, sldns_buffer* packet, int timeout)
2190{
2191 struct timeval tv;
2192 struct outside_network* outnet = pend->sq->outnet;
2193
2194 /* select id */
2195 if(!select_id(outnet, pend, packet)) {
2196 return 0;
2197 }
2198
2199 /* select src_if, port */
2200 if(addr_is_ip6(&pend->addr, pend->addrlen)) {
2201 if(!select_ifport(outnet, pend,
2202 outnet->num_ip6, outnet->ip6_ifs))
2203 return 0;
2204 } else {
2205 if(!select_ifport(outnet, pend,
2206 outnet->num_ip4, outnet->ip4_ifs))
2207 return 0;
2208 }
2209 log_assert(pend->pc && pend->pc->cp);
2210
2211 /* send it over the commlink */
2212 if(!comm_point_send_udp_msg(pend->pc->cp, packet,
2213 (struct sockaddr*)&pend->addr, pend->addrlen, outnet->udp_connect)) {
2214 portcomm_loweruse(outnet, pend->pc);
2215 return 0;
2216 }
2217 outnet->num_udp_outgoing++;
2218
2219 /* system calls to set timeout after sending UDP to make roundtrip
2220 smaller. */
2221#ifndef S_SPLINT_S
2222 tv.tv_sec = timeout/1000;
2223 tv.tv_usec = (timeout%1000)*1000;
2224#endif
2225 comm_timer_set(pend->timer, &tv);
2226
2227#ifdef USE_DNSTAP
2228	/*
2229	 * send src (local service)/dst (upstream) addresses over DNSTAP.
2230	 * There is no way to know the src (local service) addr if unbound
2231	 * is not configured with specific outgoing IP addresses, so we
2232	 * pass 0.0.0.0 (::) as the address argument to the
2233	 * dt_msg_send_outside_query()/dt_msg_send_outside_response() calls.
2234 */
2235 if(outnet->dtenv &&
2236 (outnet->dtenv->log_resolver_query_messages ||
2237 outnet->dtenv->log_forwarder_query_messages)) {
2238 log_addr(VERB_ALGO, "from local addr", &pend->pc->pif->addr, pend->pc->pif->addrlen);
2239 log_addr(VERB_ALGO, "request to upstream", &pend->addr, pend->addrlen);
2240 dt_msg_send_outside_query(outnet->dtenv, &pend->addr, &pend->pc->pif->addr, comm_udp,
2241 pend->sq->zone, pend->sq->zonelen, packet);
2242 }
2243#endif
2244 return 1;
2245}
2246
2247struct pending*
2248pending_udp_query(struct serviced_query* sq, struct sldns_buffer* packet,
2249 int timeout, comm_point_callback_type* cb, void* cb_arg)
2250{
2251 struct pending* pend = (struct pending*)calloc(1, sizeof(*pend));
2252 if(!pend) return NULL((void *)0);
2253 pend->outnet = sq->outnet;
2254 pend->sq = sq;
2255 pend->addrlen = sq->addrlen;
2256 memmove(&pend->addr, &sq->addr, sq->addrlen);
2257 pend->cb = cb;
2258 pend->cb_arg = cb_arg;
2259 pend->node.key = pend;
2260 pend->timer = comm_timer_create(sq->outnet->base, pending_udp_timer_cb,
2261 pend);
2262 if(!pend->timer) {
2263 free(pend);
2264 return NULL((void *)0);
2265 }
2266
2267 if(sq->outnet->unused_fds == NULL((void *)0)) {
2268 /* no unused fd, cannot create a new port (randomly) */
2269 verbose(VERB_ALGO, "no fds available, udp query waiting");
2270 pend->timeout = timeout;
2271 pend->pkt_len = sldns_buffer_limit(packet);
2272 pend->pkt = (uint8_t*)memdup(sldns_buffer_begin(packet),
2273 pend->pkt_len);
2274 if(!pend->pkt) {
2275 comm_timer_delete(pend->timer);
2276 free(pend);
2277 return NULL((void *)0);
2278 }
2279 /* put at end of waiting list */
2280 if(sq->outnet->udp_wait_last)
2281 sq->outnet->udp_wait_last->next_waiting = pend;
2282 else
2283 sq->outnet->udp_wait_first = pend;
2284 sq->outnet->udp_wait_last = pend;
2285 return pend;
2286 }
2287 log_assert(!sq->busy);
2288 sq->busy = 1;
2289 if(!randomize_and_send_udp(pend, packet, timeout)) {
2290 pending_delete(sq->outnet, pend);
2291 return NULL((void *)0);
2292 }
2293 sq->busy = 0;
2294 return pend;
2295}
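Note: when outnet has no unused file descriptors, the query is not dropped; its packet is copied out of the shared buffer and the pending entry is parked at the tail of the udp_wait list. The callers of portcomm_loweruse() then invoke outnet_send_wait_udp() (earlier in this section), which drains that list as soon as a port_comm is back on outnet->unused_fds.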
2296
2297void
2298outnet_tcptimer(void* arg)
2299{
2300 struct waiting_tcp* w = (struct waiting_tcp*)arg;
2301 struct outside_network* outnet = w->outnet;
2302 verbose(VERB_CLIENT, "outnet_tcptimer");
2303 if(w->on_tcp_waiting_list) {
2304 /* it is on the waiting list */
2305 outnet_waiting_tcp_list_remove(outnet, w);
2306 waiting_tcp_callback(w, NULL((void *)0), NETEVENT_TIMEOUT-2, NULL((void *)0));
2307 waiting_tcp_delete(w);
2308 } else {
2309 /* it was in use */
2310 struct pending_tcp* pend=(struct pending_tcp*)w->next_waiting;
2311 reuse_cb_and_decommission(outnet, pend, NETEVENT_TIMEOUT-2);
2312 }
2313 use_free_buffer(outnet);
2314}
2315
2316/** close the oldest reuse_tcp connection to make a fd and struct pend
2317 * available for a new stream connection */
2318static void
2319reuse_tcp_close_oldest(struct outside_network* outnet)
2320{
2321 struct reuse_tcp* reuse;
2322 verbose(VERB_CLIENT, "reuse_tcp_close_oldest");
2323 reuse = reuse_tcp_lru_snip(outnet);
2324 if(!reuse) return;
2325 /* free up */
2326 reuse_cb_and_decommission(outnet, reuse->pending, NETEVENT_CLOSED-1);
2327}
2328
2329static uint16_t
2330tcp_select_id(struct outside_network* outnet, struct reuse_tcp* reuse)
2331{
2332 if(reuse)
2333 return reuse_tcp_select_id(reuse, outnet);
2334 return GET_RANDOM_ID(outnet->rnd)(((unsigned)ub_random(outnet->rnd)>>8) & 0xffff);
2335}
2336
2337/** find spare ID value for reuse tcp stream. That is random and also does
2338 * not collide with an existing query ID that is in use or waiting */
2339uint16_t
2340reuse_tcp_select_id(struct reuse_tcp* reuse, struct outside_network* outnet)
2341{
2342 uint16_t id = 0, curid, nextid;
2343 const int try_random = 2000;
2344 int i;
2345 unsigned select, count, space;
2346 rbnode_type* node;
2347
2348 /* make really sure the tree is not empty */
2349 if(reuse->tree_by_id.count == 0) {
2350 id = GET_RANDOM_ID(outnet->rnd)(((unsigned)ub_random(outnet->rnd)>>8) & 0xffff);
2351 return id;
2352 }
2353
2354 /* try to find random empty spots by picking them */
2355 for(i = 0; i<try_random; i++) {
2356 id = GET_RANDOM_ID(outnet->rnd)(((unsigned)ub_random(outnet->rnd)>>8) & 0xffff);
2357 if(!reuse_tcp_by_id_find(reuse, id)) {
2358 return id;
2359 }
2360 }
2361
2362	/* otherwise pick uniformly among the unused ID values.
2363	 * Pick the n-th unused number, then walk the gaps between
2364	 * the entries in the tree to find it */
2365 log_assert(reuse->tree_by_id.count < 0xffff);
2366 select = ub_random_max(outnet->rnd, 0xffff - reuse->tree_by_id.count);
2367 /* select value now in 0 .. num free - 1 */
2368
2369 count = 0; /* number of free spaces passed by */
2370 node = rbtree_first(&reuse->tree_by_id);
2371 log_assert(node && node != RBTREE_NULL); /* tree not empty */
2372 /* see if select is before first node */
2373 if(select < (unsigned)tree_by_id_get_id(node))
2374 return select;
2375 count += tree_by_id_get_id(node);
2376 /* perhaps select is between nodes */
2377 while(node && node != RBTREE_NULL&rbtree_null_node) {
2378 rbnode_type* next = rbtree_next(node);
2379 if(next && next != RBTREE_NULL&rbtree_null_node) {
2380 curid = tree_by_id_get_id(node);
2381 nextid = tree_by_id_get_id(next);
2382 log_assert(curid < nextid);
2383 if(curid != 0xffff && curid + 1 < nextid) {
2384 /* space between nodes */
2385 space = nextid - curid - 1;
2386 log_assert(select >= count);
2387 if(select < count + space) {
2388 /* here it is */
2389 return curid + 1 + (select - count);
2390 }
2391 count += space;
2392 }
2393 }
2394 node = next;
2395 }
2396
2397 /* select is after the last node */
2398 /* count is the number of free positions before the nodes in the
2399 * tree */
2400 node = rbtree_last(&reuse->tree_by_id);
2401 log_assert(node && node != RBTREE_NULL); /* tree not empty */
2402 curid = tree_by_id_get_id(node);
2403 log_assert(count + (0xffff-curid) + reuse->tree_by_id.count == 0xffff);
2404 return curid + 1 + (select - count);
2405}
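Worked example for the gap walk above: if tree_by_id holds the IDs {3, 7}, the free IDs in order are 0, 1, 2, 4, 5, 6, 8, ... A draw of select=4 is not below the first node's ID (3), so count becomes 3; the gap between 3 and 7 has space = 3, and since 4 < 3+3 the function returns 3 + 1 + (4 - 3) = 5, which is indeed the free ID at zero-based position 4.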
2406
2407struct waiting_tcp*
2408pending_tcp_query(struct serviced_query* sq, sldns_buffer* packet,
2409 int timeout, comm_point_callback_type* callback, void* callback_arg)
2410{
2411 struct pending_tcp* pend = sq->outnet->tcp_free;
2412 struct reuse_tcp* reuse = NULL((void *)0);
2413 struct waiting_tcp* w;
2414
2415 verbose(VERB_CLIENT, "pending_tcp_query");
2416 if(sldns_buffer_limit(packet) < sizeof(uint16_t)) {
2417 verbose(VERB_ALGO, "pending tcp query with too short buffer < 2");
2418 return NULL((void *)0);
2419 }
2420
2421 /* find out if a reused stream to the target exists */
2422 /* if so, take it into use */
2423 reuse = reuse_tcp_find(sq->outnet, &sq->addr, sq->addrlen,
2424 sq->ssl_upstream);
2425 if(reuse) {
2426 log_reuse_tcp(VERB_CLIENT, "pending_tcp_query: found reuse", reuse);
2427 log_assert(reuse->pending);
2428 pend = reuse->pending;
2429 reuse_tcp_lru_touch(sq->outnet, reuse);
2430 }
2431
2432 log_assert(!reuse || (reuse && pend));
2433 /* if !pend but we have reuse streams, close a reuse stream
2434 * to be able to open a new one to this target, no use waiting
2435 * to reuse a file descriptor while another query needs to use
2436 * that buffer and file descriptor now. */
2437 if(!pend) {
2438 reuse_tcp_close_oldest(sq->outnet);
2439 pend = sq->outnet->tcp_free;
2440 log_assert(!reuse || (pend == reuse->pending));
2441 }
2442
2443 /* allocate space to store query */
2444 w = (struct waiting_tcp*)malloc(sizeof(struct waiting_tcp)
2445 + sldns_buffer_limit(packet));
2446 if(!w) {
2447 return NULL((void *)0);
2448 }
2449 if(!(w->timer = comm_timer_create(sq->outnet->base, outnet_tcptimer, w))) {
2450 free(w);
2451 return NULL((void *)0);
2452 }
2453 w->pkt = (uint8_t*)w + sizeof(struct waiting_tcp);
2454 w->pkt_len = sldns_buffer_limit(packet);
2455 memmove(w->pkt, sldns_buffer_begin(packet), w->pkt_len);
2456 w->id = tcp_select_id(sq->outnet, reuse);
2457 LDNS_ID_SET(w->pkt, w->id)(sldns_write_uint16(w->pkt, w->id));
2458 memcpy(&w->addr, &sq->addr, sq->addrlen);
2459 w->addrlen = sq->addrlen;
2460 w->outnet = sq->outnet;
2461 w->on_tcp_waiting_list = 0;
2462 w->next_waiting = NULL((void *)0);
2463 w->cb = callback;
2464 w->cb_arg = callback_arg;
2465 w->ssl_upstream = sq->ssl_upstream;
2466 w->tls_auth_name = sq->tls_auth_name;
2467 w->timeout = timeout;
2468 w->id_node.key = NULL((void *)0);
2469 w->write_wait_prev = NULL((void *)0);
2470 w->write_wait_next = NULL((void *)0);
2471 w->write_wait_queued = 0;
2472 w->error_count = 0;
2473#ifdef USE_DNSTAP
2474 w->sq = NULL((void *)0);
2475#endif
2476 w->in_cb_and_decommission = 0;
2477 if(pend) {
2478 /* we have a buffer available right now */
2479 if(reuse) {
2480 log_assert(reuse == &pend->reuse);
2481 /* reuse existing fd, write query and continue */
2482 /* store query in tree by id */
2483 verbose(VERB_CLIENT, "pending_tcp_query: reuse, store");
2484 w->next_waiting = (void*)pend;
2485 reuse_tree_by_id_insert(&pend->reuse, w);
2486 /* can we write right now? */
2487 if(pend->query == NULL((void *)0)) {
2488 /* write straight away */
2489 /* stop the timer on read of the fd */
2490 comm_point_stop_listening(pend->c);
2491 pend->query = w;
2492 outnet_tcp_take_query_setup(pend->c->fd, pend,
2493 w);
2494 } else {
2495 /* put it in the waiting list for
2496 * this stream */
2497 reuse_write_wait_push_back(&pend->reuse, w);
2498 }
2499 } else {
2500 /* create new fd and connect to addr, setup to
2501 * write query */
2502 verbose(VERB_CLIENT, "pending_tcp_query: new fd, connect");
2503 rbtree_init(&pend->reuse.tree_by_id, reuse_id_cmp);
2504 pend->reuse.pending = pend;
2505 memcpy(&pend->reuse.addr, &sq->addr, sq->addrlen);
2506 pend->reuse.addrlen = sq->addrlen;
2507 if(!outnet_tcp_take_into_use(w)) {
2508 waiting_tcp_delete(w);
2509 return NULL((void *)0);
2510 }
2511 }
2512#ifdef USE_DNSTAP
2513 if(sq->outnet->dtenv &&
2514 (sq->outnet->dtenv->log_resolver_query_messages ||
2515 sq->outnet->dtenv->log_forwarder_query_messages)) {
2516 /* use w->pkt, because it has the ID value */
2517 sldns_buffer tmp;
2518 sldns_buffer_init_frm_data(&tmp, w->pkt, w->pkt_len);
2519 dt_msg_send_outside_query(sq->outnet->dtenv, &sq->addr,
2520 &pend->pi->addr, comm_tcp, sq->zone,
2521 sq->zonelen, &tmp);
2522 }
2523#endif
2524 } else {
2525 /* queue up */
2526 /* waiting for a buffer on the outside network buffer wait
2527 * list */
2528 verbose(VERB_CLIENT, "pending_tcp_query: queue to wait");
2529#ifdef USE_DNSTAP
2530 w->sq = sq;
2531#endif
2532 outnet_waiting_tcp_list_add(sq->outnet, w, 1);
2533 }
2534 return w;
2535}
2536
2537/** create query for serviced queries */
2538static void
2539serviced_gen_query(sldns_buffer* buff, uint8_t* qname, size_t qnamelen,
2540 uint16_t qtype, uint16_t qclass, uint16_t flags)
2541{
2542 sldns_buffer_clear(buff);
2543 /* skip id */
2544 sldns_buffer_write_u16(buff, flags);
2545 sldns_buffer_write_u16(buff, 1); /* qdcount */
2546 sldns_buffer_write_u16(buff, 0); /* ancount */
2547 sldns_buffer_write_u16(buff, 0); /* nscount */
2548 sldns_buffer_write_u16(buff, 0); /* arcount */
2549 sldns_buffer_write(buff, qname, qnamelen);
2550 sldns_buffer_write_u16(buff, qtype);
2551 sldns_buffer_write_u16(buff, qclass);
2552 sldns_buffer_flip(buff);
2553}
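Note: the query image built here deliberately starts at the FLAGS field; the 2-byte message ID is not written. serviced_encode() below writes a zero ID placeholder and then the stored sq->qbuf, which is why the qname sits at offset 10 in sq->qbuf but at offset 12 in a full packet (see serviced_perturb_qname and serviced_check_qname).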
2554
2555/** lookup serviced query in serviced query rbtree */
2556static struct serviced_query*
2557lookup_serviced(struct outside_network* outnet, sldns_buffer* buff, int dnssec,
2558 struct sockaddr_storage* addr, socklen_t addrlen,
2559 struct edns_option* opt_list)
2560{
2561 struct serviced_query key;
2562 key.node.key = &key;
2563 key.qbuf = sldns_buffer_begin(buff);
2564 key.qbuflen = sldns_buffer_limit(buff);
2565 key.dnssec = dnssec;
2566 memcpy(&key.addr, addr, addrlen);
2567 key.addrlen = addrlen;
2568 key.outnet = outnet;
2569 key.opt_list = opt_list;
2570 return (struct serviced_query*)rbtree_search(outnet->serviced, &key);
2571}
2572
2573void
2574serviced_timer_cb(void* arg)
2575{
2576 struct serviced_query* sq = (struct serviced_query*)arg;
2577 struct outside_network* outnet = sq->outnet;
2578 verbose(VERB_ALGO, "serviced send timer");
2579	/* By the time this cb is called we may no longer have any
2580	 * registered callbacks for this serviced_query; if so, do not send. */
2581 if(!sq->cblist)
2582 goto delete;
2583 /* perform first network action */
2584 if(outnet->do_udp && !(sq->tcp_upstream || sq->ssl_upstream)) {
2585 if(!serviced_udp_send(sq, outnet->udp_buff))
2586 goto delete;
2587 } else {
2588 if(!serviced_tcp_send(sq, outnet->udp_buff))
2589 goto delete;
2590 }
2591 /* Maybe by this time we don't have callbacks attached anymore. Don't
2592 * proactively try to delete; let it run and maybe another callback
2593 * will get attached by the time we get an answer. */
2594 return;
2595delete:
2596 serviced_callbacks(sq, NETEVENT_CLOSED-1, NULL((void *)0), NULL((void *)0));
2597}
2598
2599/** Create new serviced entry */
2600static struct serviced_query*
2601serviced_create(struct outside_network* outnet, sldns_buffer* buff, int dnssec,
2602 int want_dnssec, int nocaps, int tcp_upstream, int ssl_upstream,
2603 char* tls_auth_name, struct sockaddr_storage* addr, socklen_t addrlen,
2604 uint8_t* zone, size_t zonelen, int qtype, struct edns_option* opt_list,
2605 size_t pad_queries_block_size, struct alloc_cache* alloc,
2606 struct regional* region)
2607{
2608 struct serviced_query* sq = (struct serviced_query*)malloc(sizeof(*sq));
2609 struct timeval t;
2610#ifdef UNBOUND_DEBUG
2611 rbnode_type* ins;
2612#endif
2613 if(!sq) {
2614 alloc_reg_release(alloc, region);
2615 return NULL((void *)0);
2616 }
2617 sq->node.key = sq;
2618 sq->alloc = alloc;
2619 sq->region = region;
2620 sq->qbuf = regional_alloc_init(region, sldns_buffer_begin(buff),
2621 sldns_buffer_limit(buff));
2622 if(!sq->qbuf) {
2623 alloc_reg_release(alloc, region);
2624 free(sq);
2625 return NULL((void *)0);
2626 }
2627 sq->qbuflen = sldns_buffer_limit(buff);
2628 sq->zone = regional_alloc_init(region, zone, zonelen);
2629 if(!sq->zone) {
2630 alloc_reg_release(alloc, region);
2631 free(sq);
2632 return NULL((void *)0);
2633 }
2634 sq->zonelen = zonelen;
2635 sq->qtype = qtype;
2636 sq->dnssec = dnssec;
2637 sq->want_dnssec = want_dnssec;
2638 sq->nocaps = nocaps;
2639 sq->tcp_upstream = tcp_upstream;
2640 sq->ssl_upstream = ssl_upstream;
2641 if(tls_auth_name) {
2642 sq->tls_auth_name = regional_strdup(region, tls_auth_name);
2643 if(!sq->tls_auth_name) {
2644 alloc_reg_release(alloc, region);
2645 free(sq);
2646 return NULL((void *)0);
2647 }
2648 } else {
2649 sq->tls_auth_name = NULL((void *)0);
2650 }
2651 memcpy(&sq->addr, addr, addrlen);
2652 sq->addrlen = addrlen;
2653 sq->opt_list = opt_list;
2654 sq->busy = 0;
2655 sq->timer = comm_timer_create(outnet->base, serviced_timer_cb, sq);
2656 if(!sq->timer) {
2657 alloc_reg_release(alloc, region);
2658 free(sq);
2659 return NULL((void *)0);
2660 }
2661 memset(&t, 0, sizeof(t));
2662 comm_timer_set(sq->timer, &t);
2663 sq->outnet = outnet;
2664 sq->cblist = NULL((void *)0);
2665 sq->pending = NULL((void *)0);
2666 sq->status = serviced_initial;
2667 sq->retry = 0;
2668 sq->to_be_deleted = 0;
2669 sq->padding_block_size = pad_queries_block_size;
2670#ifdef UNBOUND_DEBUG
2671 ins =
2672#else
2673 (void)
2674#endif
2675 rbtree_insert(outnet->serviced, &sq->node);
2676 log_assert(ins != NULL); /* must not be already present */
2677 return sq;
2678}
2679
2680/** reuse tcp stream, remove serviced query from stream,
2681 * return true if the stream is kept, false if it is to be closed */
2682static int
2683reuse_tcp_remove_serviced_keep(struct waiting_tcp* w,
2684 struct serviced_query* sq)
2685{
2686 struct pending_tcp* pend_tcp = (struct pending_tcp*)w->next_waiting;
2687 verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep");
2688 /* remove the callback. let query continue to write to not cancel
2689 * the stream itself. also keep it as an entry in the tree_by_id,
2690 * in case the answer returns (that we no longer want), but we cannot
2691 * pick the same ID number meanwhile */
2692 w->cb = NULL((void *)0);
2693	/* see if it can be entered in the reuse tree;
2694	 * for that the FD has to be something other than -1 */
2695 if(pend_tcp->c->fd == -1) {
2696 verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep: -1 fd");
2697 return 0;
2698 }
2699 /* if in tree and used by other queries */
2700 if(pend_tcp->reuse.node.key) {
2701 verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep: in use by other queries");
2702 /* do not reset the keepalive timer, for that
2703 * we'd need traffic, and this is where the serviced is
2704 * removed due to state machine internal reasons,
2705 * eg. iterator no longer interested in this query */
2706 return 1;
2707 }
2708 /* if still open and want to keep it open */
2709 if(pend_tcp->c->fd != -1 && sq->outnet->tcp_reuse.count <
2710 sq->outnet->tcp_reuse_max) {
2711 verbose(VERB_CLIENT, "reuse_tcp_remove_serviced_keep: keep open");
2712 /* set a keepalive timer on it */
2713 if(!reuse_tcp_insert(sq->outnet, pend_tcp)) {
2714 return 0;
2715 }
2716 reuse_tcp_setup_timeout(pend_tcp, sq->outnet->tcp_reuse_timeout);
2717 return 1;
2718 }
2719 return 0;
2720}
2721
2722/** cleanup serviced query entry */
2723static void
2724serviced_delete(struct serviced_query* sq)
2725{
2726 verbose(VERB_CLIENT, "serviced_delete");
2727 if(sq->pending) {
2728 /* clear up the pending query */
2729 if(sq->status == serviced_query_UDP_EDNS ||
2730 sq->status == serviced_query_UDP ||
2731 sq->status == serviced_query_UDP_EDNS_FRAG ||
2732 sq->status == serviced_query_UDP_EDNS_fallback) {
2733 struct pending* p = (struct pending*)sq->pending;
2734 verbose(VERB_CLIENT, "serviced_delete: UDP");
2735 if(p->pc)
2736 portcomm_loweruse(sq->outnet, p->pc);
2737 pending_delete(sq->outnet, p);
2738 /* this call can cause reentrant calls back into the
2739 * mesh */
2740 outnet_send_wait_udp(sq->outnet);
2741 } else {
2742 struct waiting_tcp* w = (struct waiting_tcp*)
2743 sq->pending;
2744 verbose(VERB_CLIENT, "serviced_delete: TCP");
2745 log_assert(!(w->write_wait_queued && w->on_tcp_waiting_list));
2746 /* if on stream-write-waiting list then
2747 * remove from waiting list and waiting_tcp_delete */
2748 if(w->write_wait_queued) {
2749 struct pending_tcp* pend =
2750 (struct pending_tcp*)w->next_waiting;
2751 verbose(VERB_CLIENT, "serviced_delete: writewait");
2752 if(!w->in_cb_and_decommission)
2753 reuse_tree_by_id_delete(&pend->reuse, w);
2754 reuse_write_wait_remove(&pend->reuse, w);
2755 if(!w->in_cb_and_decommission)
2756 waiting_tcp_delete(w);
2757 } else if(!w->on_tcp_waiting_list) {
2758 struct pending_tcp* pend =
2759 (struct pending_tcp*)w->next_waiting;
2760 verbose(VERB_CLIENT, "serviced_delete: tcpreusekeep");
2761 /* w needs to stay on tree_by_id to not assign
2762 * the same ID; remove the callback since its
2763 * serviced_query will be gone. */
2764 w->cb = NULL((void *)0);
2765 if(!reuse_tcp_remove_serviced_keep(w, sq)) {
2766 if(!w->in_cb_and_decommission)
2767 reuse_cb_and_decommission(sq->outnet,
2768 pend, NETEVENT_CLOSED-1);
2769 use_free_buffer(sq->outnet);
2770 }
2771 sq->pending = NULL((void *)0);
2772 } else {
2773 verbose(VERB_CLIENT, "serviced_delete: tcpwait");
2774 outnet_waiting_tcp_list_remove(sq->outnet, w);
2775 if(!w->in_cb_and_decommission)
2776 waiting_tcp_delete(w);
2777 }
2778 }
2779 }
2780 /* does not delete from tree, caller has to do that */
2781 serviced_node_del(&sq->node, NULL((void *)0));
2782}
2783
2784/** perturb a dname capitalization randomly */
2785static void
2786serviced_perturb_qname(struct ub_randstate* rnd, uint8_t* qbuf, size_t len)
2787{
2788 uint8_t lablen;
2789 uint8_t* d = qbuf + 10;
2790 long int random = 0;
2791 int bits = 0;
2792 log_assert(len >= 10 + 5 /* offset qname, root, qtype, qclass */);
2793 (void)len;
2794 lablen = *d++;
2795 while(lablen) {
2796 while(lablen--) {
2797 /* only perturb A-Z, a-z */
2798 if(isalpha((unsigned char)*d)) {
2799 /* get a random bit */
2800 if(bits == 0) {
2801 random = ub_random(rnd);
2802 bits = 30;
2803 }
2804 if(random & 0x1) {
2805 *d = (uint8_t)toupper((unsigned char)*d);
2806 } else {
2807 *d = (uint8_t)tolower((unsigned char)*d);
2808 }
2809 random >>= 1;
2810 bits--;
2811 }
2812 d++;
2813 }
2814 lablen = *d++;
2815 }
2816 if(verbosity >= VERB_ALGO) {
2817 char buf[LDNS_MAX_DOMAINLEN255+1];
2818 dname_str(qbuf+10, buf);
2819 verbose(VERB_ALGO, "qname perturbed to %s", buf);
2820 }
2821}
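Illustration of the 0x20 perturbation above: every alphabetic byte of the stored query name gets a random case, so a query for example.com might leave as eXamPle.COm. The upstream server is expected to echo the name unchanged, and serviced_check_qname() below compares the reply's qname to this perturbed copy byte for byte, case included, adding roughly one bit of anti-spoofing entropy per letter on top of the 16-bit message ID.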
2822
2823/** put serviced query into a buffer */
2824static void
2825serviced_encode(struct serviced_query* sq, sldns_buffer* buff, int with_edns)
2826{
2827 /* if we are using 0x20 bits for ID randomness, perturb them */
2828 if(sq->outnet->use_caps_for_id && !sq->nocaps) {
2829 serviced_perturb_qname(sq->outnet->rnd, sq->qbuf, sq->qbuflen);
2830 }
2831 /* generate query */
2832 sldns_buffer_clear(buff);
2833 sldns_buffer_write_u16(buff, 0); /* id placeholder */
2834 sldns_buffer_write(buff, sq->qbuf, sq->qbuflen);
2835 sldns_buffer_flip(buff);
2836 if(with_edns) {
2837 /* add edns section */
2838 struct edns_data edns;
2839 struct edns_option padding_option;
2840 edns.edns_present = 1;
2841 edns.ext_rcode = 0;
2842 edns.edns_version = EDNS_ADVERTISED_VERSION0;
2843 edns.opt_list_in = NULL((void *)0);
2844 edns.opt_list_out = sq->opt_list;
2845 edns.opt_list_inplace_cb_out = NULL((void *)0);
2846 if(sq->status == serviced_query_UDP_EDNS_FRAG) {
2847 if(addr_is_ip6(&sq->addr, sq->addrlen)) {
2848 if(EDNS_FRAG_SIZE_IP61232 < EDNS_ADVERTISED_SIZE)
2849 edns.udp_size = EDNS_FRAG_SIZE_IP61232;
2850 else edns.udp_size = EDNS_ADVERTISED_SIZE;
2851 } else {
2852 if(EDNS_FRAG_SIZE_IP41472 < EDNS_ADVERTISED_SIZE)
2853 edns.udp_size = EDNS_FRAG_SIZE_IP41472;
2854 else edns.udp_size = EDNS_ADVERTISED_SIZE;
2855 }
2856 } else {
2857 edns.udp_size = EDNS_ADVERTISED_SIZE;
2858 }
2859 edns.bits = 0;
2860 if(sq->dnssec & EDNS_DO0x8000)
2861 edns.bits = EDNS_DO0x8000;
2862 if(sq->dnssec & BIT_CD0x0010)
2863 LDNS_CD_SET(sldns_buffer_begin(buff))(*(sldns_buffer_begin(buff)+3) |= 0x10U);
2864 if (sq->ssl_upstream && sq->padding_block_size) {
2865 padding_option.opt_code = LDNS_EDNS_PADDING;
2866 padding_option.opt_len = 0;
2867 padding_option.opt_data = NULL((void *)0);
2868 padding_option.next = edns.opt_list_out;
2869 edns.opt_list_out = &padding_option;
2870 edns.padding_block_size = sq->padding_block_size;
2871 }
2872 attach_edns_record(buff, &edns);
2873 }
2874}
2875
2876/**
2877 * Perform serviced query UDP sending operation.
2878 * Sends UDP with EDNS, unless infra host marked non EDNS.
2879 * @param sq: query to send.
2880 * @param buff: buffer scratch space.
2881 * @return 0 on error.
2882 */
2883static int
2884serviced_udp_send(struct serviced_query* sq, sldns_buffer* buff)
2885{
2886 int rtt, vs;
2887 uint8_t edns_lame_known;
2888 time_t now = *sq->outnet->now_secs;
2889
2890 if(!infra_host(sq->outnet->infra, &sq->addr, sq->addrlen, sq->zone,
2891 sq->zonelen, now, &vs, &edns_lame_known, &rtt))
2892 return 0;
2893 sq->last_rtt = rtt;
2894 verbose(VERB_ALGO, "EDNS lookup known=%d vs=%d", edns_lame_known, vs);
2895 if(sq->status == serviced_initial) {
2896 if(vs != -1) {
2897 sq->status = serviced_query_UDP_EDNS;
2898 } else {
2899 sq->status = serviced_query_UDP;
2900 }
2901 }
2902 serviced_encode(sq, buff, (sq->status == serviced_query_UDP_EDNS) ||
2903 (sq->status == serviced_query_UDP_EDNS_FRAG));
2904 sq->last_sent_time = *sq->outnet->now_tv;
2905 sq->edns_lame_known = (int)edns_lame_known;
2906 verbose(VERB_ALGO, "serviced query UDP timeout=%d msec", rtt);
2907 sq->pending = pending_udp_query(sq, buff, rtt,
2908 serviced_udp_callback, sq);
2909 if(!sq->pending)
2910 return 0;
2911 return 1;
2912}
2913
2914/** check that perturbed qname is identical */
2915static int
2916serviced_check_qname(sldns_buffer* pkt, uint8_t* qbuf, size_t qbuflen)
2917{
2918 uint8_t* d1 = sldns_buffer_begin(pkt)+12;
2919 uint8_t* d2 = qbuf+10;
2920 uint8_t len1, len2;
2921 int count = 0;
2922 if(sldns_buffer_limit(pkt) < 12+1+4) /* packet too small for qname */
2923 return 0;
2924 log_assert(qbuflen >= 15 /* 10 header, root, type, class */);
2925 len1 = *d1++;
2926 len2 = *d2++;
2927 while(len1 != 0 || len2 != 0) {
2928 if(LABEL_IS_PTR(len1)( ((len1)&0xc0) == 0xc0 )) {
2929 /* check if we can read *d1 with compression ptr rest */
2930 if(d1 >= sldns_buffer_at(pkt, sldns_buffer_limit(pkt)))
2931 return 0;
2932 d1 = sldns_buffer_begin(pkt)+PTR_OFFSET(len1, *d1)( ((len1)&0x3f)<<8 | (*d1) );
2933 /* check if we can read the destination *d1 */
2934 if(d1 >= sldns_buffer_at(pkt, sldns_buffer_limit(pkt)))
2935 return 0;
2936 len1 = *d1++;
2937 if(count++ > MAX_COMPRESS_PTRS256)
2938 return 0;
2939 continue;
2940 }
2941 if(d2 > qbuf+qbuflen)
2942 return 0;
2943 if(len1 != len2)
2944 return 0;
2945 if(len1 > LDNS_MAX_LABELLEN63)
2946 return 0;
2947 /* check len1 + 1(next length) are okay to read */
2948 if(d1+len1 >= sldns_buffer_at(pkt, sldns_buffer_limit(pkt)))
2949 return 0;
2950 log_assert(len1 <= LDNS_MAX_LABELLEN);
2951 log_assert(len2 <= LDNS_MAX_LABELLEN);
2952 log_assert(len1 == len2 && len1 != 0);
2953 /* compare the labels - bitwise identical */
2954 if(memcmp(d1, d2, len1) != 0)
2955 return 0;
2956 d1 += len1;
2957 d2 += len2;
2958 len1 = *d1++;
2959 len2 = *d2++;
2960 }
2961 return 1;
2962}
2963
2964/** call the callbacks for a serviced query */
2965static void
2966serviced_callbacks(struct serviced_query* sq, int error, struct comm_point* c,
2967 struct comm_reply* rep)
2968{
2969 struct service_callback* p;
2970 int dobackup = (sq->cblist && sq->cblist->next); /* >1 cb*/
2971 uint8_t *backup_p = NULL((void *)0);
2972 size_t backlen = 0;
2973#ifdef UNBOUND_DEBUG
2974 rbnode_type* rem =
2975#else
2976 (void)
2977#endif
2978 /* remove from tree, and schedule for deletion, so that callbacks
2979 * can safely deregister themselves and even create new serviced
2980 * queries that are identical to this one. */
2981 rbtree_delete(sq->outnet->serviced, sq);
2982 log_assert(rem); /* should have been present */
2983 sq->to_be_deleted = 1;
2984 verbose(VERB_ALGO, "svcd callbacks start");
2985 if(sq->outnet->use_caps_for_id && error == NETEVENT_NOERROR0 && c &&
2986 !sq->nocaps && sq->qtype != LDNS_RR_TYPE_PTR) {
2987 /* for type PTR do not check perturbed name in answer,
2988 * compatibility with cisco dns guard boxes that mess up
2989 * reverse queries 0x20 contents */
2990 /* noerror and nxdomain must have a qname in reply */
2991 if(sldns_buffer_read_u16_at(c->buffer, 4) == 0 &&
2992 (LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer))(*(sldns_buffer_begin(c->buffer)+3) & 0x0fU)
2993 == LDNS_RCODE_NOERROR ||
2994 LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer))(*(sldns_buffer_begin(c->buffer)+3) & 0x0fU)
2995 == LDNS_RCODE_NXDOMAIN)) {
2996 verbose(VERB_DETAIL, "no qname in reply to check 0x20ID");
2997 log_addr(VERB_DETAIL, "from server",
2998 &sq->addr, sq->addrlen);
2999 log_buf(VERB_DETAIL, "for packet", c->buffer);
3000 error = NETEVENT_CLOSED-1;
3001 c = NULL((void *)0);
3002 } else if(sldns_buffer_read_u16_at(c->buffer, 4) > 0 &&
3003 !serviced_check_qname(c->buffer, sq->qbuf,
3004 sq->qbuflen)) {
3005 verbose(VERB_DETAIL, "wrong 0x20-ID in reply qname");
3006 log_addr(VERB_DETAIL, "from server",
3007 &sq->addr, sq->addrlen);
3008 log_buf(VERB_DETAIL, "for packet", c->buffer);
3009 error = NETEVENT_CAPSFAIL-3;
3010 /* and cleanup too */
3011 pkt_dname_tolower(c->buffer,
3012 sldns_buffer_at(c->buffer, 12));
3013 } else {
3014 verbose(VERB_ALGO, "good 0x20-ID in reply qname");
3015 /* cleanup caps, prettier cache contents. */
3016 pkt_dname_tolower(c->buffer,
3017 sldns_buffer_at(c->buffer, 12));
3018 }
3019 }
3020 if(dobackup && c) {
3021 /* make a backup of the query, since the querystate processing
3022 * may send outgoing queries that overwrite the buffer.
3023 * use secondary buffer to store the query.
3024 * This is a data copy, but faster than packet to server */
3025 backlen = sldns_buffer_limit(c->buffer);
3026 backup_p = regional_alloc_init(sq->region,
3027 sldns_buffer_begin(c->buffer), backlen);
3028 if(!backup_p) {
3029 log_err("malloc failure in serviced query callbacks");
3030 error = NETEVENT_CLOSED-1;
3031 c = NULL((void *)0);
3032 }
3033 sq->outnet->svcd_overhead = backlen;
3034 }
3035 /* test the actual sq->cblist, because the next elem could be deleted*/
3036 while((p=sq->cblist) != NULL((void *)0)) {
3037 sq->cblist = p->next; /* remove this element */
3038 if(dobackup && c) {
3039 sldns_buffer_clear(c->buffer);
3040 sldns_buffer_write(c->buffer, backup_p, backlen);
3041 sldns_buffer_flip(c->buffer);
3042 }
3043 fptr_ok(fptr_whitelist_serviced_query(p->cb));
3044 (void)(*p->cb)(c, p->cb_arg, error, rep);
3045 }
3046 if(backup_p) {
3047 sq->outnet->svcd_overhead = 0;
3048 }
3049 verbose(VERB_ALGO, "svcd callbacks end");
3050 log_assert(sq->cblist == NULL);
3051 serviced_delete(sq);
3052}
3053
3054int
3055serviced_tcp_callback(struct comm_point* c, void* arg, int error,
3056 struct comm_reply* rep)
3057{
3058 struct serviced_query* sq = (struct serviced_query*)arg;
3059 struct comm_reply r2;
3060#ifdef USE_DNSTAP
3061 struct waiting_tcp* w = (struct waiting_tcp*)sq->pending;
3062 struct pending_tcp* pend_tcp = NULL((void *)0);
3063 struct port_if* pi = NULL((void *)0);
3064 if(w && !w->on_tcp_waiting_list && w->next_waiting) {
3065 pend_tcp = (struct pending_tcp*)w->next_waiting;
3066 pi = pend_tcp->pi;
3067 }
3068#endif
3069 sq->pending = NULL((void *)0); /* removed after this callback */
3070 if(error != NETEVENT_NOERROR0)
3071 log_addr(VERB_QUERY, "tcp error for address",
3072 &sq->addr, sq->addrlen);
3073 if(error==NETEVENT_NOERROR0)
3074 infra_update_tcp_works(sq->outnet->infra, &sq->addr,
3075 sq->addrlen, sq->zone, sq->zonelen);
3076#ifdef USE_DNSTAP
3077 /*
3078 * sending src (local service)/dst (upstream) addresses over DNSTAP
3079 */
3080 if(error==NETEVENT_NOERROR0 && pi && sq->outnet->dtenv &&
3081 (sq->outnet->dtenv->log_resolver_response_messages ||
3082 sq->outnet->dtenv->log_forwarder_response_messages)) {
3083 log_addr(VERB_ALGO, "response from upstream", &sq->addr, sq->addrlen);
3084 log_addr(VERB_ALGO, "to local addr", &pi->addr, pi->addrlen);
3085 dt_msg_send_outside_response(sq->outnet->dtenv, &sq->addr,
3086 &pi->addr, c->type, sq->zone, sq->zonelen, sq->qbuf,
3087 sq->qbuflen, &sq->last_sent_time, sq->outnet->now_tv,
3088 c->buffer);
3089 }
3090#endif
3091 if(error==NETEVENT_NOERROR0 && sq->status == serviced_query_TCP_EDNS &&
3092 (LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer))(*(sldns_buffer_begin(c->buffer)+3) & 0x0fU) ==
3093		LDNS_RCODE_FORMERR || LDNS_RCODE_WIRE(sldns_buffer_begin(
3094		c->buffer)) == LDNS_RCODE_NOTIMPL) ) {
3095 /* attempt to fallback to nonEDNS */
3096 sq->status = serviced_query_TCP_EDNS_fallback;
3097 serviced_tcp_initiate(sq, c->buffer);
3098 return 0;
3099 } else if(error==NETEVENT_NOERROR0 &&
3100 sq->status == serviced_query_TCP_EDNS_fallback &&
3101 (LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer))(*(sldns_buffer_begin(c->buffer)+3) & 0x0fU) ==
3102		LDNS_RCODE_NOERROR || LDNS_RCODE_WIRE(
3103		sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NXDOMAIN
3104 || LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer))(*(sldns_buffer_begin(c->buffer)+3) & 0x0fU)
3105 == LDNS_RCODE_YXDOMAIN)) {
3106 /* the fallback produced a result that looks promising, note
3107 * that this server should be approached without EDNS */
3108 /* only store noEDNS in cache if domain is noDNSSEC */
3109 if(!sq->want_dnssec)
3110 if(!infra_edns_update(sq->outnet->infra, &sq->addr,
3111 sq->addrlen, sq->zone, sq->zonelen, -1,
3112 *sq->outnet->now_secs))
3113 log_err("Out of memory caching no edns for host");
3114 sq->status = serviced_query_TCP;
3115 }
3116 if(sq->tcp_upstream || sq->ssl_upstream) {
3117 struct timeval now = *sq->outnet->now_tv;
3118 if(error!=NETEVENT_NOERROR0) {
3119 if(!infra_rtt_update(sq->outnet->infra, &sq->addr,
3120 sq->addrlen, sq->zone, sq->zonelen, sq->qtype,
3121 -1, sq->last_rtt, (time_t)now.tv_sec))
3122 log_err("out of memory in TCP exponential backoff.");
3123 } else if(now.tv_sec > sq->last_sent_time.tv_sec ||
3124 (now.tv_sec == sq->last_sent_time.tv_sec &&
3125 now.tv_usec > sq->last_sent_time.tv_usec)) {
3126 /* convert from microseconds to milliseconds */
3127 int roundtime = ((int)(now.tv_sec - sq->last_sent_time.tv_sec))*1000
3128 + ((int)now.tv_usec - (int)sq->last_sent_time.tv_usec)/1000;
3129 verbose(VERB_ALGO, "measured TCP-time at %d msec", roundtime);
3130 log_assert(roundtime >= 0);
3131			/* only store if less than AUTH_TIMEOUT seconds; it could be
3132			 * huge if the system hibernated and we just woke up */
3133 if(roundtime < 60000) {
3134 if(!infra_rtt_update(sq->outnet->infra, &sq->addr,
3135 sq->addrlen, sq->zone, sq->zonelen, sq->qtype,
3136 roundtime, sq->last_rtt, (time_t)now.tv_sec))
3137 log_err("out of memory noting rtt.");
3138 }
3139 }
3140 }
3141 /* insert address into reply info */
3142 if(!rep) {
3143 /* create one if there isn't (on errors) */
3144 rep = &r2;
3145 r2.c = c;
3146 }
3147 memcpy(&rep->remote_addr, &sq->addr, sq->addrlen);
3148 rep->remote_addrlen = sq->addrlen;
3149 serviced_callbacks(sq, error, c, rep);
3150 return 0;
3151}
3152
3153static void
3154serviced_tcp_initiate(struct serviced_query* sq, sldns_buffer* buff)
3155{
3156 verbose(VERB_ALGO, "initiate TCP query %s",
3157 sq->status==serviced_query_TCP_EDNS?"EDNS":"");
3158 serviced_encode(sq, buff, sq->status == serviced_query_TCP_EDNS);
3159 sq->last_sent_time = *sq->outnet->now_tv;
3160 log_assert(!sq->busy);
3161 sq->busy = 1;
3162 sq->pending = pending_tcp_query(sq, buff, sq->outnet->tcp_auth_query_timeout,
3163 serviced_tcp_callback, sq);
3164 sq->busy = 0;
3165 if(!sq->pending) {
3166 /* delete from tree so that a retry by above layer does not
3167 * clash with this entry */
3168 verbose(VERB_ALGO, "serviced_tcp_initiate: failed to send tcp query");
3169 serviced_callbacks(sq, NETEVENT_CLOSED-1, NULL((void *)0), NULL((void *)0));
3170 }
3171}
3172
3173/** Send serviced query over TCP return false on initial failure */
3174static int
3175serviced_tcp_send(struct serviced_query* sq, sldns_buffer* buff)
3176{
3177 int vs, rtt, timeout;
3178 uint8_t edns_lame_known;
3179 if(!infra_host(sq->outnet->infra, &sq->addr, sq->addrlen, sq->zone,
3180 sq->zonelen, *sq->outnet->now_secs, &vs, &edns_lame_known,
3181 &rtt))
3182 return 0;
3183 sq->last_rtt = rtt;
3184 if(vs != -1)
3185 sq->status = serviced_query_TCP_EDNS;
3186 else sq->status = serviced_query_TCP;
3187 serviced_encode(sq, buff, sq->status == serviced_query_TCP_EDNS);
3188 sq->last_sent_time = *sq->outnet->now_tv;
3189 if(sq->tcp_upstream || sq->ssl_upstream) {
3190 timeout = rtt;
3191 if(rtt >= UNKNOWN_SERVER_NICENESS && rtt < sq->outnet->tcp_auth_query_timeout)
3192 timeout = sq->outnet->tcp_auth_query_timeout;
3193 } else {
3194 timeout = sq->outnet->tcp_auth_query_timeout;
3195 }
3196 log_assert(!sq->busy);
3197 sq->busy = 1;
3198 sq->pending = pending_tcp_query(sq, buff, timeout,
3199 serviced_tcp_callback, sq);
3200 sq->busy = 0;
3201 return sq->pending != NULL((void *)0);
3202}
3203
3204/* See if the packet is EDNS-malformed; such answers have zeroes at the
3205 * start of the answer record. Some servers return malformed packets to
3206 * EDNS0 queries, but they return good packets for non-EDNS0 queries.
3207 * We try to detect their output without resorting to a full parse or a
3208 * check for too many bytes after the end of the packet. */
3209static int
3210packet_edns_malformed(struct sldns_buffer* buf, int qtype)
3211{
3212 size_t len;
3213 if(sldns_buffer_limit(buf) < LDNS_HEADER_SIZE12)
3214 return 1; /* malformed */
3215 /* they have NOERROR rcode, 1 answer. */
3216 if(LDNS_RCODE_WIRE(sldns_buffer_begin(buf))(*(sldns_buffer_begin(buf)+3) & 0x0fU) != LDNS_RCODE_NOERROR)
3217 return 0;
3218 /* one query (to skip) and answer records */
3219 if(LDNS_QDCOUNT(sldns_buffer_begin(buf))(sldns_read_uint16(sldns_buffer_begin(buf)+4)) != 1 ||
3220 LDNS_ANCOUNT(sldns_buffer_begin(buf))(sldns_read_uint16(sldns_buffer_begin(buf)+6)) == 0)
3221 return 0;
3222 /* skip qname */
3223 len = dname_valid(sldns_buffer_at(buf, LDNS_HEADER_SIZE12),
3224 sldns_buffer_limit(buf)-LDNS_HEADER_SIZE12);
3225 if(len == 0)
3226 return 0;
3227 if(len == 1 && qtype == 0)
3228 return 0; /* we asked for '.' and type 0 */
3229 /* and then 4 bytes (type and class of query) */
3230 if(sldns_buffer_limit(buf) < LDNS_HEADER_SIZE12 + len + 4 + 3)
3231 return 0;
3232
3233 /* and start with 11 zeroes as the answer RR */
3234 /* so check the qtype of the answer record, qname=0, type=0 */
3235 if(sldns_buffer_at(buf, LDNS_HEADER_SIZE12+len+4)[0] == 0 &&
3236 sldns_buffer_at(buf, LDNS_HEADER_SIZE12+len+4)[1] == 0 &&
3237 sldns_buffer_at(buf, LDNS_HEADER_SIZE12+len+4)[2] == 0)
3238 return 1;
3239 return 0;
3240}
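
The wire-format macros used above read fixed offsets in the 12-byte DNS header. The following standalone sketch (names are mine, not from the file) shows the offsets involved.

#include <stdint.h>

/* Sketch of the header offsets read by the LDNS_*_WIRE macros: byte 2
 * carries the TC bit, the low nibble of byte 3 is the RCODE, QDCOUNT is
 * at bytes 4-5 and ANCOUNT at bytes 6-7; the fixed header is 12 bytes. */
enum { DNS_HDR_SIZE = 12 };
static int wire_tc(const uint8_t* pkt)      { return (pkt[2] & 0x02) != 0; }
static int wire_rcode(const uint8_t* pkt)   { return pkt[3] & 0x0f; }
static int wire_qdcount(const uint8_t* pkt) { return (pkt[4] << 8) | pkt[5]; }
static int wire_ancount(const uint8_t* pkt) { return (pkt[6] << 8) | pkt[7]; }
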
3241
3242int
3243serviced_udp_callback(struct comm_point* c, void* arg, int error,
3244 struct comm_reply* rep)
3245{
3246 struct serviced_query* sq = (struct serviced_query*)arg;
3247 struct outside_network* outnet = sq->outnet;
3248 struct timeval now = *sq->outnet->now_tv;
3249#ifdef USE_DNSTAP
3250 struct pending* p = (struct pending*)sq->pending;
3251#endif
3252
3253 sq->pending = NULL((void *)0); /* removed after callback */
3254 if(error == NETEVENT_TIMEOUT-2) {
3255 if(sq->status == serviced_query_UDP_EDNS && sq->last_rtt < 5000) {
3256 /* fallback to 1480/1280 */
3257 sq->status = serviced_query_UDP_EDNS_FRAG;
3258 log_name_addr(VERB_ALGO, "try edns1xx0", sq->qbuf+10,
3259 &sq->addr, sq->addrlen);
3260 if(!serviced_udp_send(sq, c->buffer)) {
3261 serviced_callbacks(sq, NETEVENT_CLOSED-1, c, rep);
3262 }
3263 return 0;
3264 }
3265 if(sq->status == serviced_query_UDP_EDNS_FRAG) {
3266 /* fragmentation size did not fix it */
3267 sq->status = serviced_query_UDP_EDNS;
3268 }
3269 sq->retry++;
3270 if(!infra_rtt_update(outnet->infra, &sq->addr, sq->addrlen,
3271 sq->zone, sq->zonelen, sq->qtype, -1, sq->last_rtt,
3272 (time_t)now.tv_sec))
3273 log_err("out of memory in UDP exponential backoff");
3274 if(sq->retry < OUTBOUND_UDP_RETRY1) {
3275 log_name_addr(VERB_ALGO, "retry query", sq->qbuf+10,
3276 &sq->addr, sq->addrlen);
3277 if(!serviced_udp_send(sq, c->buffer)) {
3278 serviced_callbacks(sq, NETEVENT_CLOSED-1, c, rep);
3279 }
3280 return 0;
3281 }
3282 }
3283 if(error != NETEVENT_NOERROR0) {
3284 /* udp returns error (due to no ID or interface available) */
3285 serviced_callbacks(sq, error, c, rep);
3286 return 0;
3287 }
3288#ifdef USE_DNSTAP
3289 /*
3290 * sending src (local service)/dst (upstream) addresses over DNSTAP
3291 */
3292 if(error == NETEVENT_NOERROR0 && outnet->dtenv && p->pc &&
3293 (outnet->dtenv->log_resolver_response_messages ||
3294 outnet->dtenv->log_forwarder_response_messages)) {
3295 log_addr(VERB_ALGO, "response from upstream", &sq->addr, sq->addrlen);
3296 log_addr(VERB_ALGO, "to local addr", &p->pc->pif->addr,
3297 p->pc->pif->addrlen);
3298 dt_msg_send_outside_response(outnet->dtenv, &sq->addr,
3299 &p->pc->pif->addr, c->type, sq->zone, sq->zonelen,
3300 sq->qbuf, sq->qbuflen, &sq->last_sent_time,
3301 sq->outnet->now_tv, c->buffer);
3302 }
3303#endif
3304 if( (sq->status == serviced_query_UDP_EDNS
3305 ||sq->status == serviced_query_UDP_EDNS_FRAG)
3306 && (LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer))
3307 == LDNS_RCODE_FORMERR || LDNS_RCODE_WIRE(
3308 sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NOTIMPL
3309 || packet_edns_malformed(c->buffer, sq->qtype)
3310 )) {
3311 /* try to get an answer by falling back without EDNS */
3312 verbose(VERB_ALGO, "serviced query: attempt without EDNS");
3313 sq->status = serviced_query_UDP_EDNS_fallback;
3314 sq->retry = 0;
3315 if(!serviced_udp_send(sq, c->buffer)) {
3316 serviced_callbacks(sq, NETEVENT_CLOSED-1, c, rep);
3317 }
3318 return 0;
3319 } else if(sq->status == serviced_query_UDP_EDNS &&
3320 !sq->edns_lame_known) {
3321 /* now we know that edns queries received answers; store that */
3322 log_addr(VERB_ALGO, "serviced query: EDNS works for",
3323 &sq->addr, sq->addrlen);
3324 if(!infra_edns_update(outnet->infra, &sq->addr, sq->addrlen,
3325 sq->zone, sq->zonelen, 0, (time_t)now.tv_sec)) {
3326 log_err("Out of memory caching edns works");
3327 }
3328 sq->edns_lame_known = 1;
3329 } else if(sq->status == serviced_query_UDP_EDNS_fallback &&
3330 !sq->edns_lame_known && (LDNS_RCODE_WIRE(
3331 sldns_buffer_begin(c->buffer)) == LDNS_RCODE_NOERROR ||
3332 LDNS_RCODE_WIRE(sldns_buffer_begin(c->buffer)) ==
3333 LDNS_RCODE_NXDOMAIN || LDNS_RCODE_WIRE(sldns_buffer_begin(
3334 c->buffer)) == LDNS_RCODE_YXDOMAIN)) {
3335 /* the fallback produced a result that looks promising, note
3336 * that this server should be approached without EDNS */
3337 /* only store noEDNS in cache if domain is noDNSSEC */
3338 if(!sq->want_dnssec) {
3339 log_addr(VERB_ALGO, "serviced query: EDNS fails for",
3340 &sq->addr, sq->addrlen);
3341 if(!infra_edns_update(outnet->infra, &sq->addr, sq->addrlen,
3342 sq->zone, sq->zonelen, -1, (time_t)now.tv_sec)) {
3343 log_err("Out of memory caching no edns for host");
3344 }
3345 } else {
3346 log_addr(VERB_ALGO, "serviced query: EDNS fails, but "
3347 "not stored because need DNSSEC for", &sq->addr,
3348 sq->addrlen);
3349 }
3350 sq->status = serviced_query_UDP;
3351 }
3352 if(now.tv_sec > sq->last_sent_time.tv_sec ||
3353 (now.tv_sec == sq->last_sent_time.tv_sec &&
3354 now.tv_usec > sq->last_sent_time.tv_usec)) {
3355 /* convert from microseconds to milliseconds */
3356 int roundtime = ((int)(now.tv_sec - sq->last_sent_time.tv_sec))*1000
3357 + ((int)now.tv_usec - (int)sq->last_sent_time.tv_usec)/1000;
3358 verbose(VERB_ALGO, "measured roundtrip at %d msec", roundtime);
3359 log_assert(roundtime >= 0);
3360 /* in case the system hibernated, do not enter a huge value;
3361 * values above this give trouble with server selection */
3362 if(roundtime < 60000) {
3363 if(!infra_rtt_update(outnet->infra, &sq->addr, sq->addrlen,
3364 sq->zone, sq->zonelen, sq->qtype, roundtime,
3365 sq->last_rtt, (time_t)now.tv_sec))
3366 log_err("out of memory noting rtt.");
3367 }
3368 }
3369 /* perform TC flag check and TCP fallback after updating our
3370 * cache entries for EDNS status and RTT times */
3371 if(LDNS_TC_WIRE(sldns_buffer_begin(c->buffer))(*(sldns_buffer_begin(c->buffer)+2) & 0x02U)) {
3372 /* fallback to TCP */
3373 /* this discards partial UDP contents */
3374 if(sq->status == serviced_query_UDP_EDNS ||
3375 sq->status == serviced_query_UDP_EDNS_FRAG ||
3376 sq->status == serviced_query_UDP_EDNS_fallback)
3377 /* if we have unfinished EDNS_fallback, start again */
3378 sq->status = serviced_query_TCP_EDNS;
3379 else sq->status = serviced_query_TCP;
3380 serviced_tcp_initiate(sq, c->buffer);
3381 return 0;
3382 }
3383 /* yay! an answer */
3384 serviced_callbacks(sq, error, c, rep);
3385 return 0;
3386}
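
As a reading aid, the status transitions driven by serviced_udp_callback can be summarized as below; this is my paraphrase of the logic above, not an authoritative description.

/* Sketch of the state transitions above (paraphrased):
 *   timeout, last_rtt < 5000 ms : UDP_EDNS          -> UDP_EDNS_FRAG (smaller EDNS size)
 *   timeout again               : UDP_EDNS_FRAG     -> UDP_EDNS      (retry with backoff)
 *   FORMERR/NOTIMPL/malformed   : UDP_EDNS(_FRAG)   -> UDP_EDNS_fallback (re-send without EDNS)
 *   clean rcode after fallback  : UDP_EDNS_fallback -> UDP           (may cache "no EDNS")
 *   TC bit set in the reply     : any UDP state     -> TCP_EDNS or TCP (retry over TCP)
 */
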
3387
3388struct serviced_query*
3389outnet_serviced_query(struct outside_network* outnet,
3390 struct query_info* qinfo, uint16_t flags, int dnssec, int want_dnssec,
3391 int nocaps, int check_ratelimit, int tcp_upstream, int ssl_upstream,
3392 char* tls_auth_name, struct sockaddr_storage* addr, socklen_t addrlen,
3393 uint8_t* zone, size_t zonelen, struct module_qstate* qstate,
3394 comm_point_callback_type* callback, void* callback_arg,
3395 sldns_buffer* buff, struct module_env* env, int* was_ratelimited)
3396{
3397 struct serviced_query* sq;
3398 struct service_callback* cb;
3399 struct edns_string_addr* client_string_addr;
3400 struct regional* region;
3401 struct edns_option* backed_up_opt_list = qstate->edns_opts_back_out;
3402 struct edns_option* per_upstream_opt_list = NULL((void *)0);
3403 time_t timenow = 0;
3404
3405 /* If we have an already populated EDNS option list make a copy since
3406 * we may now add upstream specific EDNS options. */
3407 /* Use a region that could be attached to a serviced_query, if it needs
3408 * to be created. If an existing one is found then this region will be
3409 * destroyed here. */
3410 region = alloc_reg_obtain(env->alloc);
3411 if(!region) return NULL((void *)0);
3412 if(qstate->edns_opts_back_out) {
3413 per_upstream_opt_list = edns_opt_copy_region(
3414 qstate->edns_opts_back_out, region);
3415 if(!per_upstream_opt_list) {
3416 alloc_reg_release(env->alloc, region);
3417 return NULL((void *)0);
3418 }
3419 qstate->edns_opts_back_out = per_upstream_opt_list;
3420 }
3421
3422 if(!inplace_cb_query_call(env, qinfo, flags, addr, addrlen, zone,
3423 zonelen, qstate, region)) {
3424 alloc_reg_release(env->alloc, region);
3425 return NULL((void *)0);
3426 }
3427 /* Restore the option list; we can explicitly use the copied one from
3428 * now on. */
3429 per_upstream_opt_list = qstate->edns_opts_back_out;
3430 qstate->edns_opts_back_out = backed_up_opt_list;
3431
3432 if((client_string_addr = edns_string_addr_lookup(
3433 &env->edns_strings->client_strings, addr, addrlen))) {
3434 edns_opt_list_append(&per_upstream_opt_list,
3435 env->edns_strings->client_string_opcode,
3436 client_string_addr->string_len,
3437 client_string_addr->string, region);
3438 }
3439
3440 serviced_gen_query(buff, qinfo->qname, qinfo->qname_len, qinfo->qtype,
3441 qinfo->qclass, flags);
3442 sq = lookup_serviced(outnet, buff, dnssec, addr, addrlen,
3443 per_upstream_opt_list);
3444 if(!sq) {
3445 /* Check ratelimit only for new serviced_query */
3446 if(check_ratelimit) {
3447 timenow = *env->now;
3448 if(!infra_ratelimit_inc(env->infra_cache, zone,
3449 zonelen, timenow, env->cfg->ratelimit_backoff,
3450 &qstate->qinfo, qstate->reply)) {
3451 /* Can we pass through with slip factor? */
3452 if(env->cfg->ratelimit_factor == 0 ||
3453 ub_random_max(env->rnd,
3454 env->cfg->ratelimit_factor) != 1) {
3455 *was_ratelimited = 1;
3456 alloc_reg_release(env->alloc, region);
3457 return NULL((void *)0);
3458 }
3459 log_nametypeclass(VERB_ALGO,
3460 "ratelimit allowed through for "
3461 "delegation point", zone,
3462 LDNS_RR_TYPE_NS, LDNS_RR_CLASS_IN);
3463 }
3464 }
3465 /* make new serviced query entry */
3466 sq = serviced_create(outnet, buff, dnssec, want_dnssec, nocaps,
3467 tcp_upstream, ssl_upstream, tls_auth_name, addr,
3468 addrlen, zone, zonelen, (int)qinfo->qtype,
3469 per_upstream_opt_list,
3470 ( ssl_upstream && env->cfg->pad_queries
3471 ? env->cfg->pad_queries_block_size : 0 ),
3472 env->alloc, region);
3473 if(!sq) {
3474 if(check_ratelimit) {
3475 infra_ratelimit_dec(env->infra_cache,
3476 zone, zonelen, timenow);
3477 }
3478 return NULL((void *)0);
3479 }
3480 if(!(cb = (struct service_callback*)regional_alloc(
3481 sq->region, sizeof(*cb)))) {
3482 if(check_ratelimit) {
3483 infra_ratelimit_dec(env->infra_cache,
3484 zone, zonelen, timenow);
3485 }
3486 (void)rbtree_delete(outnet->serviced, sq);
3487 serviced_node_del(&sq->node, NULL((void *)0));
3488 return NULL((void *)0);
3489 }
3490 /* No network action at this point; it will be invoked with the
3491 * serviced_query timer instead to run outside of the mesh. */
3492 } else {
3493 /* We don't need this region anymore. */
3494 alloc_reg_release(env->alloc, region);
3495 /* duplicate entries are included in the callback list, because
3496 * there is a counterpart registration by our caller that needs
3497 * to be doubly-removed (with callbacks perhaps). */
3498 if(!(cb = (struct service_callback*)regional_alloc(
3499 sq->region, sizeof(*cb)))) {
3500 return NULL((void *)0);
3501 }
3502 }
3503 /* add callback to list of callbacks */
3504 cb->cb = callback;
3505 cb->cb_arg = callback_arg;
3506 cb->next = sq->cblist;
3507 sq->cblist = cb;
3508 return sq;
3509}
3510
3511/** remove callback from list */
3512static void
3513callback_list_remove(struct serviced_query* sq, void* cb_arg)
3514{
3515 struct service_callback** pp = &sq->cblist;
3516 while(*pp) {
3517 if((*pp)->cb_arg == cb_arg) {
3518 struct service_callback* del = *pp;
3519 *pp = del->next;
3520 return;
3521 }
3522 pp = &(*pp)->next;
3523 }
3524}
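
callback_list_remove uses the pointer-to-pointer idiom to unlink a node without tracking a separate "previous" pointer. A generic, self-contained version (illustrative types and names only, not from the file) looks like this:

struct node { int key; struct node* next; };

/* Remove the first node whose key matches; *pp always points at the
 * "next" field to rewrite, so the head needs no special case. */
static void
list_remove_first(struct node** head, int key)
{
	struct node** pp = head;
	while(*pp) {
		if((*pp)->key == key) {
			struct node* del = *pp;
			*pp = del->next;
			/* the real code does not free here because the entry is
			 * region-allocated; a heap-based list would free(del) */
			return;
		}
		pp = &(*pp)->next;
	}
}
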
3525
3526void outnet_serviced_query_stop(struct serviced_query* sq, void* cb_arg)
3527{
3528 if(!sq)
3529 return;
3530 callback_list_remove(sq, cb_arg);
3531 /* if callbacks() routine scheduled deletion, let it do that */
3532 if(!sq->cblist && !sq->busy && !sq->to_be_deleted) {
3533 (void)rbtree_delete(sq->outnet->serviced, sq);
3534 serviced_delete(sq);
3535 }
3536}
3537
3538/** create fd to send to this destination */
3539static int
3540fd_for_dest(struct outside_network* outnet, struct sockaddr_storage* to_addr,
3541 socklen_t to_addrlen)
3542{
3543 struct sockaddr_storage* addr;
3544 socklen_t addrlen;
3545 int i, try, pnum, dscp;
3546 struct port_if* pif;
3547
3548 /* create fd */
3549 dscp = outnet->ip_dscp;
3550 for(try = 0; try<1000; try++) {
3551 int port = 0;
3552 int freebind = 0;
3553 int noproto = 0;
3554 int inuse = 0;
3555 int fd = -1;
3556
3557 /* select interface */
3558 if(addr_is_ip6(to_addr, to_addrlen)) {
3559 if(outnet->num_ip6 == 0) {
3560 char to[64];
3561 addr_to_str(to_addr, to_addrlen, to, sizeof(to));
3562 verbose(VERB_QUERY, "need ipv6 to send, but no ipv6 outgoing interfaces, for %s", to);
3563 return -1;
3564 }
3565 i = ub_random_max(outnet->rnd, outnet->num_ip6);
3566 pif = &outnet->ip6_ifs[i];
3567 } else {
3568 if(outnet->num_ip4 == 0) {
3569 char to[64];
3570 addr_to_str(to_addr, to_addrlen, to, sizeof(to));
3571 verbose(VERB_QUERY, "need ipv4 to send, but no ipv4 outgoing interfaces, for %s", to);
3572 return -1;
3573 }
3574 i = ub_random_max(outnet->rnd, outnet->num_ip4);
3575 pif = &outnet->ip4_ifs[i];
3576 }
3577 addr = &pif->addr;
3578 addrlen = pif->addrlen;
3579#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION1
3580 pnum = ub_random_max(outnet->rnd, pif->avail_total);
3581 if(pnum < pif->inuse) {
3582 /* port already open */
3583 port = pif->out[pnum]->number;
3584 } else {
3585 /* unused ports in start part of array */
3586 port = pif->avail_ports[pnum - pif->inuse];
3587 }
3588#else
3589 pnum = port = 0;
3590#endif
3591 if(addr_is_ip6(to_addr, to_addrlen)) {
3592 struct sockaddr_in6 sa = *(struct sockaddr_in6*)addr;
3593 sa.sin6_port = (in_port_t)htons((uint16_t)port);
3594 fd = create_udp_sock(AF_INET624, SOCK_DGRAM2,
3595 (struct sockaddr*)&sa, addrlen, 1, &inuse, &noproto,
3596 0, 0, 0, NULL((void *)0), 0, freebind, 0, dscp);
3597 } else {
3598 struct sockaddr_in* sa = (struct sockaddr_in*)addr;
3599 sa->sin_port = (in_port_t)htons((uint16_t)port);
3600 fd = create_udp_sock(AF_INET2, SOCK_DGRAM2,
3601 (struct sockaddr*)addr, addrlen, 1, &inuse, &noproto,
3602 0, 0, 0, NULL((void *)0), 0, freebind, 0, dscp);
3603 }
3604 if(fd != -1) {
3605 return fd;
3606 }
3607 if(!inuse) {
3608 return -1;
3609 }
3610 }
3611 /* too many tries */
3612 log_err("cannot send probe, ports are in use");
3613 return -1;
3614}
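
The port choice in the loop above (taken only when explicit port randomisation is compiled in) can be condensed into the following hypothetical helper; it is not in the file, but it uses only fields of struct port_if that appear in this listing.

/* Hypothetical helper: out[0..inuse) holds ports that are already open,
 * avail_ports[] holds the remaining unused port numbers. */
static int
pick_source_port(struct port_if* pif, struct ub_randstate* rnd)
{
	int pnum = ub_random_max(rnd, pif->avail_total);
	if(pnum < pif->inuse)
		return pif->out[pnum]->number;          /* reuse an already open port */
	return pif->avail_ports[pnum - pif->inuse];     /* take a fresh one */
}
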
3615
3616struct comm_point*
3617outnet_comm_point_for_udp(struct outside_network* outnet,
3618 comm_point_callback_type* cb, void* cb_arg,
3619 struct sockaddr_storage* to_addr, socklen_t to_addrlen)
3620{
3621 struct comm_point* cp;
3622 int fd = fd_for_dest(outnet, to_addr, to_addrlen);
3623 if(fd == -1) {
3624 return NULL((void *)0);
3625 }
3626 cp = comm_point_create_udp(outnet->base, fd, outnet->udp_buff, 0,
3627 cb, cb_arg, NULL((void *)0));
3628 if(!cp) {
3629 log_err("malloc failure");
3630 close(fd);
3631 return NULL((void *)0);
3632 }
3633 return cp;
3634}
3635
3636/** setup SSL for comm point */
3637static int
3638setup_comm_ssl(struct comm_point* cp, struct outside_network* outnet,
3639 int fd, char* host)
3640{
3641 cp->ssl = outgoing_ssl_fd(outnet->sslctx, fd);
3642 if(!cp->ssl) {
3643 log_err("cannot create SSL object");
3644 return 0;
3645 }
3646#ifdef USE_WINSOCK
3647 comm_point_tcp_win_bio_cb(cp, cp->ssl);
3648#endif
3649 cp->ssl_shake_state = comm_ssl_shake_write;
3650 /* https verification */
3651#ifdef HAVE_SSL
3652 if(outnet->tls_use_sni) {
3653 (void)SSL_set_tlsext_host_name(cp->ssl, host)SSL_ctrl(cp->ssl,55,0,(char *)host);
3654 }
3655#endif
3656#ifdef HAVE_SSL_SET1_HOST1
3657 if((SSL_CTX_get_verify_mode(outnet->sslctx)&SSL_VERIFY_PEER0x01)) {
3658 /* because we set SSL_VERIFY_PEER, in netevent in
3659 * ssl_handshake, it'll check if the certificate
3660 * verification has succeeded */
3661 /* SSL_VERIFY_PEER is set on the sslctx */
3662 /* and the certificates to verify with are loaded into
3663 * it with SSL_load_verify_locations or
3664 * SSL_CTX_set_default_verify_paths */
3665 /* setting the hostname makes openssl verify the
3666 * host name in the x509 certificate in the
3667 * SSL connection*/
3668 if(!SSL_set1_host(cp->ssl, host)) {
3669 log_err("SSL_set1_host failed");
3670 return 0;
3671 }
3672 }
3673#elif defined(HAVE_X509_VERIFY_PARAM_SET1_HOST1)
3674 /* openssl 1.0.2 has this function that can be used for
3675 * set1_host like verification */
3676 if((SSL_CTX_get_verify_mode(outnet->sslctx)&SSL_VERIFY_PEER0x01)) {
3677 X509_VERIFY_PARAM* param = SSL_get0_param(cp->ssl);
3678# ifdef X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS0x4
3679 X509_VERIFY_PARAM_set_hostflags(param, X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS0x4);
3680# endif
3681 if(!X509_VERIFY_PARAM_set1_host(param, host, strlen(host))) {
3682 log_err("X509_VERIFY_PARAM_set1_host failed");
3683 return 0;
3684 }
3685 }
3686#else
3687 (void)host;
3688#endif /* HAVE_SSL_SET1_HOST */
3689 return 1;
3690}
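
A minimal standalone sketch of the SNI and hostname-verification steps done in setup_comm_ssl, assuming OpenSSL 1.1.0+ and an already configured SSL_CTX (SSL_VERIFY_PEER set, trust anchors loaded); the helper name and host value are illustrative, not taken from the file.

#include <openssl/ssl.h>

static SSL*
tls_client_for_fd(SSL_CTX* ctx, int fd, const char* host)
{
	SSL* ssl = SSL_new(ctx);
	if(!ssl) return NULL;
	if(SSL_set_fd(ssl, fd) != 1 ||
		SSL_set_tlsext_host_name(ssl, host) != 1 || /* SNI */
		SSL_set1_host(ssl, host) != 1) {            /* name check on peer cert */
		SSL_free(ssl);
		return NULL;
	}
	SSL_set_connect_state(ssl);
	return ssl;
}
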
3691
3692struct comm_point*
3693outnet_comm_point_for_tcp(struct outside_network* outnet,
3694 comm_point_callback_type* cb, void* cb_arg,
3695 struct sockaddr_storage* to_addr, socklen_t to_addrlen,
3696 sldns_buffer* query, int timeout, int ssl, char* host)
3697{
3698 struct comm_point* cp;
3699 int fd = outnet_get_tcp_fd(to_addr, to_addrlen, outnet->tcp_mss, outnet->ip_dscp);
3700 if(fd == -1) {
3701 return 0;
3702 }
3703 fd_set_nonblock(fd);
3704 if(!outnet_tcp_connect(fd, to_addr, to_addrlen)) {
3705 /* outnet_tcp_connect has closed fd on error for us */
3706 return 0;
3707 }
3708 cp = comm_point_create_tcp_out(outnet->base, 65552, cb, cb_arg);
3709 if(!cp) {
3710 log_err("malloc failure");
3711 close(fd);
3712 return 0;
3713 }
3714 cp->repinfo.remote_addrlen = to_addrlen;
3715 memcpy(&cp->repinfo.remote_addr, to_addr, to_addrlen);
3716
3717 /* setup for SSL (if needed) */
3718 if(ssl) {
3719 if(!setup_comm_ssl(cp, outnet, fd, host)) {
3720 log_err("cannot setup XoT");
3721 comm_point_delete(cp);
3722 return NULL((void *)0);
3723 }
3724 }
3725
3726 /* set timeout on TCP connection */
3727 comm_point_start_listening(cp, fd, timeout);
3728 /* copy scratch buffer to cp->buffer */
3729 sldns_buffer_copy(cp->buffer, query);
3730 return cp;
3731}
3732
3733/** setup the User-Agent HTTP header based on http-user-agent configuration */
3734static void
3735setup_http_user_agent(sldns_buffer* buf, struct config_file* cfg)
3736{
3737 if(cfg->hide_http_user_agent) return;
3738 if(cfg->http_user_agent==NULL((void *)0) || cfg->http_user_agent[0] == 0) {
3739 sldns_buffer_printf(buf, "User-Agent: %s/%s\r\n", PACKAGE_NAME"unbound",
3740 PACKAGE_VERSION"1.18.0");
3741 } else {
3742 sldns_buffer_printf(buf, "User-Agent: %s\r\n", cfg->http_user_agent);
3743 }
3744}
3745
3746/** setup http request headers in buffer for sending query to destination */
3747static int
3748setup_http_request(sldns_buffer* buf, char* host, char* path,
3749 struct config_file* cfg)
3750{
3751 sldns_buffer_clear(buf);
3752 sldns_buffer_printf(buf, "GET /%s HTTP/1.1\r\n", path);
3753 sldns_buffer_printf(buf, "Host: %s\r\n", host);
3754 setup_http_user_agent(buf, cfg);
3755 /* We do not really do multiple queries per connection,
3756 * but this header setting is also not needed.
3757 * sldns_buffer_printf(buf, "Connection: close\r\n") */
3758 sldns_buffer_printf(buf, "\r\n");
3759 if(sldns_buffer_position(buf)+10 > sldns_buffer_capacity(buf))
3760 return 0; /* somehow buffer too short, but it is about 60K
3761 and the request is only a couple bytes long. */
3762 sldns_buffer_flip(buf);
3763 return 1;
3764}
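
For a hypothetical host "dns.example.net" and path "dns-query", the buffer assembled above would hold roughly the following, each line terminated by CRLF; the User-Agent line comes from the package name and version unless http-user-agent overrides it or hide-http-user-agent suppresses it:

    GET /dns-query HTTP/1.1
    Host: dns.example.net
    User-Agent: unbound/1.18.0
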
3765
3766struct comm_point*
3767outnet_comm_point_for_http(struct outside_network* outnet,
3768 comm_point_callback_type* cb, void* cb_arg,
3769 struct sockaddr_storage* to_addr, socklen_t to_addrlen, int timeout,
3770 int ssl, char* host, char* path, struct config_file* cfg)
3771{
3772 /* cp calls cb with err=NETEVENT_DONE when transfer is done */
3773 struct comm_point* cp;
3774 int fd = outnet_get_tcp_fd(to_addr, to_addrlen, outnet->tcp_mss, outnet->ip_dscp);
3775 if(fd == -1) {
3776 return 0;
3777 }
3778 fd_set_nonblock(fd);
3779 if(!outnet_tcp_connect(fd, to_addr, to_addrlen)) {
3780 /* outnet_tcp_connect has closed fd on error for us */
3781 return 0;
3782 }
3783 cp = comm_point_create_http_out(outnet->base, 65552, cb, cb_arg,
3784 outnet->udp_buff);
3785 if(!cp) {
3786 log_err("malloc failure");
3787 close(fd);
3788 return 0;
3789 }
3790 cp->repinfo.remote_addrlen = to_addrlen;
3791 memcpy(&cp->repinfo.remote_addr, to_addr, to_addrlen);
3792
3793 /* setup for SSL (if needed) */
3794 if(ssl) {
3795 if(!setup_comm_ssl(cp, outnet, fd, host)) {
3796 log_err("cannot setup https");
3797 comm_point_delete(cp);
3798 return NULL((void *)0);
3799 }
3800 }
3801
3802 /* set timeout on TCP connection */
3803 comm_point_start_listening(cp, fd, timeout);
3804
3805 /* setup http request in cp->buffer */
3806 if(!setup_http_request(cp->buffer, host, path, cfg)) {
3807 log_err("error setting up http request");
3808 comm_point_delete(cp);
3809 return NULL((void *)0);
3810 }
3811 return cp;
3812}
3813
3814/** get memory used by waiting tcp entry (in use or not) */
3815static size_t
3816waiting_tcp_get_mem(struct waiting_tcp* w)
3817{
3818 size_t s;
3819 if(!w) return 0;
3820 s = sizeof(*w) + w->pkt_len;
3821 if(w->timer)
3822 s += comm_timer_get_mem(w->timer);
3823 return s;
3824}
3825
3826/** get memory used by port if */
3827static size_t
3828if_get_mem(struct port_if* pif)
3829{
3830 size_t s;
3831 int i;
3832 s = sizeof(*pif) +
3833#ifndef DISABLE_EXPLICIT_PORT_RANDOMISATION1
3834 sizeof(int)*pif->avail_total +
3835#endif
3836 sizeof(struct port_comm*)*pif->maxout;
3837 for(i=0; i<pif->inuse; i++)
3838 s += sizeof(*pif->out[i]) +
3839 comm_point_get_mem(pif->out[i]->cp);
3840 return s;
3841}
3842
3843/** get memory used by waiting udp */
3844static size_t
3845waiting_udp_get_mem(struct pending* w)
3846{
3847 size_t s;
3848 s = sizeof(*w) + comm_timer_get_mem(w->timer) + w->pkt_len;
3849 return s;
3850}
3851
3852size_t outnet_get_mem(struct outside_network* outnet)
3853{
3854 size_t i;
3855 int k;
3856 struct waiting_tcp* w;
3857 struct pending* u;
3858 struct serviced_query* sq;
3859 struct service_callback* sb;
3860 struct port_comm* pc;
3861 size_t s = sizeof(*outnet) + sizeof(*outnet->base) +
3862 sizeof(*outnet->udp_buff) +
3863 sldns_buffer_capacity(outnet->udp_buff);
3864 /* second buffer is not ours */
3865 for(pc = outnet->unused_fds; pc; pc = pc->next) {
3866 s += sizeof(*pc) + comm_point_get_mem(pc->cp);
3867 }
3868 for(k=0; k<outnet->num_ip4; k++)
3869 s += if_get_mem(&outnet->ip4_ifs[k]);
3870 for(k=0; k<outnet->num_ip6; k++)
3871 s += if_get_mem(&outnet->ip6_ifs[k]);
3872 for(u=outnet->udp_wait_first; u; u=u->next_waiting)
3873 s += waiting_udp_get_mem(u);
3874
3875 s += sizeof(struct pending_tcp*)*outnet->num_tcp;
3876 for(i=0; i<outnet->num_tcp; i++) {
3877 s += sizeof(struct pending_tcp);
3878 s += comm_point_get_mem(outnet->tcp_conns[i]->c);
3879 if(outnet->tcp_conns[i]->query)
3880 s += waiting_tcp_get_mem(outnet->tcp_conns[i]->query);
3881 }
3882 for(w=outnet->tcp_wait_first; w; w = w->next_waiting)
3883 s += waiting_tcp_get_mem(w);
3884 s += sizeof(*outnet->pending);
3885 s += (sizeof(struct pending) + comm_timer_get_mem(NULL((void *)0))) *
3886 outnet->pending->count;
3887 s += sizeof(*outnet->serviced);
3888 s += outnet->svcd_overhead;
3889 RBTREE_FOR(sq, struct serviced_query*, outnet->serviced) {
3890 s += sizeof(*sq) + sq->qbuflen;
3891 for(sb = sq->cblist; sb; sb = sb->next)
3892 s += sizeof(*sb);
3893 }
3894 return s;
3895}
3896
3897size_t
3898serviced_get_mem(struct serviced_query* sq)
3899{
3900 struct service_callback* sb;
3901 size_t s;
3902 s = sizeof(*sq) + sq->qbuflen;
3903 for(sb = sq->cblist; sb; sb = sb->next)
3904 s += sizeof(*sb);
3905 if(sq->status == serviced_query_UDP_EDNS ||
3906 sq->status == serviced_query_UDP ||
3907 sq->status == serviced_query_UDP_EDNS_FRAG ||
3908 sq->status == serviced_query_UDP_EDNS_fallback) {
3909 s += sizeof(struct pending);
3910 s += comm_timer_get_mem(NULL((void *)0));
3911 } else {
3912 /* does not have size of the pkt pointer */
3913 /* always has a timer except on malloc failures */
3914
3915 /* these sizes are part of the main outside network mem */
3916 /*
3917 s += sizeof(struct waiting_tcp);
3918 s += comm_timer_get_mem(NULL);
3919 */
3920 }
3921 return s;
3922}
3923