Bug Summary

File: src/usr.sbin/ntpd/ntp.c
Warning: line 234, column 25
Dereference of null pointer
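
In plain terms, the path the analyzer reports is: 'pfd' is initialized to a null pointer at line 82; inside the main poll loop the analyzer follows the case where the "new_cnt > pfd_elms" test at line 219 is false, so the reallocarray() call that would allocate 'pfd' is skipped and the write to pfd[PFD_PIPE_MAIN].fd at line 234 dereferences NULL. The stand-alone sketch below (hypothetical names, not the ntpd code) reduces the flagged pattern to its core and should draw a similar null-dereference warning from the core checker:

#include <stdlib.h>
#include <string.h>

struct pollfd_like { int fd; short events; };	/* stand-in for struct pollfd */

/* Hypothetical driver loop, reduced from the shape of ntp_main(). */
void
loop(unsigned int want)
{
	struct pollfd_like *pfd = NULL;		/* starts as a null pointer */
	unsigned int pfd_elms = 0;
	void *newp;

	for (;;) {
		/* The analyzer also explores the path where this test is
		 * false, and on that path pfd is never allocated. */
		if (want > pfd_elms) {
			if ((newp = reallocarray(pfd, want, sizeof(*pfd))) == NULL)
				exit(1);
			pfd = newp;
			pfd_elms = want;
		}
		memset(pfd, 0, sizeof(*pfd) * pfd_elms);
		pfd[0].fd = -1;			/* flagged: possible NULL dereference */
	}
}

When triaging the report it is worth checking whether the assumed path is reachable in ntpd itself: on the first pass through the loop pfd_elms is 0 while new_cnt is at least PFD_MAX, so the "new_cnt <= pfd_elms" assumption at step 45 may not correspond to a real execution.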

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name ntp.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 1 -pic-is-pie -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/usr.sbin/ntpd/obj -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/usr.sbin/ntpd -internal-isystem /usr/local/llvm16/lib/clang/16/include -internal-externc-isystem /usr/include -O2 -fdebug-compilation-dir=/usr/src/usr.sbin/ntpd/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fno-jump-tables -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/scan/2024-01-11-140451-98009-1 -x c /usr/src/usr.sbin/ntpd/ntp.c
1/* $OpenBSD: ntp.c,v 1.172 2023/12/20 15:36:36 otto Exp $ */
2
3/*
4 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
5 * Copyright (c) 2004 Alexander Guy <alexander.guy@andern.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20#include <sys/types.h>
21#include <sys/time.h>
22#include <sys/stat.h>
23#include <errno.h>
24#include <fcntl.h>
25#include <paths.h>
26#include <poll.h>
27#include <pwd.h>
28#include <signal.h>
29#include <stdlib.h>
30#include <string.h>
31#include <syslog.h>
32#include <time.h>
33#include <unistd.h>
34#include <err.h>
35
36#include "ntpd.h"
37
38#define PFD_PIPE_MAIN 0
39#define PFD_PIPE_DNS 1
40#define PFD_SOCK_CTL 2
41#define PFD_MAX 3
42
43volatile sig_atomic_t ntp_quit = 0;
44struct imsgbuf *ibuf_main;
45static struct imsgbuf *ibuf_dns;
46struct ntpd_conf *conf;
47struct ctl_conns ctl_conns;
48u_int peer_cnt;
49u_int sensors_cnt;
50extern u_int constraint_cnt;
51
52void ntp_sighdlr(int);
53int ntp_dispatch_imsg(void);
54int ntp_dispatch_imsg_dns(void);
55void peer_add(struct ntp_peer *);
56void peer_remove(struct ntp_peer *);
57int inpool(struct sockaddr_storage *,
58 struct sockaddr_storage[MAX_SERVERS_DNS8], size_t);
59
60void
61ntp_sighdlr(int sig)
62{
63 switch (sig) {
64 case SIGINT2:
65 case SIGTERM15:
66 ntp_quit = 1;
67 break;
68 }
69}
70
71void
72ntp_main(struct ntpd_conf *nconf, struct passwd *pw, int argc, char **argv)
73{
74 int a, b, nfds, i, j, idx_peers, timeout;
75 int nullfd, pipe_dns[2], idx_clients;
76 int ctls;
77 int fd_ctl;
78 int clear_cdns;
79 u_int pfd_elms = 0, idx2peer_elms = 0;
80 u_int listener_cnt, new_cnt, sent_cnt, trial_cnt;
81 u_int ctl_cnt;
82 struct pollfd *pfd = NULL((void *)0);
1
'pfd' initialized to a null pointer value
83 struct servent *se;
84 struct listen_addr *la;
85 struct ntp_peer *p;
86 struct ntp_peer **idx2peer = NULL((void *)0);
87 struct ntp_sensor *s, *next_s;
88 struct constraint *cstr;
89 struct timespec tp;
90 struct stat stb;
91 struct ctl_conn *cc;
92 time_t nextaction, last_sensor_scan = 0, now;
93 time_t last_action = 0, interval, last_cdns_reset = 0;
94 void *newp;
95
96 if (socketpair(AF_UNIX1, SOCK_STREAM1 | SOCK_CLOEXEC0x8000, PF_UNSPEC0,
2
Assuming the condition is false
3
Taking false branch
97 pipe_dns) == -1)
98 fatal("socketpair");
99
100 start_child(NTPDNS_PROC_NAME"ntp_dns", pipe_dns[1], argc, argv);
101
102 log_init(nconf->debug ? LOG_TO_STDERR(1<<0) : LOG_TO_SYSLOG(1<<1), nconf->verbose,
4
Assuming field 'debug' is not equal to 0
5
'?' condition is true
103 LOG_DAEMON(3<<3));
104 if (!nconf->debug
5.1
Field 'debug' is not equal to 0
&& setsid() == -1)
105 fatal("setsid");
106 log_procinit("ntp");
107
108 if ((se = getservbyname("ntp", "udp")) == NULL((void *)0))
6
Assuming the condition is false
7
Taking false branch
109 fatal("getservbyname");
110
111 /* Start control socket. */
112 if ((fd_ctl = control_init(CTLSOCKET"/var/run/ntpd.sock")) == -1)
8
Assuming the condition is false
9
Taking false branch
113 fatalx("control socket init failed");
114 if (control_listen(fd_ctl) == -1)
10
Assuming the condition is false
11
Taking false branch
115 fatalx("control socket listen failed");
116 if ((nullfd = open("/dev/null", O_RDWR0x0002)) == -1)
12
Assuming the condition is false
13
Taking false branch
117 fatal(NULL((void *)0));
118
119 if (stat(pw->pw_dir, &stb) == -1) {
14
Assuming the condition is false
120 fatal("privsep dir %s could not be opened", pw->pw_dir);
121 }
122 if (stb.st_uid != 0 || (stb.st_mode & (S_IWGRP0000020|S_IWOTH0000002)) != 0) {
15
Assuming field 'st_uid' is equal to 0
16
Assuming the condition is false
17
Taking false branch
123 fatalx("bad privsep dir %s permissions: %o",
124 pw->pw_dir, stb.st_mode);
125 }
126 if (chroot(pw->pw_dir) == -1)
18
Assuming the condition is false
19
Taking false branch
127 fatal("chroot");
128 if (chdir("/") == -1)
20
Assuming the condition is false
21
Taking false branch
129 fatal("chdir(\"/\")");
130
131 if (!nconf->debug
21.1
Field 'debug' is not equal to 0
) {
22
Taking false branch
132 dup2(nullfd, STDIN_FILENO0);
133 dup2(nullfd, STDOUT_FILENO1);
134 dup2(nullfd, STDERR_FILENO2);
135 }
136 close(nullfd);
137
138 setproctitle("ntp engine");
139
140 conf = nconf;
141 setup_listeners(se, conf, &listener_cnt);
142
143 if (setgroups(1, &pw->pw_gid) ||
23
Assuming the condition is false
26
Taking false branch
144 setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
24
Assuming the condition is false
145 setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
25
Assuming the condition is false
146 fatal("can't drop privileges");
147
148 endservent();
149
150 /* The ntp process will want to open NTP client sockets -> "inet" */
151 if (pledge("stdio inet", NULL((void *)0)) == -1)
27
Assuming the condition is false
28
Taking false branch
152 err(1, "pledge");
153
154 signal(SIGTERM15, ntp_sighdlr);
155 signal(SIGINT2, ntp_sighdlr);
156 signal(SIGPIPE13, SIG_IGN(void (*)(int))1);
157 signal(SIGHUP1, SIG_IGN(void (*)(int))1);
158 signal(SIGCHLD20, SIG_DFL(void (*)(int))0);
159
160 if ((ibuf_main = malloc(sizeof(struct imsgbuf))) == NULL((void *)0))
29
Assuming the condition is false
30
Taking false branch
161 fatal(NULL((void *)0));
162 imsg_init(ibuf_main, PARENT_SOCK_FILENO(2 + 1));
163 if ((ibuf_dns = malloc(sizeof(struct imsgbuf))) == NULL((void *)0))
31
Assuming the condition is false
32
Taking false branch
164 fatal(NULL((void *)0));
165 imsg_init(ibuf_dns, pipe_dns[0]);
166
167 constraint_cnt = 0;
168 conf->constraint_median = 0;
169 conf->constraint_last = getmonotime();
170 TAILQ_FOREACH(cstr, &conf->constraints, entry)
33
Assuming 'cstr' is equal to null
34
Loop condition is false. Execution continues on line 173
171 constraint_cnt += constraint_init(cstr);
172
173 TAILQ_FOREACH(p, &conf->ntp_peers, entry)
35
Assuming 'p' is equal to null
36
Loop condition is false. Execution continues on line 176
174 client_peer_init(p);
175
176 memset(&conf->status, 0, sizeof(conf->status));
177
178 conf->freq.num = 0;
179 conf->freq.samples = 0;
180 conf->freq.x = 0.0;
181 conf->freq.xx = 0.0;
182 conf->freq.xy = 0.0;
183 conf->freq.y = 0.0;
184 conf->freq.overall_offset = 0.0;
185
186 conf->status.synced = 0;
187 clock_getres(CLOCK_REALTIME0, &tp);
188 b = 1000000000 / tp.tv_nsec; /* convert to Hz */
189 for (a = 0; b > 1; a--, b >>= 1)
37
Assuming 'b' is <= 1
38
Loop condition is false. Execution continues on line 191
190 ;
191 conf->status.precision = a;
192 conf->scale = 1;
193
194 TAILQ_INIT(&ctl_conns);
39
Loop condition is false. Exiting loop
195 sensor_init();
196
197 log_info("ntp engine ready");
198
199 ctl_cnt = 0;
200 peer_cnt = 0;
201 TAILQ_FOREACH(p, &conf->ntp_peers, entry)
40
Assuming 'p' is equal to null
41
Loop condition is false. Execution continues on line 204
202 peer_cnt++;
203
204 while (ntp_quit == 0) {
42
Assuming 'ntp_quit' is equal to 0
43
Loop condition is true. Entering loop body
205 if (peer_cnt
43.1
'peer_cnt' is <= 'idx2peer_elms'
> idx2peer_elms) {
44
Taking false branch
206 if ((newp = reallocarray(idx2peer, peer_cnt,
207 sizeof(*idx2peer))) == NULL((void *)0)) {
208 /* panic for now */
209 log_warn("could not resize idx2peer from %u -> "
210 "%u entries", idx2peer_elms, peer_cnt);
211 fatalx("exiting");
212 }
213 idx2peer = newp;
214 idx2peer_elms = peer_cnt;
215 }
216
217 new_cnt = PFD_MAX3 +
218 peer_cnt + listener_cnt + ctl_cnt;
219 if (new_cnt > pfd_elms) {
45
Assuming 'new_cnt' is <= 'pfd_elms'
46
Taking false branch
220 if ((newp = reallocarray(pfd, new_cnt,
221 sizeof(*pfd))) == NULL((void *)0)) {
222 /* panic for now */
223 log_warn("could not resize pfd from %u -> "
224 "%u entries", pfd_elms, new_cnt);
225 fatalx("exiting");
226 }
227 pfd = newp;
228 pfd_elms = new_cnt;
229 }
230
231 memset(pfd, 0, sizeof(*pfd) * pfd_elms);
232 memset(idx2peer, 0, sizeof(*idx2peer) * idx2peer_elms);
233 nextaction = getmonotime() + 900;
234 pfd[PFD_PIPE_MAIN0].fd = ibuf_main->fd;
47
Dereference of null pointer
235 pfd[PFD_PIPE_MAIN0].events = POLLIN0x0001;
236 pfd[PFD_PIPE_DNS1].fd = ibuf_dns->fd;
237 pfd[PFD_PIPE_DNS1].events = POLLIN0x0001;
238 pfd[PFD_SOCK_CTL2].fd = fd_ctl;
239 pfd[PFD_SOCK_CTL2].events = POLLIN0x0001;
240
241 i = PFD_MAX3;
242 TAILQ_FOREACH(la, &conf->listen_addrs, entry) {
243 pfd[i].fd = la->fd;
244 pfd[i].events = POLLIN0x0001;
245 i++;
246 }
247
248 idx_peers = i;
249 sent_cnt = trial_cnt = 0;
250 TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
251 if (!p->trusted && constraint_cnt &&
252 conf->constraint_median == 0)
253 continue;
254
255 if (p->next > 0 && p->next <= getmonotime()) {
256 if (p->state > STATE_DNS_INPROGRESS)
257 trial_cnt++;
258 if (client_query(p) == 0)
259 sent_cnt++;
260 }
261 if (p->deadline > 0 && p->deadline <= getmonotime()) {
262 timeout = 300;
263 log_debug("no reply from %s received in time, "
264 "next query %ds", log_ntp_addr( p->addr),
265 timeout);
266 if (p->trustlevel >= TRUSTLEVEL_BADPEER6 &&
267 (p->trustlevel /= 2) < TRUSTLEVEL_BADPEER6)
268 log_info("peer %s now invalid",
269 log_ntp_addr(p->addr));
270 if (client_nextaddr(p) == 1) {
271 peer_addr_head_clear(p);
272 client_nextaddr(p);
273 }
274 set_next(p, timeout);
275 }
276 if (p->senderrors > MAX_SEND_ERRORS3) {
277 log_debug("failed to send query to %s, "
278 "next query %ds", log_ntp_addr(p->addr),
279 INTERVAL_QUERY_PATHETIC60);
280 p->senderrors = 0;
281 if (client_nextaddr(p) == 1) {
282 peer_addr_head_clear(p);
283 client_nextaddr(p);
284 }
285 set_next(p, INTERVAL_QUERY_PATHETIC60);
286 }
287 if (p->next > 0 && p->next < nextaction)
288 nextaction = p->next;
289 if (p->deadline > 0 && p->deadline < nextaction)
290 nextaction = p->deadline;
291
292 if (p->state == STATE_QUERY_SENT &&
293 p->query.fd != -1) {
294 pfd[i].fd = p->query.fd;
295 pfd[i].events = POLLIN0x0001;
296 idx2peer[i - idx_peers] = p;
297 i++;
298 }
299 }
300 idx_clients = i;
301
302 if (!TAILQ_EMPTY(&conf->ntp_conf_sensors) &&
303 (conf->trusted_sensors || constraint_cnt == 0 ||
304 conf->constraint_median != 0)) {
305 if (last_sensor_scan == 0 ||
306 last_sensor_scan + SENSOR_SCAN_INTERVAL(1*60) <= getmonotime()) {
307 sensors_cnt = sensor_scan();
308 last_sensor_scan = getmonotime();
309 }
310 if (sensors_cnt == 0 &&
311 nextaction > last_sensor_scan + SENSOR_SCAN_INTERVAL(1*60))
312 nextaction = last_sensor_scan + SENSOR_SCAN_INTERVAL(1*60);
313 sensors_cnt = 0;
314 TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
315 if (conf->settime && s->offsets[0].offset)
316 priv_settime(s->offsets[0].offset, NULL((void *)0));
317 sensors_cnt++;
318 if (s->next > 0 && s->next < nextaction)
319 nextaction = s->next;
320 }
321 }
322
323 if (conf->settime &&
324 ((trial_cnt > 0 && sent_cnt == 0) ||
325 (peer_cnt == 0 && sensors_cnt == 0)))
326 priv_settime(0, "no valid peers configured");
327
328 clear_cdns = 1;
329 TAILQ_FOREACH(cstr, &conf->constraints, entry) {
330 constraint_query(cstr, conf->status.synced);
331 if (cstr->state <= STATE_QUERY_SENT)
332 clear_cdns = 0;
333 }
334
335 if (ibuf_main->w.queued > 0)
336 pfd[PFD_PIPE_MAIN0].events |= POLLOUT0x0004;
337 if (ibuf_dns->w.queued > 0)
338 pfd[PFD_PIPE_DNS1].events |= POLLOUT0x0004;
339
340 TAILQ_FOREACH(cc, &ctl_conns, entry) {
341 pfd[i].fd = cc->ibuf.fd;
342 pfd[i].events = POLLIN0x0001;
343 if (cc->ibuf.w.queued > 0)
344 pfd[i].events |= POLLOUT0x0004;
345 i++;
346 }
347 ctls = i;
348
349 now = getmonotime();
350 if (conf->constraint_median == 0 && clear_cdns &&
351 now - last_cdns_reset > CONSTRAINT_SCAN_INTERVAL(15*60)) {
352 log_debug("Reset constraint info");
353 constraint_reset();
354 last_cdns_reset = now;
355 nextaction = now + CONSTRAINT_RETRY_INTERVAL(15);
356 }
357 timeout = nextaction - now;
358 if (timeout < 0)
359 timeout = 0;
360
361 if ((nfds = poll(pfd, i, timeout ? timeout * 1000 : 1)) == -1)
362 if (errno(*__errno()) != EINTR4) {
363 log_warn("poll error");
364 ntp_quit = 1;
365 }
366
367 if (nfds > 0 && (pfd[PFD_PIPE_MAIN0].revents & POLLOUT0x0004))
368 if (msgbuf_write(&ibuf_main->w) <= 0 &&
369 errno(*__errno()) != EAGAIN35) {
370 log_warn("pipe write error (to parent)");
371 ntp_quit = 1;
372 }
373
374 if (nfds > 0 && pfd[PFD_PIPE_MAIN0].revents & (POLLIN0x0001|POLLERR0x0008)) {
375 nfds--;
376 if (ntp_dispatch_imsg() == -1) {
377 log_debug("pipe read error (from main)");
378 ntp_quit = 1;
379 }
380 }
381
382 if (nfds > 0 && (pfd[PFD_PIPE_DNS1].revents & POLLOUT0x0004))
383 if (msgbuf_write(&ibuf_dns->w) <= 0 &&
384 errno(*__errno()) != EAGAIN35) {
385 log_warn("pipe write error (to dns engine)");
386 ntp_quit = 1;
387 }
388
389 if (nfds > 0 && pfd[PFD_PIPE_DNS1].revents & (POLLIN0x0001|POLLERR0x0008)) {
390 nfds--;
391 if (ntp_dispatch_imsg_dns() == -1) {
392 log_warn("pipe read error (from dns engine)");
393 ntp_quit = 1;
394 }
395 }
396
397 if (nfds > 0 && pfd[PFD_SOCK_CTL2].revents & (POLLIN0x0001|POLLERR0x0008)) {
398 nfds--;
399 ctl_cnt += control_accept(fd_ctl);
400 }
401
402 for (j = PFD_MAX3; nfds > 0 && j < idx_peers; j++)
403 if (pfd[j].revents & (POLLIN0x0001|POLLERR0x0008)) {
404 nfds--;
405 if (server_dispatch(pfd[j].fd, conf) == -1) {
406 log_warn("pipe write error (conf)");
407 ntp_quit = 1;
408 }
409 }
410
411 for (; nfds > 0 && j < idx_clients; j++) {
412 if (pfd[j].revents & (POLLIN0x0001|POLLERR0x0008)) {
413 struct ntp_peer *pp = idx2peer[j - idx_peers];
414
415 nfds--;
416 switch (client_dispatch(pp, conf->settime,
417 conf->automatic)) {
418 case -1:
419 log_debug("no reply from %s "
420 "received", log_ntp_addr(pp->addr));
421 if (pp->trustlevel >=
422 TRUSTLEVEL_BADPEER6 &&
423 (pp->trustlevel /= 2) <
424 TRUSTLEVEL_BADPEER6)
425 log_info("peer %s now invalid",
426 log_ntp_addr(pp->addr));
427 break;
428 case 0: /* invalid replies are ignored */
429 break;
430 case 1:
431 last_action = now;
432 break;
433 }
434 }
435 }
436
437 for (; nfds > 0 && j < ctls; j++) {
438 nfds -= control_dispatch_msg(&pfd[j], &ctl_cnt);
439 }
440
441 for (s = TAILQ_FIRST(&conf->ntp_sensors)((&conf->ntp_sensors)->tqh_first); s != NULL((void *)0);
442 s = next_s) {
443 next_s = TAILQ_NEXT(s, entry)((s)->entry.tqe_next);
444 if (s->next <= now) {
445 last_action = now;
446 sensor_query(s);
447 }
448 }
449
450 /*
451 * Compute maximum of scale_interval(INTERVAL_QUERY_NORMAL),
452 * if we did not process a time message for three times that
453 * interval, stop advertising we're synced.
454 */
455 interval = INTERVAL_QUERY_NORMAL30 * conf->scale;
456 interval += SCALE_INTERVAL(interval)((5) > ((interval) / 10) ? (5) : ((interval) / 10)) - 1;
457 if (conf->status.synced && last_action + 3 * interval < now) {
458 log_info("clock is now unsynced due to lack of replies");
459 conf->status.synced = 0;
460 conf->scale = 1;
461 priv_dns(IMSG_UNSYNCED, NULL((void *)0), 0);
462 }
463 }
464
465 msgbuf_write(&ibuf_main->w);
466 msgbuf_clear(&ibuf_main->w);
467 free(ibuf_main);
468 msgbuf_write(&ibuf_dns->w);
469 msgbuf_clear(&ibuf_dns->w);
470 free(ibuf_dns);
471
472 log_info("ntp engine exiting");
473 exit(0);
474}
475
476int
477ntp_dispatch_imsg(void)
478{
479 struct imsg imsg;
480 int n;
481
482 if (((n = imsg_read(ibuf_main)) == -1 && errno(*__errno()) != EAGAIN35) || n == 0)
483 return (-1);
484
485 for (;;) {
486 if ((n = imsg_get(ibuf_main, &imsg)) == -1)
487 return (-1);
488
489 if (n == 0)
490 break;
491
492 switch (imsg.hdr.type) {
493 case IMSG_ADJTIME:
494 memcpy(&n, imsg.data, sizeof(n));
495 if (n == 1 && !conf->status.synced) {
496 log_info("clock is now synced");
497 conf->status.synced = 1;
498 priv_dns(IMSG_SYNCED, NULL((void *)0), 0);
499 constraint_reset();
500 } else if (n == 0 && conf->status.synced) {
501 log_info("clock is now unsynced");
502 conf->status.synced = 0;
503 priv_dns(IMSG_UNSYNCED, NULL((void *)0), 0);
504 }
505 break;
506 case IMSG_CONSTRAINT_RESULT:
507 constraint_msg_result(imsg.hdr.peerid,
508 imsg.data, imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr));
509 break;
510 case IMSG_CONSTRAINT_CLOSE:
511 constraint_msg_close(imsg.hdr.peerid,
512 imsg.data, imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr));
513 break;
514 default:
515 break;
516 }
517 imsg_free(&imsg);
518 }
519 return (0);
520}
521
522int
523inpool(struct sockaddr_storage *a,
524 struct sockaddr_storage old[MAX_SERVERS_DNS8], size_t n)
525{
526 size_t i;
527
528 for (i = 0; i < n; i++) {
529 if (a->ss_family != old[i].ss_family)
530 continue;
531 if (a->ss_family == AF_INET2) {
532 if (((struct sockaddr_in *)a)->sin_addr.s_addr ==
533 ((struct sockaddr_in *)&old[i])->sin_addr.s_addr)
534 return 1;
535 } else if (memcmp(&((struct sockaddr_in6 *)a)->sin6_addr,
536 &((struct sockaddr_in6 *)&old[i])->sin6_addr,
537 sizeof(struct sockaddr_in6)) == 0) {
538 return 1;
539 }
540 }
541 return 0;
542}
543
544int
545ntp_dispatch_imsg_dns(void)
546{
547 struct imsg imsg;
548 struct sockaddr_storage existing[MAX_SERVERS_DNS8];
549 struct ntp_peer *peer, *npeer, *tmp;
550 u_int16_t dlen;
551 u_char *p;
552 struct ntp_addr *h;
553 size_t addrcount, peercount;
554 int n;
555
556 if (((n = imsg_read(ibuf_dns)) == -1 && errno(*__errno()) != EAGAIN35) || n == 0)
557 return (-1);
558
559 for (;;) {
560 if ((n = imsg_get(ibuf_dns, &imsg)) == -1)
561 return (-1);
562
563 if (n == 0)
564 break;
565
566 switch (imsg.hdr.type) {
567 case IMSG_HOST_DNS:
568 TAILQ_FOREACH(peer, &conf->ntp_peers, entry)
569 if (peer->id == imsg.hdr.peerid)
570 break;
571 if (peer == NULL((void *)0)) {
572 log_warnx("IMSG_HOST_DNS with invalid peerID");
573 break;
574 }
575 if (peer->addr != NULL((void *)0)) {
576 log_warnx("IMSG_HOST_DNS but addr != NULL!");
577 break;
578 }
579
580 if (peer->addr_head.pool) {
581 n = 0;
582 peercount = 0;
583
584 TAILQ_FOREACH_SAFE(npeer, &conf->ntp_peers,
585 entry, tmp) {
586 if (npeer->addr_head.pool !=
587 peer->addr_head.pool)
588 continue;
589 peercount++;
590 if (npeer->id == peer->id)
591 continue;
592 if (npeer->addr != NULL((void *)0))
593 existing[n++] = npeer->addr->ss;
594 }
595 }
596
597 dlen = imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr);
598 if (dlen == 0) { /* no data -> temp error */
599 log_warnx("DNS lookup tempfail");
600 peer->state = STATE_DNS_TEMPFAIL;
601 if (conf->tmpfail++ == TRIES_AUTO_DNSFAIL4)
602 priv_settime(0, "of dns failures");
603 break;
604 }
605
606 p = (u_char *)imsg.data;
607 addrcount = dlen / (sizeof(struct sockaddr_storage) +
608 sizeof(int));
609
610 while (dlen >= sizeof(struct sockaddr_storage) +
611 sizeof(int)) {
612 if ((h = calloc(1, sizeof(struct ntp_addr))) ==
613 NULL((void *)0))
614 fatal(NULL((void *)0));
615 memcpy(&h->ss, p, sizeof(h->ss));
616 p += sizeof(h->ss);
617 dlen -= sizeof(h->ss);
618 memcpy(&h->notauth, p, sizeof(int));
619 p += sizeof(int);
620 dlen -= sizeof(int);
621 if (peer->addr_head.pool) {
622 if (peercount > addrcount) {
623 free(h);
624 continue;
625 }
626 if (inpool(&h->ss, existing,
627 n)) {
628 free(h);
629 continue;
630 }
631 log_debug("Adding address %s to %s",
632 log_ntp_addr(h), peer->addr_head.name);
633 npeer = new_peer();
634 npeer->weight = peer->weight;
635 npeer->query_addr4 = peer->query_addr4;
636 npeer->query_addr6 = peer->query_addr6;
637 h->next = NULL((void *)0);
638 npeer->addr = h;
639 npeer->addr_head.a = h;
640 npeer->addr_head.name =
641 peer->addr_head.name;
642 npeer->addr_head.pool =
643 peer->addr_head.pool;
644 client_peer_init(npeer);
645 npeer->state = STATE_DNS_DONE;
646 peer_add(npeer);
647 peercount++;
648 } else {
649 h->next = peer->addr;
650 peer->addr = h;
651 peer->addr_head.a = peer->addr;
652 peer->state = STATE_DNS_DONE;
653 }
654 }
655 if (dlen != 0)
656 fatalx("IMSG_HOST_DNS: dlen != 0");
657 if (peer->addr_head.pool)
658 peer_remove(peer);
659 else
660 client_addr_init(peer);
661 break;
662 case IMSG_CONSTRAINT_DNS:
663 constraint_msg_dns(imsg.hdr.peerid,
664 imsg.data, imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr));
665 break;
666 case IMSG_PROBE_ROOT:
667 dlen = imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr);
668 if (dlen != sizeof(int))
669 fatalx("IMSG_PROBE_ROOT");
670 memcpy(&n, imsg.data, sizeof(int));
671 if (n < 0)
672 priv_settime(0, "dns probe failed");
673 break;
674 default:
675 break;
676 }
677 imsg_free(&imsg);
678 }
679 return (0);
680}
681
682void
683peer_add(struct ntp_peer *p)
684{
685 TAILQ_INSERT_TAIL(&conf->ntp_peers, p, entry);
686 peer_cnt++;
687}
688
689void
690peer_remove(struct ntp_peer *p)
691{
692 TAILQ_REMOVE(&conf->ntp_peers, p, entry);
693 free(p);
694 peer_cnt--;
695}
696
697void
698peer_addr_head_clear(struct ntp_peer *p)
699{
700 host_dns_free(p->addr_head.a);
701 p->addr_head.a = NULL((void *)0);
702 p->addr = NULL((void *)0);
703}
704
705static void
706priv_adjfreq(double offset)
707{
708 double curtime, freq;
709
710 if (!conf->status.synced){
711 conf->freq.samples = 0;
712 return;
713 }
714
715 conf->freq.samples++;
716
717 if (conf->freq.samples <= 0)
718 return;
719
720 conf->freq.overall_offset += offset;
721 offset = conf->freq.overall_offset;
722
723 curtime = gettime_corrected();
724 conf->freq.xy += offset * curtime;
725 conf->freq.x += curtime;
726 conf->freq.y += offset;
727 conf->freq.xx += curtime * curtime;
728
729 if (conf->freq.samples % FREQUENCY_SAMPLES8 != 0)
730 return;
731
732 freq =
733 (conf->freq.xy - conf->freq.x * conf->freq.y / conf->freq.samples)
734 /
735 (conf->freq.xx - conf->freq.x * conf->freq.x / conf->freq.samples);
736
737 if (freq > MAX_FREQUENCY_ADJUST128e-5)
738 freq = MAX_FREQUENCY_ADJUST128e-5;
739 else if (freq < -MAX_FREQUENCY_ADJUST128e-5)
740 freq = -MAX_FREQUENCY_ADJUST128e-5;
741
742 imsg_compose(ibuf_main, IMSG_ADJFREQ, 0, 0, -1, &freq, sizeof(freq));
743 conf->filters |= FILTER_ADJFREQ0x01;
744 conf->freq.xy = 0.0;
745 conf->freq.x = 0.0;
746 conf->freq.y = 0.0;
747 conf->freq.xx = 0.0;
748 conf->freq.samples = 0;
749 conf->freq.overall_offset = 0.0;
750 conf->freq.num++;
751}
752
753int
754priv_adjtime(void)
755{
756 struct ntp_peer *p;
757 struct ntp_sensor *s;
758 int offset_cnt = 0, i = 0, j;
759 struct ntp_offset **offsets;
760 double offset_median;
761
762 TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
763 if (p->trustlevel < TRUSTLEVEL_BADPEER6)
764 continue;
765 if (!p->update.good)
766 return (1);
767 offset_cnt += p->weight;
768 }
769
770 TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
771 if (!s->update.good)
772 continue;
773 offset_cnt += s->weight;
774 }
775
776 if (offset_cnt == 0)
777 return (1);
778
779 if ((offsets = calloc(offset_cnt, sizeof(struct ntp_offset *))) == NULL((void *)0))
780 fatal("calloc priv_adjtime");
781
782 TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
783 if (p->trustlevel < TRUSTLEVEL_BADPEER6)
784 continue;
785 for (j = 0; j < p->weight; j++)
786 offsets[i++] = &p->update;
787 }
788
789 TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
790 if (!s->update.good)
791 continue;
792 for (j = 0; j < s->weight; j++)
793 offsets[i++] = &s->update;
794 }
795
796 qsort(offsets, offset_cnt, sizeof(struct ntp_offset *), offset_compare);
797
798 i = offset_cnt / 2;
799 if (offset_cnt % 2 == 0)
800 if (offsets[i - 1]->delay < offsets[i]->delay)
801 i -= 1;
802 offset_median = offsets[i]->offset;
803 conf->status.rootdelay = offsets[i]->delay;
804 conf->status.stratum = offsets[i]->status.stratum;
805 conf->status.leap = offsets[i]->status.leap;
806
807 imsg_compose(ibuf_main, IMSG_ADJTIME, 0, 0, -1,
808 &offset_median, sizeof(offset_median));
809
810 priv_adjfreq(offset_median);
811
812 conf->status.reftime = gettime();
813 conf->status.stratum++; /* one more than selected peer */
814 if (conf->status.stratum > NTP_MAXSTRATUM15)
815 conf->status.stratum = NTP_MAXSTRATUM15;
816 update_scale(offset_median);
817
818 conf->status.refid = offsets[i]->status.send_refid;
819
820 free(offsets);
821
822 TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
823 for (i = 0; i < OFFSET_ARRAY_SIZE8; i++)
824 p->reply[i].offset -= offset_median;
825 p->update.good = 0;
826 }
827 TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
828 for (i = 0; i < SENSOR_OFFSETS6; i++)
829 s->offsets[i].offset -= offset_median;
830 s->update.offset -= offset_median;
831 }
832
833 return (0);
834}
835
836int
837offset_compare(const void *aa, const void *bb)
838{
839 const struct ntp_offset * const *a;
840 const struct ntp_offset * const *b;
841
842 a = aa;
843 b = bb;
844
845 if ((*a)->offset < (*b)->offset)
846 return (-1);
847 else if ((*a)->offset > (*b)->offset)
848 return (1);
849 else
850 return (0);
851}
852
853void
854priv_settime(double offset, char *msg)
855{
856 if (offset == 0)
857 log_info("cancel settime because %s", msg);
858 imsg_compose(ibuf_main, IMSG_SETTIME, 0, 0, -1,
859 &offset, sizeof(offset));
860 conf->settime = 0;
861}
862
863void
864priv_dns(int cmd, char *name, u_int32_t peerid)
865{
866 u_int16_t dlen = 0;
867
868 if (name != NULL((void *)0))
869 dlen = strlen(name) + 1;
870 imsg_compose(ibuf_dns, cmd, peerid, 0, -1, name, dlen);
871}
872
873void
874update_scale(double offset)
875{
876 offset += getoffset();
877 if (offset < 0)
878 offset = -offset;
879
880 if (offset > QSCALE_OFF_MAX0.050 || !conf->status.synced ||
881 conf->freq.num < 3)
882 conf->scale = 1;
883 else if (offset < QSCALE_OFF_MIN0.001)
884 conf->scale = QSCALE_OFF_MAX0.050 / QSCALE_OFF_MIN0.001;
885 else
886 conf->scale = QSCALE_OFF_MAX0.050 / offset;
887}
888
889time_t
890scale_interval(time_t requested)
891{
892 time_t interval, r;
893
894 interval = requested * conf->scale;
895 r = arc4random_uniform(SCALE_INTERVAL(interval)((5) > ((interval) / 10) ? (5) : ((interval) / 10)));
896 return (interval + r);
897}
898
899time_t
900error_interval(void)
901{
902 time_t interval, r;
903
904 interval = INTERVAL_QUERY_PATHETIC60 * QSCALE_OFF_MAX0.050 / QSCALE_OFF_MIN0.001;
905 r = arc4random_uniform(interval / 10);
906 return (interval + r);
907}