Bug Summary

File: src/usr.sbin/ospf6d/rde.c
Warning: line 1275, column 3
Value stored to 'type' is never read

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name rde.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 1 -pic-is-pie -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/usr.sbin/ospf6d/obj -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/usr.sbin/ospf6d -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -fdebug-compilation-dir=/usr/src/usr.sbin/ospf6d/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c /usr/src/usr.sbin/ospf6d/rde.c
1/* $OpenBSD: rde.c,v 1.89 2021/01/19 09:54:08 claudio Exp $ */
2
3/*
4 * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
5 * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
6 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <net/if_types.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <err.h>
#include <errno.h>
#include <event.h>
#include <pwd.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "ospf6.h"
#include "ospf6d.h"
#include "ospfe.h"
#include "log.h"
#include "rde.h"
41
42#define MINIMUM(a, b)(((a) < (b)) ? (a) : (b)) (((a) < (b)) ? (a) : (b))
43
44void rde_sig_handler(int sig, short, void *);
45__dead__attribute__((__noreturn__)) void rde_shutdown(void);
46void rde_dispatch_imsg(int, short, void *);
47void rde_dispatch_parent(int, short, void *);
48void rde_dump_area(struct area *, int, pid_t);
49
50void rde_send_summary(pid_t);
51void rde_send_summary_area(struct area *, pid_t);
52void rde_nbr_init(u_int32_t);
53void rde_nbr_free(void);
54struct rde_nbr *rde_nbr_new(u_int32_t, struct rde_nbr *);
55void rde_nbr_del(struct rde_nbr *);
56
57void rde_req_list_add(struct rde_nbr *, struct lsa_hdr *);
58int rde_req_list_exists(struct rde_nbr *, struct lsa_hdr *);
59void rde_req_list_del(struct rde_nbr *, struct lsa_hdr *);
60void rde_req_list_free(struct rde_nbr *);
61
62struct iface *rde_asext_lookup(struct in6_addr, int);
63void rde_asext_get(struct kroute *);
64void rde_asext_put(struct kroute *);
65
66int comp_asext(struct lsa *, struct lsa *);
67struct lsa *orig_asext_lsa(struct kroute *, u_int16_t);
68struct lsa *orig_sum_lsa(struct rt_node *, struct area *, u_int8_t, int);
69struct lsa *orig_intra_lsa_net(struct area *, struct iface *,
70 struct vertex *);
71struct lsa *orig_intra_lsa_rtr(struct area *, struct vertex *);
72void append_prefix_lsa(struct lsa **, u_int16_t *,
73 struct lsa_prefix *);
74
75/* A 32-bit value != any ifindex.
76 * We assume ifindex is bound by [1, USHRT_MAX] inclusive. */
77#define LS_ID_INTRA_RTR0x01000000 0x01000000
78
79/* Tree of prefixes with global scope on given a link,
80 * see orig_intra_lsa_*() */
81struct prefix_node {
82 RB_ENTRY(prefix_node)struct { struct prefix_node *rbe_left; struct prefix_node *rbe_right
; struct prefix_node *rbe_parent; int rbe_color; }
entry;
83 struct lsa_prefix *prefix;
84};
85RB_HEAD(prefix_tree, prefix_node)struct prefix_tree { struct prefix_node *rbh_root; };
86RB_PROTOTYPE(prefix_tree, prefix_node, entry, prefix_compare)void prefix_tree_RB_INSERT_COLOR(struct prefix_tree *, struct
prefix_node *); void prefix_tree_RB_REMOVE_COLOR(struct prefix_tree
*, struct prefix_node *, struct prefix_node *); struct prefix_node
*prefix_tree_RB_REMOVE(struct prefix_tree *, struct prefix_node
*); struct prefix_node *prefix_tree_RB_INSERT(struct prefix_tree
*, struct prefix_node *); struct prefix_node *prefix_tree_RB_FIND
(struct prefix_tree *, struct prefix_node *); struct prefix_node
*prefix_tree_RB_NFIND(struct prefix_tree *, struct prefix_node
*); struct prefix_node *prefix_tree_RB_NEXT(struct prefix_node
*); struct prefix_node *prefix_tree_RB_PREV(struct prefix_node
*); struct prefix_node *prefix_tree_RB_MINMAX(struct prefix_tree
*, int);
;
87int prefix_compare(struct prefix_node *, struct prefix_node *);
88void prefix_tree_add(struct prefix_tree *, struct lsa_link *);
89
90struct ospfd_conf *rdeconf = NULL((void*)0), *nconf = NULL((void*)0);
91static struct imsgev *iev_ospfe;
92static struct imsgev *iev_main;
93struct rde_nbr *nbrself;
94struct lsa_tree asext_tree;
95
96/* ARGSUSED */
97void
98rde_sig_handler(int sig, short event, void *arg)
99{
100 /*
101 * signal handler rules don't apply, libevent decouples for us
102 */
103
104 switch (sig) {
105 case SIGINT2:
106 case SIGTERM15:
107 rde_shutdown();
108 /* NOTREACHED */
109 default:
110 fatalx("unexpected signal");
111 }
112}
113
114/* route decision engine */
115pid_t
116rde(struct ospfd_conf *xconf, int pipe_parent2rde[2], int pipe_ospfe2rde[2],
117 int pipe_parent2ospfe[2])
118{
119 struct event ev_sigint, ev_sigterm;
120 struct timeval now;
121 struct passwd *pw;
122 pid_t pid;
123
124 switch (pid = fork()) {
125 case -1:
126 fatal("cannot fork");
127 /* NOTREACHED */
128 case 0:
129 break;
130 default:
131 return (pid);
132 }
133
134 rdeconf = xconf;
135
136 if ((pw = getpwnam(OSPF6D_USER"_ospf6d")) == NULL((void*)0))
137 fatal("getpwnam");
138
139 if (chroot(pw->pw_dir) == -1)
140 fatal("chroot");
141 if (chdir("/") == -1)
142 fatal("chdir(\"/\")");
143
144 setproctitle("route decision engine");
145 /*
146 * XXX needed with fork+exec
147 * log_init(debug, LOG_DAEMON);
148 * log_setverbose(verbose);
149 */
150
151 ospfd_process = PROC_RDE_ENGINE;
152 log_procinit(log_procnames[ospfd_process]);
153
154 if (setgroups(1, &pw->pw_gid) ||
155 setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
156 setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
157 fatal("can't drop privileges");
158
159 if (pledge("stdio", NULL((void*)0)) == -1)
160 fatal("pledge");
161
162 event_init();
163 rde_nbr_init(NBR_HASHSIZE128);
164 lsa_init(&asext_tree);
165
166 /* setup signal handler */
167 signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL)event_set(&ev_sigint, 2, 0x08|0x10, rde_sig_handler, ((void
*)0))
;
168 signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL)event_set(&ev_sigterm, 15, 0x08|0x10, rde_sig_handler, ((
void*)0))
;
169 signal_add(&ev_sigint, NULL)event_add(&ev_sigint, ((void*)0));
170 signal_add(&ev_sigterm, NULL)event_add(&ev_sigterm, ((void*)0));
171 signal(SIGPIPE13, SIG_IGN(void (*)(int))1);
172 signal(SIGHUP1, SIG_IGN(void (*)(int))1);
173
174 /* setup pipes */
175 close(pipe_ospfe2rde[0]);
176 close(pipe_parent2rde[0]);
177 close(pipe_parent2ospfe[0]);
178 close(pipe_parent2ospfe[1]);
179
180 if ((iev_ospfe = malloc(sizeof(struct imsgev))) == NULL((void*)0) ||
181 (iev_main = malloc(sizeof(struct imsgev))) == NULL((void*)0))
182 fatal(NULL((void*)0));
183 imsg_init(&iev_ospfe->ibuf, pipe_ospfe2rde[1]);
184 iev_ospfe->handler = rde_dispatch_imsg;
185 imsg_init(&iev_main->ibuf, pipe_parent2rde[1]);
186 iev_main->handler = rde_dispatch_parent;
187
188 /* setup event handler */
189 iev_ospfe->events = EV_READ0x02;
190 event_set(&iev_ospfe->ev, iev_ospfe->ibuf.fd, iev_ospfe->events,
191 iev_ospfe->handler, iev_ospfe);
192 event_add(&iev_ospfe->ev, NULL((void*)0));
193
194 iev_main->events = EV_READ0x02;
195 event_set(&iev_main->ev, iev_main->ibuf.fd, iev_main->events,
196 iev_main->handler, iev_main);
197 event_add(&iev_main->ev, NULL((void*)0));
198
199 evtimer_set(&rdeconf->ev, spf_timer, rdeconf)event_set(&rdeconf->ev, -1, 0, spf_timer, rdeconf);
200 cand_list_init();
201 rt_init();
202
203 /* remove unneeded stuff from config */
204 conf_clear_redist_list(&rdeconf->redist_list);
205
206 gettimeofday(&now, NULL((void*)0));
207 rdeconf->uptime = now.tv_sec;
208
209 event_dispatch();
210
211 rde_shutdown();
212 /* NOTREACHED */
213
214 return (0);
215}
216
217__dead__attribute__((__noreturn__)) void
218rde_shutdown(void)
219{
220 struct area *a;
221 struct vertex *v, *nv;
222
223 /* close pipes */
224 msgbuf_clear(&iev_ospfe->ibuf.w);
225 close(iev_ospfe->ibuf.fd);
226 msgbuf_clear(&iev_main->ibuf.w);
227 close(iev_main->ibuf.fd);
228
229 stop_spf_timer(rdeconf);
230 cand_list_clr();
231 rt_clear();
232
233 while ((a = LIST_FIRST(&rdeconf->area_list)((&rdeconf->area_list)->lh_first)) != NULL((void*)0)) {
234 LIST_REMOVE(a, entry)do { if ((a)->entry.le_next != ((void*)0)) (a)->entry.le_next
->entry.le_prev = (a)->entry.le_prev; *(a)->entry.le_prev
= (a)->entry.le_next; ; ; } while (0)
;
235 area_del(a);
236 }
237 for (v = RB_MIN(lsa_tree, &asext_tree)lsa_tree_RB_MINMAX(&asext_tree, -1); v != NULL((void*)0); v = nv) {
238 nv = RB_NEXT(lsa_tree, &asext_tree, v)lsa_tree_RB_NEXT(v);
239 vertex_free(v);
240 }
241 rde_nbr_free();
242
243 free(iev_ospfe);
244 free(iev_main);
245 free(rdeconf);
246
247 log_info("route decision engine exiting");
248 _exit(0);
249}
250
251int
252rde_imsg_compose_ospfe(int type, u_int32_t peerid, pid_t pid, void *data,
253 u_int16_t datalen)
254{
255 return (imsg_compose_event(iev_ospfe, type, peerid, pid, -1,
256 data, datalen));
257}
258
259/* ARGSUSED */
260void
261rde_dispatch_imsg(int fd, short event, void *bula)
262{
263 struct imsgev *iev = bula;
264 struct imsgbuf *ibuf = &iev->ibuf;
265 struct imsg imsg;
266 struct in_addr aid;
267 struct ls_req_hdr req_hdr;
268 struct lsa_hdr lsa_hdr, *db_hdr;
269 struct rde_nbr rn, *nbr;
270 struct timespec tp;
271 struct lsa *lsa;
272 struct area *area;
273 struct vertex *v;
274 char *buf;
275 ssize_t n;
276 time_t now;
277 int r, state, self, shut = 0, verbose;
278 u_int16_t l;
279
280 if (event & EV_READ0x02) {
281 if ((n = imsg_read(ibuf)) == -1 && errno(*__errno()) != EAGAIN35)
282 fatal("imsg_read error");
283 if (n == 0) /* connection closed */
284 shut = 1;
285 }
286 if (event & EV_WRITE0x04) {
287 if ((n = msgbuf_write(&ibuf->w)) == -1 && errno(*__errno()) != EAGAIN35)
288 fatal("msgbuf_write");
289 if (n == 0) /* connection closed */
290 shut = 1;
291 }
292
293 clock_gettime(CLOCK_MONOTONIC3, &tp);
294 now = tp.tv_sec;
295
296 for (;;) {
297 if ((n = imsg_get(ibuf, &imsg)) == -1)
298 fatal("rde_dispatch_imsg: imsg_get error");
299 if (n == 0)
300 break;
301
302 switch (imsg.hdr.type) {
303 case IMSG_NEIGHBOR_UP:
304 if (imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr) != sizeof(rn))
305 fatalx("invalid size of OE request");
306 memcpy(&rn, imsg.data, sizeof(rn));
307
308 if (rde_nbr_new(imsg.hdr.peerid, &rn) == NULL((void*)0))
309 fatalx("rde_dispatch_imsg: "
310 "neighbor already exists");
311 break;
312 case IMSG_NEIGHBOR_DOWN:
313 rde_nbr_del(rde_nbr_find(imsg.hdr.peerid));
314 break;
315 case IMSG_NEIGHBOR_CHANGE:
316 if (imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr) != sizeof(state))
317 fatalx("invalid size of OE request");
318 memcpy(&state, imsg.data, sizeof(state));
319
320 nbr = rde_nbr_find(imsg.hdr.peerid);
321 if (nbr == NULL((void*)0))
322 break;
323
324 if (state != nbr->state &&
325 (nbr->state & NBR_STA_FULL0x0100 ||
326 state & NBR_STA_FULL0x0100)) {
327 nbr->state = state;
328 area_track(nbr->area);
329 orig_intra_area_prefix_lsas(nbr->area);
330 }
331
332 nbr->state = state;
333 if (nbr->state & NBR_STA_FULL0x0100)
334 rde_req_list_free(nbr);
335 break;
336 case IMSG_AREA_CHANGE:
337 if (imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr) != sizeof(state))
338 fatalx("invalid size of OE request");
339
340 LIST_FOREACH(area, &rdeconf->area_list, entry)for((area) = ((&rdeconf->area_list)->lh_first); (area
)!= ((void*)0); (area) = ((area)->entry.le_next))
{
341 if (area->id.s_addr == imsg.hdr.peerid)
342 break;
343 }
344 if (area == NULL((void*)0))
345 break;
346 memcpy(&state, imsg.data, sizeof(state));
347 area->active = state;
348 break;
349 case IMSG_DB_SNAPSHOT:
350 nbr = rde_nbr_find(imsg.hdr.peerid);
351 if (nbr == NULL((void*)0))
352 break;
353
354 lsa_snap(nbr);
355
356 imsg_compose_event(iev_ospfe, IMSG_DB_END, imsg.hdr.peerid,
357 0, -1, NULL((void*)0), 0);
358 break;
359 case IMSG_DD:
360 nbr = rde_nbr_find(imsg.hdr.peerid);
361 if (nbr == NULL((void*)0))
362 break;
363
364 buf = imsg.data;
365 for (l = imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr);
366 l >= sizeof(lsa_hdr); l -= sizeof(lsa_hdr)) {
367 memcpy(&lsa_hdr, buf, sizeof(lsa_hdr));
368 buf += sizeof(lsa_hdr);
369
370 v = lsa_find(nbr->iface, lsa_hdr.type,
371 lsa_hdr.ls_id, lsa_hdr.adv_rtr);
372 if (v == NULL((void*)0))
373 db_hdr = NULL((void*)0);
374 else
375 db_hdr = &v->lsa->hdr;
376
377 if (lsa_newer(&lsa_hdr, db_hdr) > 0) {
378 /*
379 * only request LSAs that are
380 * newer or missing
381 */
382 rde_req_list_add(nbr, &lsa_hdr);
383 imsg_compose_event(iev_ospfe, IMSG_DD,
384 imsg.hdr.peerid, 0, -1, &lsa_hdr,
385 sizeof(lsa_hdr));
386 }
387 }
388 if (l != 0)
389 log_warnx("rde_dispatch_imsg: peerid %u, "
390 "trailing garbage in Database Description "
391 "packet", imsg.hdr.peerid);
392
393 imsg_compose_event(iev_ospfe, IMSG_DD_END,
394 imsg.hdr.peerid, 0, -1, NULL((void*)0), 0);
395 break;
396 case IMSG_LS_REQ:
397 nbr = rde_nbr_find(imsg.hdr.peerid);
398 if (nbr == NULL((void*)0))
399 break;
400
401 buf = imsg.data;
402 for (l = imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr);
403 l >= sizeof(req_hdr); l -= sizeof(req_hdr)) {
404 memcpy(&req_hdr, buf, sizeof(req_hdr));
405 buf += sizeof(req_hdr);
406
407 if ((v = lsa_find(nbr->iface,
408 req_hdr.type, req_hdr.ls_id,
409 req_hdr.adv_rtr)) == NULL((void*)0)) {
410 imsg_compose_event(iev_ospfe,
411 IMSG_LS_BADREQ, imsg.hdr.peerid,
412 0, -1, NULL((void*)0), 0);
413 continue;
414 }
415 imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
416 imsg.hdr.peerid, 0, -1, v->lsa,
417 ntohs(v->lsa->hdr.len)(__uint16_t)(__builtin_constant_p(v->lsa->hdr.len) ? (__uint16_t
)(((__uint16_t)(v->lsa->hdr.len) & 0xffU) << 8
| ((__uint16_t)(v->lsa->hdr.len) & 0xff00U) >>
8) : __swap16md(v->lsa->hdr.len))
);
418 }
419 if (l != 0)
420 log_warnx("rde_dispatch_imsg: peerid %u, "
421 "trailing garbage in LS Request "
422 "packet", imsg.hdr.peerid);
423 break;
424 case IMSG_LS_UPD:
425 nbr = rde_nbr_find(imsg.hdr.peerid);
426 if (nbr == NULL((void*)0))
427 break;
428
429 lsa = malloc(imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr));
430 if (lsa == NULL((void*)0))
431 fatal(NULL((void*)0));
432 memcpy(lsa, imsg.data, imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr));
433
434 if (!lsa_check(nbr, lsa,
435 imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr))) {
436 free(lsa);
437 break;
438 }
439
440 v = lsa_find(nbr->iface, lsa->hdr.type, lsa->hdr.ls_id,
441 lsa->hdr.adv_rtr);
442 if (v == NULL((void*)0))
443 db_hdr = NULL((void*)0);
444 else
445 db_hdr = &v->lsa->hdr;
446
447 if (nbr->self) {
448 lsa_merge(nbr, lsa, v);
449 /* lsa_merge frees the right lsa */
450 break;
451 }
452
453 r = lsa_newer(&lsa->hdr, db_hdr);
454 if (r > 0) {
455 /* new LSA newer than DB */
456 if (v && v->flooded &&
457 v->changed + MIN_LS_ARRIVAL1 >= now) {
458 free(lsa);
459 break;
460 }
461
462 rde_req_list_del(nbr, &lsa->hdr);
463
464 if (!(self = lsa_self(nbr, lsa, v)))
465 if (lsa_add(nbr, lsa))
466 /* delayed lsa */
467 break;
468
469 /* flood and perhaps ack LSA */
470 imsg_compose_event(iev_ospfe, IMSG_LS_FLOOD,
471 imsg.hdr.peerid, 0, -1, lsa,
472 ntohs(lsa->hdr.len)(__uint16_t)(__builtin_constant_p(lsa->hdr.len) ? (__uint16_t
)(((__uint16_t)(lsa->hdr.len) & 0xffU) << 8 | ((
__uint16_t)(lsa->hdr.len) & 0xff00U) >> 8) : __swap16md
(lsa->hdr.len))
);
473
474 /* reflood self originated LSA */
475 if (self && v)
476 imsg_compose_event(iev_ospfe,
477 IMSG_LS_FLOOD, v->peerid, 0, -1,
478 v->lsa, ntohs(v->lsa->hdr.len)(__uint16_t)(__builtin_constant_p(v->lsa->hdr.len) ? (__uint16_t
)(((__uint16_t)(v->lsa->hdr.len) & 0xffU) << 8
| ((__uint16_t)(v->lsa->hdr.len) & 0xff00U) >>
8) : __swap16md(v->lsa->hdr.len))
);
479 /* new LSA was not added so free it */
480 if (self)
481 free(lsa);
482 } else if (r < 0) {
483 /*
484 * point 6 of "The Flooding Procedure"
485 * We are violating the RFC here because
486 * it does not make sense to reset a session
487 * because an equal LSA is already in the table.
488 * Only if the LSA sent is older than the one
489 * in the table we should reset the session.
490 */
491 if (rde_req_list_exists(nbr, &lsa->hdr)) {
492 imsg_compose_event(iev_ospfe,
493 IMSG_LS_BADREQ, imsg.hdr.peerid,
494 0, -1, NULL((void*)0), 0);
495 free(lsa);
496 break;
497 }
498
499 /* lsa no longer needed */
500 free(lsa);
501
502 /* new LSA older than DB */
503 if (ntohl(db_hdr->seq_num)(__uint32_t)(__builtin_constant_p(db_hdr->seq_num) ? (__uint32_t
)(((__uint32_t)(db_hdr->seq_num) & 0xff) << 24 |
((__uint32_t)(db_hdr->seq_num) & 0xff00) << 8 |
((__uint32_t)(db_hdr->seq_num) & 0xff0000) >> 8
| ((__uint32_t)(db_hdr->seq_num) & 0xff000000) >>
24) : __swap32md(db_hdr->seq_num))
== MAX_SEQ_NUM0x7fffffffU &&
504 ntohs(db_hdr->age)(__uint16_t)(__builtin_constant_p(db_hdr->age) ? (__uint16_t
)(((__uint16_t)(db_hdr->age) & 0xffU) << 8 | ((__uint16_t
)(db_hdr->age) & 0xff00U) >> 8) : __swap16md(db_hdr
->age))
== MAX_AGE3600)
505 /* seq-num wrap */
506 break;
507
508 if (v->changed + MIN_LS_ARRIVAL1 >= now)
509 break;
510
511 /* directly send current LSA, no ack */
512 imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
513 imsg.hdr.peerid, 0, -1, v->lsa,
514 ntohs(v->lsa->hdr.len)(__uint16_t)(__builtin_constant_p(v->lsa->hdr.len) ? (__uint16_t
)(((__uint16_t)(v->lsa->hdr.len) & 0xffU) << 8
| ((__uint16_t)(v->lsa->hdr.len) & 0xff00U) >>
8) : __swap16md(v->lsa->hdr.len))
);
515 } else {
516 /* LSA equal send direct ack */
517 imsg_compose_event(iev_ospfe, IMSG_LS_ACK,
518 imsg.hdr.peerid, 0, -1, &lsa->hdr,
519 sizeof(lsa->hdr));
520 free(lsa);
521 }
522 break;
523 case IMSG_LS_MAXAGE:
524 nbr = rde_nbr_find(imsg.hdr.peerid);
525 if (nbr == NULL((void*)0))
526 break;
527
528 if (imsg.hdr.len != IMSG_HEADER_SIZEsizeof(struct imsg_hdr) +
529 sizeof(struct lsa_hdr))
530 fatalx("invalid size of OE request");
531 memcpy(&lsa_hdr, imsg.data, sizeof(lsa_hdr));
532
533 if (rde_nbr_loading(nbr->area))
534 break;
535
536 v = lsa_find(nbr->iface, lsa_hdr.type, lsa_hdr.ls_id,
537 lsa_hdr.adv_rtr);
538 if (v == NULL((void*)0))
539 db_hdr = NULL((void*)0);
540 else
541 db_hdr = &v->lsa->hdr;
542
543 /*
544 * only delete LSA if the one in the db is not newer
545 */
546 if (lsa_newer(db_hdr, &lsa_hdr) <= 0)
547 lsa_del(nbr, &lsa_hdr);
548 break;
549 case IMSG_CTL_SHOW_DATABASE:
550 case IMSG_CTL_SHOW_DB_EXT:
551 case IMSG_CTL_SHOW_DB_LINK:
552 case IMSG_CTL_SHOW_DB_NET:
553 case IMSG_CTL_SHOW_DB_RTR:
554 case IMSG_CTL_SHOW_DB_INTRA:
555 case IMSG_CTL_SHOW_DB_SELF:
556 case IMSG_CTL_SHOW_DB_SUM:
557 case IMSG_CTL_SHOW_DB_ASBR:
558 if (imsg.hdr.len != IMSG_HEADER_SIZEsizeof(struct imsg_hdr) &&
559 imsg.hdr.len != IMSG_HEADER_SIZEsizeof(struct imsg_hdr) + sizeof(aid)) {
560 log_warnx("rde_dispatch_imsg: wrong imsg len");
561 break;
562 }
563 if (imsg.hdr.len == IMSG_HEADER_SIZEsizeof(struct imsg_hdr)) {
564 LIST_FOREACH(area, &rdeconf->area_list, entry)for((area) = ((&rdeconf->area_list)->lh_first); (area
)!= ((void*)0); (area) = ((area)->entry.le_next))
{
565 rde_dump_area(area, imsg.hdr.type,
566 imsg.hdr.pid);
567 }
568 lsa_dump(&asext_tree, imsg.hdr.type,
569 imsg.hdr.pid);
570 } else {
571 memcpy(&aid, imsg.data, sizeof(aid));
572 if ((area = area_find(rdeconf, aid)) != NULL((void*)0)) {
573 rde_dump_area(area, imsg.hdr.type,
574 imsg.hdr.pid);
575 if (!area->stub)
576 lsa_dump(&asext_tree,
577 imsg.hdr.type,
578 imsg.hdr.pid);
579 }
580 }
581 imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
582 imsg.hdr.pid, -1, NULL((void*)0), 0);
583 break;
584 case IMSG_CTL_SHOW_RIB:
585 LIST_FOREACH(area, &rdeconf->area_list, entry)for((area) = ((&rdeconf->area_list)->lh_first); (area
)!= ((void*)0); (area) = ((area)->entry.le_next))
{
586 imsg_compose_event(iev_ospfe, IMSG_CTL_AREA,
587 0, imsg.hdr.pid, -1, area, sizeof(*area));
588
589 rt_dump(area->id, imsg.hdr.pid, RIB_RTR);
590 rt_dump(area->id, imsg.hdr.pid, RIB_NET);
591 }
592 aid.s_addr = 0;
593 rt_dump(aid, imsg.hdr.pid, RIB_EXT);
594
595 imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
596 imsg.hdr.pid, -1, NULL((void*)0), 0);
597 break;
598 case IMSG_CTL_SHOW_SUM:
599 rde_send_summary(imsg.hdr.pid);
600 LIST_FOREACH(area, &rdeconf->area_list, entry)for((area) = ((&rdeconf->area_list)->lh_first); (area
)!= ((void*)0); (area) = ((area)->entry.le_next))
601 rde_send_summary_area(area, imsg.hdr.pid);
602 imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
603 imsg.hdr.pid, -1, NULL((void*)0), 0);
604 break;
605 case IMSG_IFINFO:
606 if (imsg.hdr.len != IMSG_HEADER_SIZEsizeof(struct imsg_hdr) +
607 sizeof(int))
608 fatalx("IFINFO imsg with wrong len");
609
610 nbr = rde_nbr_find(imsg.hdr.peerid);
611 if (nbr == NULL((void*)0))
612 fatalx("IFINFO imsg with bad peerid");
613 memcpy(&nbr->iface->state, imsg.data, sizeof(int));
614
615 /* Resend LSAs if interface state changes. */
616 orig_intra_area_prefix_lsas(nbr->area);
617 break;
618 case IMSG_CTL_LOG_VERBOSE:
619 /* already checked by ospfe */
620 memcpy(&verbose, imsg.data, sizeof(verbose));
621 log_setverbose(verbose);
622 break;
623 default:
624 log_debug("rde_dispatch_imsg: unexpected imsg %d",
625 imsg.hdr.type);
626 break;
627 }
628 imsg_free(&imsg);
629 }
630 if (!shut)
631 imsg_event_add(iev);
632 else {
633 /* this pipe is dead, so remove the event handler */
634 event_del(&iev->ev);
635 event_loopexit(NULL((void*)0));
636 }
637}
638
639/* ARGSUSED */
640void
641rde_dispatch_parent(int fd, short event, void *bula)
642{
643 static struct area *narea;
644 struct area *area;
645 struct iface *iface, *ifp, *i;
646 struct ifaddrchange *ifc;
647 struct iface_addr *ia, *nia;
648 struct imsg imsg;
649 struct kroute kr;
650 struct imsgev *iev = bula;
651 struct imsgbuf *ibuf = &iev->ibuf;
652 ssize_t n;
653 int shut = 0, link_ok, prev_link_ok, orig_lsa;
654
655 if (event & EV_READ0x02) {
656 if ((n = imsg_read(ibuf)) == -1 && errno(*__errno()) != EAGAIN35)
657 fatal("imsg_read error");
658 if (n == 0) /* connection closed */
659 shut = 1;
660 }
661 if (event & EV_WRITE0x04) {
662 if ((n = msgbuf_write(&ibuf->w)) == -1 && errno(*__errno()) != EAGAIN35)
663 fatal("msgbuf_write");
664 if (n == 0) /* connection closed */
665 shut = 1;
666 }
667
668 for (;;) {
669 if ((n = imsg_get(ibuf, &imsg)) == -1)
670 fatal("rde_dispatch_parent: imsg_get error");
671 if (n == 0)
672 break;
673
674 switch (imsg.hdr.type) {
675 case IMSG_NETWORK_ADD:
676 if (imsg.hdr.len != IMSG_HEADER_SIZEsizeof(struct imsg_hdr) + sizeof(kr)) {
677 log_warnx("rde_dispatch_parent: "
678 "wrong imsg len");
679 break;
680 }
681 memcpy(&kr, imsg.data, sizeof(kr));
682 rde_asext_get(&kr);
683 break;
684 case IMSG_NETWORK_DEL:
685 if (imsg.hdr.len != IMSG_HEADER_SIZEsizeof(struct imsg_hdr) + sizeof(kr)) {
686 log_warnx("rde_dispatch_parent: "
687 "wrong imsg len");
688 break;
689 }
690 memcpy(&kr, imsg.data, sizeof(kr));
691 rde_asext_put(&kr);
692 break;
693 case IMSG_IFINFO:
694 if (imsg.hdr.len != IMSG_HEADER_SIZEsizeof(struct imsg_hdr) +
695 sizeof(struct iface))
696 fatalx("IFINFO imsg with wrong len");
697
698 ifp = imsg.data;
699
700 LIST_FOREACH(area, &rdeconf->area_list, entry)for((area) = ((&rdeconf->area_list)->lh_first); (area
)!= ((void*)0); (area) = ((area)->entry.le_next))
{
701 orig_lsa = 0;
702 LIST_FOREACH(i, &area->iface_list, entry)for((i) = ((&area->iface_list)->lh_first); (i)!= ((
void*)0); (i) = ((i)->entry.le_next))
{
703 if (strcmp(i->dependon,
704 ifp->name) == 0) {
705 i->depend_ok =
706 ifstate_is_up(ifp);
707 if (ifstate_is_up(i))
708 orig_lsa = 1;
709 }
710 }
711 if (orig_lsa)
712 orig_intra_area_prefix_lsas(area);
713 }
714
715 if (!(ifp->cflags & F_IFACE_CONFIGURED0x02))
716 break;
717 iface = if_find(ifp->ifindex);
718 if (iface == NULL((void*)0))
719 fatalx("interface lost in rde");
720
721 prev_link_ok = (iface->flags & IFF_UP0x1) &&
722 LINK_STATE_IS_UP(iface->linkstate)((iface->linkstate) >= 4 || (iface->linkstate) == 0);
723
724 if_update(iface, ifp->mtu, ifp->flags, ifp->if_type,
725 ifp->linkstate, ifp->baudrate, ifp->rdomain);
726
727 /* Resend LSAs if interface state changes. */
728 link_ok = (iface->flags & IFF_UP0x1) &&
729 LINK_STATE_IS_UP(iface->linkstate)((iface->linkstate) >= 4 || (iface->linkstate) == 0);
730 if (prev_link_ok == link_ok)
731 break;
732
733 orig_intra_area_prefix_lsas(iface->area);
734
735 break;
736 case IMSG_IFADDRNEW:
737 if (imsg.hdr.len != IMSG_HEADER_SIZEsizeof(struct imsg_hdr) +
738 sizeof(struct ifaddrchange))
739 fatalx("IFADDRNEW imsg with wrong len");
740 ifc = imsg.data;
741
742 iface = if_find(ifc->ifindex);
743 if (iface == NULL((void*)0))
744 fatalx("IFADDRNEW interface lost in rde");
745
746 if ((ia = calloc(1, sizeof(struct iface_addr))) ==
747 NULL((void*)0))
748 fatal("rde_dispatch_parent IFADDRNEW");
749 ia->addr = ifc->addr;
750 ia->dstbrd = ifc->dstbrd;
751 ia->prefixlen = ifc->prefixlen;
752
753 TAILQ_INSERT_TAIL(&iface->ifa_list, ia, entry)do { (ia)->entry.tqe_next = ((void*)0); (ia)->entry.tqe_prev
= (&iface->ifa_list)->tqh_last; *(&iface->ifa_list
)->tqh_last = (ia); (&iface->ifa_list)->tqh_last
= &(ia)->entry.tqe_next; } while (0)
;
754 if (iface->area)
755 orig_intra_area_prefix_lsas(iface->area);
756 break;
757 case IMSG_IFADDRDEL:
758 if (imsg.hdr.len != IMSG_HEADER_SIZEsizeof(struct imsg_hdr) +
759 sizeof(struct ifaddrchange))
760 fatalx("IFADDRDEL imsg with wrong len");
761 ifc = imsg.data;
762
763 iface = if_find(ifc->ifindex);
764 if (iface == NULL((void*)0))
765 fatalx("IFADDRDEL interface lost in rde");
766
767 for (ia = TAILQ_FIRST(&iface->ifa_list)((&iface->ifa_list)->tqh_first); ia != NULL((void*)0);
768 ia = nia) {
769 nia = TAILQ_NEXT(ia, entry)((ia)->entry.tqe_next);
770
771 if (IN6_ARE_ADDR_EQUAL(&ia->addr,(memcmp(&(&ia->addr)->__u6_addr.__u6_addr8[0], &
(&ifc->addr)->__u6_addr.__u6_addr8[0], sizeof(struct
in6_addr)) == 0)
772 &ifc->addr)(memcmp(&(&ia->addr)->__u6_addr.__u6_addr8[0], &
(&ifc->addr)->__u6_addr.__u6_addr8[0], sizeof(struct
in6_addr)) == 0)
) {
773 TAILQ_REMOVE(&iface->ifa_list, ia,do { if (((ia)->entry.tqe_next) != ((void*)0)) (ia)->entry
.tqe_next->entry.tqe_prev = (ia)->entry.tqe_prev; else (
&iface->ifa_list)->tqh_last = (ia)->entry.tqe_prev
; *(ia)->entry.tqe_prev = (ia)->entry.tqe_next; ; ; } while
(0)
774 entry)do { if (((ia)->entry.tqe_next) != ((void*)0)) (ia)->entry
.tqe_next->entry.tqe_prev = (ia)->entry.tqe_prev; else (
&iface->ifa_list)->tqh_last = (ia)->entry.tqe_prev
; *(ia)->entry.tqe_prev = (ia)->entry.tqe_next; ; ; } while
(0)
;
775 free(ia);
776 break;
777 }
778 }
779 if (iface->area)
780 orig_intra_area_prefix_lsas(iface->area);
781 break;
782 case IMSG_RECONF_CONF:
783 if ((nconf = malloc(sizeof(struct ospfd_conf))) ==
784 NULL((void*)0))
785 fatal(NULL((void*)0));
786 memcpy(nconf, imsg.data, sizeof(struct ospfd_conf));
787
788 LIST_INIT(&nconf->area_list)do { ((&nconf->area_list)->lh_first) = ((void*)0); }
while (0)
;
789 LIST_INIT(&nconf->cand_list)do { ((&nconf->cand_list)->lh_first) = ((void*)0); }
while (0)
;
790 break;
791 case IMSG_RECONF_AREA:
792 if ((narea = area_new()) == NULL((void*)0))
793 fatal(NULL((void*)0));
794 memcpy(narea, imsg.data, sizeof(struct area));
795
796 LIST_INIT(&narea->iface_list)do { ((&narea->iface_list)->lh_first) = ((void*)0);
} while (0)
;
797 LIST_INIT(&narea->nbr_list)do { ((&narea->nbr_list)->lh_first) = ((void*)0); }
while (0)
;
798 RB_INIT(&narea->lsa_tree)do { (&narea->lsa_tree)->rbh_root = ((void*)0); } while
(0)
;
799
800 LIST_INSERT_HEAD(&nconf->area_list, narea, entry)do { if (((narea)->entry.le_next = (&nconf->area_list
)->lh_first) != ((void*)0)) (&nconf->area_list)->
lh_first->entry.le_prev = &(narea)->entry.le_next; (
&nconf->area_list)->lh_first = (narea); (narea)->
entry.le_prev = &(&nconf->area_list)->lh_first;
} while (0)
;
801 break;
802 case IMSG_RECONF_END:
803 merge_config(rdeconf, nconf);
804 nconf = NULL((void*)0);
805 break;
806 default:
807 log_debug("rde_dispatch_parent: unexpected imsg %d",
808 imsg.hdr.type);
809 break;
810 }
811 imsg_free(&imsg);
812 }
813 if (!shut)
814 imsg_event_add(iev);
815 else {
816 /* this pipe is dead, so remove the event handler */
817 event_del(&iev->ev);
818 event_loopexit(NULL((void*)0));
819 }
820}
821
822void
823rde_dump_area(struct area *area, int imsg_type, pid_t pid)
824{
825 struct iface *iface;
826
827 /* dump header */
828 imsg_compose_event(iev_ospfe, IMSG_CTL_AREA, 0, pid, -1,
829 area, sizeof(*area));
830
831 /* dump link local lsa */
832 LIST_FOREACH(iface, &area->iface_list, entry)for((iface) = ((&area->iface_list)->lh_first); (iface
)!= ((void*)0); (iface) = ((iface)->entry.le_next))
{
833 imsg_compose_event(iev_ospfe, IMSG_CTL_IFACE,
834 0, pid, -1, iface, sizeof(*iface));
835 lsa_dump(&iface->lsa_tree, imsg_type, pid);
836 }
837
838 /* dump area lsa */
839 lsa_dump(&area->lsa_tree, imsg_type, pid);
840}
841
842u_int32_t
843rde_router_id(void)
844{
845 return (rdeconf->rtr_id.s_addr);
846}
847
848void
849rde_send_change_kroute(struct rt_node *r)
850{
851 int krcount = 0;
852 struct kroute kr;
853 struct rt_nexthop *rn;
854 struct ibuf *wbuf;
855
856 if ((wbuf = imsg_create(&iev_main->ibuf, IMSG_KROUTE_CHANGE, 0, 0,
857 sizeof(kr))) == NULL((void*)0)) {
858 return;
859 }
860
861 TAILQ_FOREACH(rn, &r->nexthop, entry)for((rn) = ((&r->nexthop)->tqh_first); (rn) != ((void
*)0); (rn) = ((rn)->entry.tqe_next))
{
862 if (rn->invalid)
863 continue;
864 if (rn->connected)
865 /* skip self-originated routes */
866 continue;
867 krcount++;
868
869 bzero(&kr, sizeof(kr));
870 kr.prefix = r->prefix;
871 kr.nexthop = rn->nexthop;
872 if (IN6_IS_ADDR_LINKLOCAL(&rn->nexthop)(((&rn->nexthop)->__u6_addr.__u6_addr8[0] == 0xfe) &&
(((&rn->nexthop)->__u6_addr.__u6_addr8[1] & 0xc0
) == 0x80))
||
873 IN6_IS_ADDR_MC_LINKLOCAL(&rn->nexthop)(((&rn->nexthop)->__u6_addr.__u6_addr8[0] == 0xff) &&
(((&rn->nexthop)->__u6_addr.__u6_addr8[1] & 0x0f
) == 0x02))
)
874 kr.scope = rn->ifindex;
875 kr.ifindex = rn->ifindex;
876 kr.prefixlen = r->prefixlen;
877 kr.ext_tag = r->ext_tag;
878 imsg_add(wbuf, &kr, sizeof(kr));
879 }
880 if (krcount == 0) {
881 /* no valid nexthop or self originated, so remove */
882 ibuf_free(wbuf);
883 rde_send_delete_kroute(r);
884 return;
885 }
886
887 imsg_close(&iev_main->ibuf, wbuf);
888 imsg_event_add(iev_main);
889}
890
891void
892rde_send_delete_kroute(struct rt_node *r)
893{
894 struct kroute kr;
895
896 bzero(&kr, sizeof(kr));
897 kr.prefix = r->prefix;
898 kr.prefixlen = r->prefixlen;
899
900 imsg_compose_event(iev_main, IMSG_KROUTE_DELETE, 0, 0, -1,
901 &kr, sizeof(kr));
902}
903
904void
905rde_send_summary(pid_t pid)
906{
907 static struct ctl_sum sumctl;
908 struct timeval now;
909 struct area *area;
910 struct vertex *v;
911
912 bzero(&sumctl, sizeof(struct ctl_sum));
913
914 sumctl.rtr_id.s_addr = rde_router_id();
915 sumctl.spf_delay = rdeconf->spf_delay;
916 sumctl.spf_hold_time = rdeconf->spf_hold_time;
917
918 LIST_FOREACH(area, &rdeconf->area_list, entry)for((area) = ((&rdeconf->area_list)->lh_first); (area
)!= ((void*)0); (area) = ((area)->entry.le_next))
919 sumctl.num_area++;
920
921 RB_FOREACH(v, lsa_tree, &asext_tree)for ((v) = lsa_tree_RB_MINMAX(&asext_tree, -1); (v) != ((
void*)0); (v) = lsa_tree_RB_NEXT(v))
922 sumctl.num_ext_lsa++;
923
924 gettimeofday(&now, NULL((void*)0));
925 if (rdeconf->uptime < now.tv_sec)
926 sumctl.uptime = now.tv_sec - rdeconf->uptime;
927 else
928 sumctl.uptime = 0;
929
930 rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM, 0, pid, &sumctl,
931 sizeof(sumctl));
932}
933
934void
935rde_send_summary_area(struct area *area, pid_t pid)
936{
937 static struct ctl_sum_area sumareactl;
938 struct iface *iface;
939 struct rde_nbr *nbr;
940 struct lsa_tree *tree = &area->lsa_tree;
941 struct vertex *v;
942
943 bzero(&sumareactl, sizeof(struct ctl_sum_area));
944
945 sumareactl.area.s_addr = area->id.s_addr;
946 sumareactl.num_spf_calc = area->num_spf_calc;
947
948 LIST_FOREACH(iface, &area->iface_list, entry)for((iface) = ((&area->iface_list)->lh_first); (iface
)!= ((void*)0); (iface) = ((iface)->entry.le_next))
949 sumareactl.num_iface++;
950
951 LIST_FOREACH(nbr, &area->nbr_list, entry)for((nbr) = ((&area->nbr_list)->lh_first); (nbr)!= (
(void*)0); (nbr) = ((nbr)->entry.le_next))
952 if (nbr->state == NBR_STA_FULL0x0100 && !nbr->self)
953 sumareactl.num_adj_nbr++;
954
955 RB_FOREACH(v, lsa_tree, tree)for ((v) = lsa_tree_RB_MINMAX(tree, -1); (v) != ((void*)0); (
v) = lsa_tree_RB_NEXT(v))
956 sumareactl.num_lsa++;
957
958 rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM_AREA, 0, pid, &sumareactl,
959 sizeof(sumareactl));
960}
961
/*
 * Hash table of RDE neighbors keyed by peerid; bucket count is a power
 * of two so the hash reduces to a simple mask.
 */
LIST_HEAD(rde_nbr_head, rde_nbr);

struct nbr_table {
	struct rde_nbr_head	*hashtbl;	/* array of hash buckets */
	u_int32_t		 hashmask;	/* bucket count - 1 */
} rdenbrtable;

/* map a peerid to its hash bucket */
#define RDE_NBR_HASH(x)		\
	&rdenbrtable.hashtbl[(x) & rdenbrtable.hashmask]
971
972void
973rde_nbr_init(u_int32_t hashsize)
974{
975 struct rde_nbr_head *head;
976 u_int32_t hs, i;
977
978 for (hs = 1; hs < hashsize; hs <<= 1)
979 ;
980 rdenbrtable.hashtbl = calloc(hs, sizeof(struct rde_nbr_head));
981 if (rdenbrtable.hashtbl == NULL((void*)0))
982 fatal("rde_nbr_init");
983
984 for (i = 0; i < hs; i++)
985 LIST_INIT(&rdenbrtable.hashtbl[i])do { ((&rdenbrtable.hashtbl[i])->lh_first) = ((void*)0
); } while (0)
;
986
987 rdenbrtable.hashmask = hs - 1;
988
989 if ((nbrself = calloc(1, sizeof(*nbrself))) == NULL((void*)0))
990 fatal("rde_nbr_init");
991
992 nbrself->id.s_addr = rde_router_id();
993 nbrself->peerid = NBR_IDSELF1;
994 nbrself->state = NBR_STA_DOWN0x0001;
995 nbrself->self = 1;
996 head = RDE_NBR_HASH(NBR_IDSELF)&rdenbrtable.hashtbl[(1) & rdenbrtable.hashmask];
997 LIST_INSERT_HEAD(head, nbrself, hash)do { if (((nbrself)->hash.le_next = (head)->lh_first) !=
((void*)0)) (head)->lh_first->hash.le_prev = &(nbrself
)->hash.le_next; (head)->lh_first = (nbrself); (nbrself
)->hash.le_prev = &(head)->lh_first; } while (0)
;
998}
999
/* Release the self neighbor and the neighbor hash table. */
void
rde_nbr_free(void)
{
	free(nbrself);
	free(rdenbrtable.hashtbl);
}
1006
1007struct rde_nbr *
1008rde_nbr_find(u_int32_t peerid)
1009{
1010 struct rde_nbr_head *head;
1011 struct rde_nbr *nbr;
1012
1013 head = RDE_NBR_HASH(peerid)&rdenbrtable.hashtbl[(peerid) & rdenbrtable.hashmask];
1014
1015 LIST_FOREACH(nbr, head, hash)for((nbr) = ((head)->lh_first); (nbr)!= ((void*)0); (nbr) =
((nbr)->hash.le_next))
{
1016 if (nbr->peerid == peerid)
1017 return (nbr);
1018 }
1019
1020 return (NULL((void*)0));
1021}
1022
1023struct rde_nbr *
1024rde_nbr_new(u_int32_t peerid, struct rde_nbr *new)
1025{
1026 struct rde_nbr_head *head;
1027 struct rde_nbr *nbr;
1028 struct area *area;
1029 struct iface *iface;
1030
1031 if (rde_nbr_find(peerid))
1032 return (NULL((void*)0));
1033 if ((area = area_find(rdeconf, new->area_id)) == NULL((void*)0))
1034 fatalx("rde_nbr_new: unknown area");
1035
1036 if ((iface = if_find(new->ifindex)) == NULL((void*)0))
1037 fatalx("rde_nbr_new: unknown interface");
1038
1039 if ((nbr = calloc(1, sizeof(*nbr))) == NULL((void*)0))
1040 fatal("rde_nbr_new");
1041
1042 memcpy(nbr, new, sizeof(*nbr));
1043 nbr->peerid = peerid;
1044 nbr->area = area;
1045 nbr->iface = iface;
1046
1047 TAILQ_INIT(&nbr->req_list)do { (&nbr->req_list)->tqh_first = ((void*)0); (&
nbr->req_list)->tqh_last = &(&nbr->req_list)
->tqh_first; } while (0)
;
1048
1049 head = RDE_NBR_HASH(peerid)&rdenbrtable.hashtbl[(peerid) & rdenbrtable.hashmask];
1050 LIST_INSERT_HEAD(head, nbr, hash)do { if (((nbr)->hash.le_next = (head)->lh_first) != ((
void*)0)) (head)->lh_first->hash.le_prev = &(nbr)->
hash.le_next; (head)->lh_first = (nbr); (nbr)->hash.le_prev
= &(head)->lh_first; } while (0)
;
1051 LIST_INSERT_HEAD(&area->nbr_list, nbr, entry)do { if (((nbr)->entry.le_next = (&area->nbr_list)->
lh_first) != ((void*)0)) (&area->nbr_list)->lh_first
->entry.le_prev = &(nbr)->entry.le_next; (&area
->nbr_list)->lh_first = (nbr); (nbr)->entry.le_prev =
&(&area->nbr_list)->lh_first; } while (0)
;
1052
1053 return (nbr);
1054}
1055
1056void
1057rde_nbr_del(struct rde_nbr *nbr)
1058{
1059 if (nbr == NULL((void*)0))
1060 return;
1061
1062 rde_req_list_free(nbr);
1063
1064 LIST_REMOVE(nbr, entry)do { if ((nbr)->entry.le_next != ((void*)0)) (nbr)->entry
.le_next->entry.le_prev = (nbr)->entry.le_prev; *(nbr)->
entry.le_prev = (nbr)->entry.le_next; ; ; } while (0)
;
1065 LIST_REMOVE(nbr, hash)do { if ((nbr)->hash.le_next != ((void*)0)) (nbr)->hash
.le_next->hash.le_prev = (nbr)->hash.le_prev; *(nbr)->
hash.le_prev = (nbr)->hash.le_next; ; ; } while (0)
;
1066
1067 free(nbr);
1068}
1069
1070int
1071rde_nbr_loading(struct area *area)
1072{
1073 struct rde_nbr *nbr;
1074 int checkall = 0;
1075
1076 if (area == NULL((void*)0)) {
1077 area = LIST_FIRST(&rdeconf->area_list)((&rdeconf->area_list)->lh_first);
1078 checkall = 1;
1079 }
1080
1081 while (area != NULL((void*)0)) {
1082 LIST_FOREACH(nbr, &area->nbr_list, entry)for((nbr) = ((&area->nbr_list)->lh_first); (nbr)!= (
(void*)0); (nbr) = ((nbr)->entry.le_next))
{
1083 if (nbr->self)
1084 continue;
1085 if (nbr->state & NBR_STA_XCHNG0x0040 ||
1086 nbr->state & NBR_STA_LOAD0x0080)
1087 return (1);
1088 }
1089 if (!checkall)
1090 break;
1091 area = LIST_NEXT(area, entry)((area)->entry.le_next);
1092 }
1093
1094 return (0);
1095}
1096
1097struct rde_nbr *
1098rde_nbr_self(struct area *area)
1099{
1100 struct rde_nbr *nbr;
1101
1102 LIST_FOREACH(nbr, &area->nbr_list, entry)for((nbr) = ((&area->nbr_list)->lh_first); (nbr)!= (
(void*)0); (nbr) = ((nbr)->entry.le_next))
1103 if (nbr->self)
1104 return (nbr);
1105
1106 /* this may not happen */
1107 fatalx("rde_nbr_self: area without self");
1108 return (NULL((void*)0));
1109}
1110
1111/*
1112 * LSA req list
1113 */
1114void
1115rde_req_list_add(struct rde_nbr *nbr, struct lsa_hdr *lsa)
1116{
1117 struct rde_req_entry *le;
1118
1119 if ((le = calloc(1, sizeof(*le))) == NULL((void*)0))
1120 fatal("rde_req_list_add");
1121
1122 TAILQ_INSERT_TAIL(&nbr->req_list, le, entry)do { (le)->entry.tqe_next = ((void*)0); (le)->entry.tqe_prev
= (&nbr->req_list)->tqh_last; *(&nbr->req_list
)->tqh_last = (le); (&nbr->req_list)->tqh_last =
&(le)->entry.tqe_next; } while (0)
;
1123 le->type = lsa->type;
1124 le->ls_id = lsa->ls_id;
1125 le->adv_rtr = lsa->adv_rtr;
1126}
1127
1128int
1129rde_req_list_exists(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
1130{
1131 struct rde_req_entry *le;
1132
1133 TAILQ_FOREACH(le, &nbr->req_list, entry)for((le) = ((&nbr->req_list)->tqh_first); (le) != (
(void*)0); (le) = ((le)->entry.tqe_next))
{
1134 if ((lsa_hdr->type == le->type) &&
1135 (lsa_hdr->ls_id == le->ls_id) &&
1136 (lsa_hdr->adv_rtr == le->adv_rtr))
1137 return (1);
1138 }
1139 return (0);
1140}
1141
1142void
1143rde_req_list_del(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
1144{
1145 struct rde_req_entry *le;
1146
1147 TAILQ_FOREACH(le, &nbr->req_list, entry)for((le) = ((&nbr->req_list)->tqh_first); (le) != (
(void*)0); (le) = ((le)->entry.tqe_next))
{
1148 if ((lsa_hdr->type == le->type) &&
1149 (lsa_hdr->ls_id == le->ls_id) &&
1150 (lsa_hdr->adv_rtr == le->adv_rtr)) {
1151 TAILQ_REMOVE(&nbr->req_list, le, entry)do { if (((le)->entry.tqe_next) != ((void*)0)) (le)->entry
.tqe_next->entry.tqe_prev = (le)->entry.tqe_prev; else (
&nbr->req_list)->tqh_last = (le)->entry.tqe_prev
; *(le)->entry.tqe_prev = (le)->entry.tqe_next; ; ; } while
(0)
;
1152 free(le);
1153 return;
1154 }
1155 }
1156}
1157
/* Drain and free the entire LSA request list of 'nbr'. */
void
rde_req_list_free(struct rde_nbr *nbr)
{
	struct rde_req_entry	*le;

	while ((le = TAILQ_FIRST(&nbr->req_list)) != NULL) {
		TAILQ_REMOVE(&nbr->req_list, le, entry);
		free(le);
	}
}
1168
1169/*
1170 * as-external LSA handling
1171 */
1172struct iface *
1173rde_asext_lookup(struct in6_addr prefix, int plen)
1174{
1175
1176 struct area *area;
1177 struct iface *iface;
1178 struct iface_addr *ia;
1179 struct in6_addr ina, inb;
1180
1181 LIST_FOREACH(area, &rdeconf->area_list, entry)for((area) = ((&rdeconf->area_list)->lh_first); (area
)!= ((void*)0); (area) = ((area)->entry.le_next))
{
1182 LIST_FOREACH(iface, &area->iface_list, entry)for((iface) = ((&area->iface_list)->lh_first); (iface
)!= ((void*)0); (iface) = ((iface)->entry.le_next))
{
1183 TAILQ_FOREACH(ia, &iface->ifa_list, entry)for((ia) = ((&iface->ifa_list)->tqh_first); (ia) !=
((void*)0); (ia) = ((ia)->entry.tqe_next))
{
1184 if (IN6_IS_ADDR_LINKLOCAL(&ia->addr)(((&ia->addr)->__u6_addr.__u6_addr8[0] == 0xfe) &&
(((&ia->addr)->__u6_addr.__u6_addr8[1] & 0xc0)
== 0x80))
)
1185 continue;
1186
1187 inet6applymask(&ina, &ia->addr, ia->prefixlen);
1188 inet6applymask(&inb, &prefix, ia->prefixlen);
1189 if (IN6_ARE_ADDR_EQUAL(&ina, &inb)(memcmp(&(&ina)->__u6_addr.__u6_addr8[0], &(&
inb)->__u6_addr.__u6_addr8[0], sizeof(struct in6_addr)) ==
0)
&&
1190 (plen == -1 || plen == ia->prefixlen))
1191 return (iface);
1192 }
1193 }
1194 }
1195 return (NULL((void*)0));
1196}
1197
1198void
1199rde_asext_get(struct kroute *kr)
1200{
1201 struct vertex *v;
1202 struct lsa *lsa;
1203
1204 if (rde_asext_lookup(kr->prefix, kr->prefixlen)) {
1205 /* already announced as (stub) net LSA */
1206 log_debug("rde_asext_get: %s/%d is net LSA",
1207 log_in6addr(&kr->prefix), kr->prefixlen);
1208 return;
1209 }
1210
1211 /* update of seqnum is done by lsa_merge */
1212 if ((lsa = orig_asext_lsa(kr, DEFAULT_AGE0))) {
1213 v = lsa_find(NULL((void*)0), lsa->hdr.type, lsa->hdr.ls_id,
1214 lsa->hdr.adv_rtr);
1215 lsa_merge(nbrself, lsa, v);
1216 }
1217}
1218
1219void
1220rde_asext_put(struct kroute *kr)
1221{
1222 struct vertex *v;
1223 struct lsa *lsa;
1224 /*
1225 * just try to remove the LSA. If the prefix is announced as
1226 * stub net LSA lsa_find() will fail later and nothing will happen.
1227 */
1228
1229 /* remove by reflooding with MAX_AGE */
1230 if ((lsa = orig_asext_lsa(kr, MAX_AGE3600))) {
1231 v = lsa_find(NULL((void*)0), lsa->hdr.type, lsa->hdr.ls_id,
1232 lsa->hdr.adv_rtr);
1233
1234 /*
1235 * if v == NULL no LSA is in the table and
1236 * nothing has to be done.
1237 */
1238 if (v)
1239 lsa_merge(nbrself, lsa, v);
1240 else
1241 free(lsa);
1242 }
1243}
1244
1245/*
1246 * summary LSA stuff
1247 */
1248void
1249rde_summary_update(struct rt_node *rte, struct area *area)
1250{
1251 struct vertex *v = NULL((void*)0);
1252//XXX struct lsa *lsa;
1253 u_int16_t type = 0;
1254
1255 /* first check if we actually need to announce this route */
1256 if (!(rte->d_type == DT_NET || rte->flags & OSPF_RTR_E0x02))
1257 return;
1258 /* never create summaries for as-ext LSA */
1259 if (rte->p_type == PT_TYPE1_EXT || rte->p_type == PT_TYPE2_EXT)
1260 return;
1261 /* no need for summary LSA in the originating area */
1262 if (rte->area.s_addr == area->id.s_addr)
1263 return;
1264 /* no need to originate inter-area routes to the backbone */
1265 if (rte->p_type == PT_INTER_AREA && area->id.s_addr == INADDR_ANY((u_int32_t)(0x00000000)))
1266 return;
1267 /* TODO nexthop check, nexthop part of area -> no summary */
1268 if (rte->cost >= LS_INFINITY0xffffff)
1269 return;
1270 /* TODO AS border router specific checks */
1271 /* TODO inter-area network route stuff */
1272 /* TODO intra-area stuff -- condense LSA ??? */
1273
1274 if (rte->d_type == DT_NET) {
1275 type = LSA_TYPE_INTER_A_PREFIX0x2003;
Value stored to 'type' is never read
1276 } else if (rte->d_type == DT_RTR) {
1277 type = LSA_TYPE_INTER_A_ROUTER0x2004;
1278 } else
1279
1280#if 0 /* XXX a lot todo */
1281 /* update lsa but only if it was changed */
1282 v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());
1283 lsa = orig_sum_lsa(rte, area, type, rte->invalid);
1284 lsa_merge(rde_nbr_self(area), lsa, v);
1285
1286 if (v == NULL((void*)0))
1287 v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());
1288#endif
1289
1290 /* suppressed/deleted routes are not found in the second lsa_find */
1291 if (v)
1292 v->cost = rte->cost;
1293}
1294
1295/*
1296 * Functions for self-originated LSAs
1297 */
1298
1299/* Prefix LSAs have variable size. We have to be careful to copy the right
1300 * amount of bytes, and to realloc() the right amount of memory. */
1301void
1302append_prefix_lsa(struct lsa **lsa, u_int16_t *len, struct lsa_prefix *prefix)
1303{
1304 struct lsa_prefix *copy;
1305 unsigned int lsa_prefix_len;
1306 unsigned int new_len;
1307 char *new_lsa;
1308
1309 lsa_prefix_len = sizeof(struct lsa_prefix)
1310 + LSA_PREFIXSIZE(prefix->prefixlen)(((prefix->prefixlen) + 31)/32 * 4);
1311
1312 new_len = *len + lsa_prefix_len;
1313
1314 /* Make sure we have enough space for this prefix. */
1315 if ((new_lsa = realloc(*lsa, new_len)) == NULL((void*)0))
1316 fatalx("append_prefix_lsa");
1317
1318 /* Append prefix to LSA. */
1319 copy = (struct lsa_prefix *)(new_lsa + *len);
1320 memcpy(copy, prefix, lsa_prefix_len);
1321
1322 *lsa = (struct lsa *)new_lsa;
1323 *len = new_len;
1324}
1325
1326int
1327prefix_compare(struct prefix_node *a, struct prefix_node *b)
1328{
1329 struct lsa_prefix *p;
1330 struct lsa_prefix *q;
1331 int i;
1332 int len;
1333
1334 p = a->prefix;
1335 q = b->prefix;
1336
1337 len = MINIMUM(LSA_PREFIXSIZE(p->prefixlen), LSA_PREFIXSIZE(q->prefixlen))((((((p->prefixlen) + 31)/32 * 4)) < ((((q->prefixlen
) + 31)/32 * 4))) ? ((((p->prefixlen) + 31)/32 * 4)) : (((
(q->prefixlen) + 31)/32 * 4)))
;
1338
1339 i = memcmp(p + 1, q + 1, len);
1340 if (i)
1341 return (i);
1342 if (p->prefixlen < q->prefixlen)
1343 return (-1);
1344 if (p->prefixlen > q->prefixlen)
1345 return (1);
1346 return (0);
1347}
1348
/*
 * Walk all prefixes contained in link LSA 'lsa' and insert the usable
 * ones into 'tree'; on duplicates the option bits are merged into the
 * existing node.  Link-local prefixes and prefixes flagged NU
 * (no-unicast) or LA (local-address) are skipped.
 * NOTE(review): tree nodes keep pointers into the LSA body itself, so
 * the LSA must outlive the tree -- confirm against callers.
 */
void
prefix_tree_add(struct prefix_tree *tree, struct lsa_link *lsa)
{
	struct prefix_node	*old;
	struct prefix_node	*new;
	struct in6_addr		 addr;
	unsigned int		 len;
	unsigned int		 i;
	char			*cur_prefix;

	/* prefixes start right after the fixed link-LSA header */
	cur_prefix = (char *)(lsa + 1);

	for (i = 0; i < ntohl(lsa->numprefix); i++) {
		if ((new = calloc(1, sizeof(*new))) == NULL)
			fatal("prefix_tree_add");
		new->prefix = (struct lsa_prefix *)cur_prefix;

		/* each record is header plus prefixlen rounded to 32 bits */
		len = sizeof(*new->prefix)
		    + LSA_PREFIXSIZE(new->prefix->prefixlen);

		/* copy the (possibly partial) prefix into a full address */
		bzero(&addr, sizeof(addr));
		memcpy(&addr, new->prefix + 1,
		    LSA_PREFIXSIZE(new->prefix->prefixlen));

		/* metric is unused here; normalize it in place */
		new->prefix->metric = 0;

		if (!(IN6_IS_ADDR_LINKLOCAL(&addr)) &&
		    (new->prefix->options & OSPF_PREFIX_NU) == 0 &&
		    (new->prefix->options & OSPF_PREFIX_LA) == 0) {
			old = RB_INSERT(prefix_tree, tree, new);
			if (old != NULL) {
				/* duplicate prefix: merge the option bits */
				old->prefix->options |= new->prefix->options;
				free(new);
			}
		} else
			free(new);

		cur_prefix = cur_prefix + len;
	}
}
1389
/* Generate the red-black tree functions for prefix_tree, ordered by
 * prefix_compare(). */
RB_GENERATE(prefix_tree, prefix_node, entry, prefix_compare)
1391
1392struct lsa *
1393orig_intra_lsa_net(struct area *area, struct iface *iface, struct vertex *old)
1394{
1395 struct lsa *lsa;
1396 struct vertex *v;
1397 struct rde_nbr *nbr;
1398 struct prefix_node *node;
1399 struct prefix_tree tree;
1400 int num_full_nbr;
1401 u_int16_t len;
1402 u_int16_t numprefix;
1403
1404 log_debug("orig_intra_lsa_net: area %s, interface %s",
1405 inet_ntoa(area->id), iface->name);
1406
1407 RB_INIT(&tree)do { (&tree)->rbh_root = ((void*)0); } while (0);
1408
1409 if (iface->state & IF_STA_DR0x40) {
1410 num_full_nbr = 0;
1411 LIST_FOREACH(nbr, &area->nbr_list, entry)for((nbr) = ((&area->nbr_list)->lh_first); (nbr)!= (
(void*)0); (nbr) = ((nbr)->entry.le_next))
{
1412 if (nbr->self ||
1413 nbr->iface->ifindex != iface->ifindex ||
1414 (nbr->state & NBR_STA_FULL0x0100) == 0)
1415 continue;
1416 num_full_nbr++;
1417 v = lsa_find(iface, htons(LSA_TYPE_LINK)(__uint16_t)(__builtin_constant_p(0x0008) ? (__uint16_t)(((__uint16_t
)(0x0008) & 0xffU) << 8 | ((__uint16_t)(0x0008) &
0xff00U) >> 8) : __swap16md(0x0008))
,
1418 htonl(nbr->iface_id)(__uint32_t)(__builtin_constant_p(nbr->iface_id) ? (__uint32_t
)(((__uint32_t)(nbr->iface_id) & 0xff) << 24 | (
(__uint32_t)(nbr->iface_id) & 0xff00) << 8 | ((__uint32_t
)(nbr->iface_id) & 0xff0000) >> 8 | ((__uint32_t
)(nbr->iface_id) & 0xff000000) >> 24) : __swap32md
(nbr->iface_id))
, nbr->id.s_addr);
1419 if (v)
1420 prefix_tree_add(&tree, &v->lsa->data.link);
1421 }
1422 if (num_full_nbr == 0) {
1423 /* There are no adjacent neighbors on link.
1424 * If a copy of this LSA already exists in DB,
1425 * it needs to be flushed. orig_intra_lsa_rtr()
1426 * will take care of prefixes configured on
1427 * this interface. */
1428 if (!old)
1429 return NULL((void*)0);
1430 } else {
1431 /* Add our own prefixes configured for this link. */
1432 v = lsa_find(iface, htons(LSA_TYPE_LINK)(__uint16_t)(__builtin_constant_p(0x0008) ? (__uint16_t)(((__uint16_t
)(0x0008) & 0xffU) << 8 | ((__uint16_t)(0x0008) &
0xff00U) >> 8) : __swap16md(0x0008))
,
1433 htonl(iface->ifindex)(__uint32_t)(__builtin_constant_p(iface->ifindex) ? (__uint32_t
)(((__uint32_t)(iface->ifindex) & 0xff) << 24 | (
(__uint32_t)(iface->ifindex) & 0xff00) << 8 | ((
__uint32_t)(iface->ifindex) & 0xff0000) >> 8 | (
(__uint32_t)(iface->ifindex) & 0xff000000) >> 24
) : __swap32md(iface->ifindex))
, rde_router_id());
1434 if (v)
1435 prefix_tree_add(&tree, &v->lsa->data.link);
1436 }
1437 /* Continue only if a copy of this LSA already exists in DB.
1438 * It needs to be flushed. */
1439 } else if (!old)
1440 return NULL((void*)0);
1441
1442 len = sizeof(struct lsa_hdr) + sizeof(struct lsa_intra_prefix);
1443 if ((lsa = calloc(1, len)) == NULL((void*)0))
1444 fatal("orig_intra_lsa_net");
1445
1446 lsa->data.pref_intra.ref_type = htons(LSA_TYPE_NETWORK)(__uint16_t)(__builtin_constant_p(0x2002) ? (__uint16_t)(((__uint16_t
)(0x2002) & 0xffU) << 8 | ((__uint16_t)(0x2002) &
0xff00U) >> 8) : __swap16md(0x2002))
;
1447 lsa->data.pref_intra.ref_ls_id = htonl(iface->ifindex)(__uint32_t)(__builtin_constant_p(iface->ifindex) ? (__uint32_t
)(((__uint32_t)(iface->ifindex) & 0xff) << 24 | (
(__uint32_t)(iface->ifindex) & 0xff00) << 8 | ((
__uint32_t)(iface->ifindex) & 0xff0000) >> 8 | (
(__uint32_t)(iface->ifindex) & 0xff000000) >> 24
) : __swap32md(iface->ifindex))
;
1448 lsa->data.pref_intra.ref_adv_rtr = rde_router_id();
1449
1450 numprefix = 0;
1451 RB_FOREACH(node, prefix_tree, &tree)for ((node) = prefix_tree_RB_MINMAX(&tree, -1); (node) !=
((void*)0); (node) = prefix_tree_RB_NEXT(node))
{
1452 append_prefix_lsa(&lsa, &len, node->prefix);
1453 numprefix++;
1454 }
1455
1456 lsa->data.pref_intra.numprefix = htons(numprefix)(__uint16_t)(__builtin_constant_p(numprefix) ? (__uint16_t)((
(__uint16_t)(numprefix) & 0xffU) << 8 | ((__uint16_t
)(numprefix) & 0xff00U) >> 8) : __swap16md(numprefix
))
;
1457
1458 while (!RB_EMPTY(&tree)((&tree)->rbh_root == ((void*)0)))
1459 free(RB_REMOVE(prefix_tree, &tree, RB_ROOT(&tree))prefix_tree_RB_REMOVE(&tree, (&tree)->rbh_root));
1460
1461 /* LSA header */
1462 /* If numprefix is zero, originate with MAX_AGE to flush LSA. */
1463 lsa->hdr.age = numprefix == 0 ? htons(MAX_AGE)(__uint16_t)(__builtin_constant_p(3600) ? (__uint16_t)(((__uint16_t
)(3600) & 0xffU) << 8 | ((__uint16_t)(3600) & 0xff00U
) >> 8) : __swap16md(3600))
: htons(DEFAULT_AGE)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t
)(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U
) >> 8) : __swap16md(0))
;
1464 lsa->hdr.type = htons(LSA_TYPE_INTRA_A_PREFIX)(__uint16_t)(__builtin_constant_p(0x2009) ? (__uint16_t)(((__uint16_t
)(0x2009) & 0xffU) << 8 | ((__uint16_t)(0x2009) &
0xff00U) >> 8) : __swap16md(0x2009))
;
1465 lsa->hdr.ls_id = htonl(iface->ifindex)(__uint32_t)(__builtin_constant_p(iface->ifindex) ? (__uint32_t
)(((__uint32_t)(iface->ifindex) & 0xff) << 24 | (
(__uint32_t)(iface->ifindex) & 0xff00) << 8 | ((
__uint32_t)(iface->ifindex) & 0xff0000) >> 8 | (
(__uint32_t)(iface->ifindex) & 0xff000000) >> 24
) : __swap32md(iface->ifindex))
;
1466 lsa->hdr.adv_rtr = rde_router_id();
1467 lsa->hdr.seq_num = htonl(INIT_SEQ_NUM)(__uint32_t)(__builtin_constant_p(0x80000001U) ? (__uint32_t)
(((__uint32_t)(0x80000001U) & 0xff) << 24 | ((__uint32_t
)(0x80000001U) & 0xff00) << 8 | ((__uint32_t)(0x80000001U
) & 0xff0000) >> 8 | ((__uint32_t)(0x80000001U) &
0xff000000) >> 24) : __swap32md(0x80000001U))
;
1468 lsa->hdr.len = htons(len)(__uint16_t)(__builtin_constant_p(len) ? (__uint16_t)(((__uint16_t
)(len) & 0xffU) << 8 | ((__uint16_t)(len) & 0xff00U
) >> 8) : __swap16md(len))
;
1469 lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET))(__uint16_t)(__builtin_constant_p(iso_cksum(lsa, len, __builtin_offsetof
(struct lsa_hdr, ls_chksum))) ? (__uint16_t)(((__uint16_t)(iso_cksum
(lsa, len, __builtin_offsetof(struct lsa_hdr, ls_chksum))) &
0xffU) << 8 | ((__uint16_t)(iso_cksum(lsa, len, __builtin_offsetof
(struct lsa_hdr, ls_chksum))) & 0xff00U) >> 8) : __swap16md
(iso_cksum(lsa, len, __builtin_offsetof(struct lsa_hdr, ls_chksum
))))
;
1470
1471 return lsa;
1472}
1473
1474struct lsa *
1475orig_intra_lsa_rtr(struct area *area, struct vertex *old)
1476{
1477 char lsa_prefix_buf[sizeof(struct lsa_prefix)
1478 + sizeof(struct in6_addr)];
1479 struct lsa *lsa;
1480 struct lsa_prefix *lsa_prefix;
1481 struct in6_addr *prefix;
1482 struct iface *iface;
1483 struct iface_addr *ia;
1484 struct rde_nbr *nbr;
1485 u_int16_t len;
1486 u_int16_t numprefix;
1487
1488 len = sizeof(struct lsa_hdr) + sizeof(struct lsa_intra_prefix);
1489 if ((lsa = calloc(1, len)) == NULL((void*)0))
1490 fatal("orig_intra_lsa_rtr");
1491
1492 lsa->data.pref_intra.ref_type = htons(LSA_TYPE_ROUTER)(__uint16_t)(__builtin_constant_p(0x2001) ? (__uint16_t)(((__uint16_t
)(0x2001) & 0xffU) << 8 | ((__uint16_t)(0x2001) &
0xff00U) >> 8) : __swap16md(0x2001))
;
1493 lsa->data.pref_intra.ref_ls_id = 0;
1494 lsa->data.pref_intra.ref_adv_rtr = rde_router_id();
1495
1496 numprefix = 0;
1497 LIST_FOREACH(iface, &area->iface_list, entry)for((iface) = ((&area->iface_list)->lh_first); (iface
)!= ((void*)0); (iface) = ((iface)->entry.le_next))
{
1498 if (!((iface->flags & IFF_UP0x1) &&
1499 LINK_STATE_IS_UP(iface->linkstate)((iface->linkstate) >= 4 || (iface->linkstate) == 0)) &&
1500 !(iface->if_type == IFT_CARP0xf7))
1501 /* interface or link state down
1502 * and not a carp interface */
1503 continue;
1504
1505 if (iface->if_type == IFT_CARP0xf7 &&
1506 (iface->linkstate == LINK_STATE_UNKNOWN0 ||
1507 iface->linkstate == LINK_STATE_INVALID1))
1508 /* carp interface in state invalid or unknown */
1509 continue;
1510
1511 if ((iface->state & IF_STA_DOWN0x01) &&
1512 !(iface->cflags & F_IFACE_PASSIVE0x01))
1513 /* passive interfaces stay in state DOWN */
1514 continue;
1515
1516 /* Broadcast links with adjacencies are handled
1517 * by orig_intra_lsa_net(), ignore. */
1518 if (iface->type == IF_TYPE_BROADCAST ||
1519 iface->type == IF_TYPE_NBMA) {
1520 if (iface->state & IF_STA_WAITING0x04)
1521 /* Skip, we're still waiting for
1522 * adjacencies to form. */
1523 continue;
1524
1525 LIST_FOREACH(nbr, &area->nbr_list, entry)for((nbr) = ((&area->nbr_list)->lh_first); (nbr)!= (
(void*)0); (nbr) = ((nbr)->entry.le_next))
1526 if (!nbr->self &&
1527 nbr->iface->ifindex == iface->ifindex &&
1528 nbr->state & NBR_STA_FULL0x0100)
1529 break;
1530 if (nbr)
1531 continue;
1532 }
1533
1534 lsa_prefix = (struct lsa_prefix *)lsa_prefix_buf;
1535
1536 TAILQ_FOREACH(ia, &iface->ifa_list, entry)for((ia) = ((&iface->ifa_list)->tqh_first); (ia) !=
((void*)0); (ia) = ((ia)->entry.tqe_next))
{
1537 if (IN6_IS_ADDR_LINKLOCAL(&ia->addr)(((&ia->addr)->__u6_addr.__u6_addr8[0] == 0xfe) &&
(((&ia->addr)->__u6_addr.__u6_addr8[1] & 0xc0)
== 0x80))
)
1538 continue;
1539
1540 bzero(lsa_prefix_buf, sizeof(lsa_prefix_buf));
1541
1542 if (iface->type == IF_TYPE_POINTOMULTIPOINT ||
1543 iface->state & IF_STA_LOOPBACK0x02) {
1544 lsa_prefix->prefixlen = 128;
1545 lsa_prefix->metric = 0;
1546 } else if ((iface->if_type == IFT_CARP0xf7 &&
1547 iface->linkstate == LINK_STATE_DOWN2) ||
1548 !(iface->depend_ok)) {
1549 /* carp interfaces in state backup are
1550 * announced with high metric for faster
1551 * failover. */
1552 lsa_prefix->prefixlen = ia->prefixlen;
1553 lsa_prefix->metric = MAX_METRIC65535;
1554 } else {
1555 lsa_prefix->prefixlen = ia->prefixlen;
1556 lsa_prefix->metric = htons(iface->metric)(__uint16_t)(__builtin_constant_p(iface->metric) ? (__uint16_t
)(((__uint16_t)(iface->metric) & 0xffU) << 8 | (
(__uint16_t)(iface->metric) & 0xff00U) >> 8) : __swap16md
(iface->metric))
;
1557 }
1558
1559 if (lsa_prefix->prefixlen == 128)
1560 lsa_prefix->options |= OSPF_PREFIX_LA0x02;
1561
1562 log_debug("orig_intra_lsa_rtr: area %s, interface %s: "
1563 "%s/%d, metric %d", inet_ntoa(area->id),
1564 iface->name, log_in6addr(&ia->addr),
1565 lsa_prefix->prefixlen, ntohs(lsa_prefix->metric)(__uint16_t)(__builtin_constant_p(lsa_prefix->metric) ? (__uint16_t
)(((__uint16_t)(lsa_prefix->metric) & 0xffU) << 8
| ((__uint16_t)(lsa_prefix->metric) & 0xff00U) >>
8) : __swap16md(lsa_prefix->metric))
);
1566
1567 prefix = (struct in6_addr *)(lsa_prefix + 1);
1568 inet6applymask(prefix, &ia->addr,
1569 lsa_prefix->prefixlen);
1570 append_prefix_lsa(&lsa, &len, lsa_prefix);
1571 numprefix++;
1572 }
1573
1574 /* TOD: Add prefixes of directly attached hosts, too */
1575 /* TOD: Add prefixes for virtual links */
1576 }
1577
1578 /* If no prefixes were included, continue only if a copy of this
1579 * LSA already exists in DB. It needs to be flushed. */
1580 if (numprefix == 0 && !old) {
1581 free(lsa);
1582 return NULL((void*)0);
1583 }
1584
1585 lsa->data.pref_intra.numprefix = htons(numprefix)(__uint16_t)(__builtin_constant_p(numprefix) ? (__uint16_t)((
(__uint16_t)(numprefix) & 0xffU) << 8 | ((__uint16_t
)(numprefix) & 0xff00U) >> 8) : __swap16md(numprefix
))
;
1586
1587 /* LSA header */
1588 /* If numprefix is zero, originate with MAX_AGE to flush LSA. */
1589 lsa->hdr.age = numprefix == 0 ? htons(MAX_AGE)(__uint16_t)(__builtin_constant_p(3600) ? (__uint16_t)(((__uint16_t
)(3600) & 0xffU) << 8 | ((__uint16_t)(3600) & 0xff00U
) >> 8) : __swap16md(3600))
: htons(DEFAULT_AGE)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t
)(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U
) >> 8) : __swap16md(0))
;
1590 lsa->hdr.type = htons(LSA_TYPE_INTRA_A_PREFIX)(__uint16_t)(__builtin_constant_p(0x2009) ? (__uint16_t)(((__uint16_t
)(0x2009) & 0xffU) << 8 | ((__uint16_t)(0x2009) &
0xff00U) >> 8) : __swap16md(0x2009))
;
1591 lsa->hdr.ls_id = htonl(LS_ID_INTRA_RTR)(__uint32_t)(__builtin_constant_p(0x01000000) ? (__uint32_t)(
((__uint32_t)(0x01000000) & 0xff) << 24 | ((__uint32_t
)(0x01000000) & 0xff00) << 8 | ((__uint32_t)(0x01000000
) & 0xff0000) >> 8 | ((__uint32_t)(0x01000000) &
0xff000000) >> 24) : __swap32md(0x01000000))
;
1592 lsa->hdr.adv_rtr = rde_router_id();
1593 lsa->hdr.seq_num = htonl(INIT_SEQ_NUM)(__uint32_t)(__builtin_constant_p(0x80000001U) ? (__uint32_t)
(((__uint32_t)(0x80000001U) & 0xff) << 24 | ((__uint32_t
)(0x80000001U) & 0xff00) << 8 | ((__uint32_t)(0x80000001U
) & 0xff0000) >> 8 | ((__uint32_t)(0x80000001U) &
0xff000000) >> 24) : __swap32md(0x80000001U))
;
1594 lsa->hdr.len = htons(len)(__uint16_t)(__builtin_constant_p(len) ? (__uint16_t)(((__uint16_t
)(len) & 0xffU) << 8 | ((__uint16_t)(len) & 0xff00U
) >> 8) : __swap16md(len))
;
1595 lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET))(__uint16_t)(__builtin_constant_p(iso_cksum(lsa, len, __builtin_offsetof
(struct lsa_hdr, ls_chksum))) ? (__uint16_t)(((__uint16_t)(iso_cksum
(lsa, len, __builtin_offsetof(struct lsa_hdr, ls_chksum))) &
0xffU) << 8 | ((__uint16_t)(iso_cksum(lsa, len, __builtin_offsetof
(struct lsa_hdr, ls_chksum))) & 0xff00U) >> 8) : __swap16md
(iso_cksum(lsa, len, __builtin_offsetof(struct lsa_hdr, ls_chksum
))))
;
1596
1597 return lsa;
1598}
1599
1600void
1601orig_intra_area_prefix_lsas(struct area *area)
1602{
1603 struct lsa *lsa;
1604 struct vertex *old;
1605 struct iface *iface;
1606
1607 LIST_FOREACH(iface, &area->iface_list, entry)for((iface) = ((&area->iface_list)->lh_first); (iface
)!= ((void*)0); (iface) = ((iface)->entry.le_next))
{
1608 if (iface->type == IF_TYPE_BROADCAST ||
1609 iface->type == IF_TYPE_NBMA) {
1610 old = lsa_find(iface, htons(LSA_TYPE_INTRA_A_PREFIX)(__uint16_t)(__builtin_constant_p(0x2009) ? (__uint16_t)(((__uint16_t
)(0x2009) & 0xffU) << 8 | ((__uint16_t)(0x2009) &
0xff00U) >> 8) : __swap16md(0x2009))
,
1611 htonl(iface->ifindex)(__uint32_t)(__builtin_constant_p(iface->ifindex) ? (__uint32_t
)(((__uint32_t)(iface->ifindex) & 0xff) << 24 | (
(__uint32_t)(iface->ifindex) & 0xff00) << 8 | ((
__uint32_t)(iface->ifindex) & 0xff0000) >> 8 | (
(__uint32_t)(iface->ifindex) & 0xff000000) >> 24
) : __swap32md(iface->ifindex))
, rde_router_id());
1612 lsa = orig_intra_lsa_net(area, iface, old);
1613 if (lsa)
1614 lsa_merge(rde_nbr_self(area), lsa, old);
1615 }
1616 }
1617
1618 old = lsa_find_tree(&area->lsa_tree, htons(LSA_TYPE_INTRA_A_PREFIX)(__uint16_t)(__builtin_constant_p(0x2009) ? (__uint16_t)(((__uint16_t
)(0x2009) & 0xffU) << 8 | ((__uint16_t)(0x2009) &
0xff00U) >> 8) : __swap16md(0x2009))
,
1619 htonl(LS_ID_INTRA_RTR)(__uint32_t)(__builtin_constant_p(0x01000000) ? (__uint32_t)(
((__uint32_t)(0x01000000) & 0xff) << 24 | ((__uint32_t
)(0x01000000) & 0xff00) << 8 | ((__uint32_t)(0x01000000
) & 0xff0000) >> 8 | ((__uint32_t)(0x01000000) &
0xff000000) >> 24) : __swap32md(0x01000000))
, rde_router_id());
1620 lsa = orig_intra_lsa_rtr(area, old);
1621 if (lsa)
1622 lsa_merge(rde_nbr_self(area), lsa, old);
1623}
1624
1625int
1626comp_asext(struct lsa *a, struct lsa *b)
1627{
1628 /* compare prefixes, if they are equal or not */
1629 if (a->data.asext.prefix.prefixlen != b->data.asext.prefix.prefixlen)
1630 return (-1);
1631 return (memcmp(
1632 (char *)a + sizeof(struct lsa_hdr) + sizeof(struct lsa_asext),
1633 (char *)b + sizeof(struct lsa_hdr) + sizeof(struct lsa_asext),
1634 LSA_PREFIXSIZE(a->data.asext.prefix.prefixlen)(((a->data.asext.prefix.prefixlen) + 31)/32 * 4)));
1635}
1636
1637struct lsa *
1638orig_asext_lsa(struct kroute *kr, u_int16_t age)
1639{
1640 struct lsa *lsa;
1641 u_int32_t ext_tag;
1642 u_int16_t len, ext_off;
1643
1644 len = sizeof(struct lsa_hdr) + sizeof(struct lsa_asext) +
1645 LSA_PREFIXSIZE(kr->prefixlen)(((kr->prefixlen) + 31)/32 * 4);
1646
1647 /*
1648 * nexthop -- on connected routes we are the nexthop,
1649 * on all other cases we should announce the true nexthop
1650 * unless that nexthop is outside of the ospf cloud.
1651 * XXX for now we don't do this.
1652 */
1653
1654 ext_off = len;
1655 if (kr->ext_tag) {
1656 len += sizeof(ext_tag);
1657 }
1658 if ((lsa = calloc(1, len)) == NULL((void*)0))
1659 fatal("orig_asext_lsa");
1660
1661 log_debug("orig_asext_lsa: %s/%d age %d",
1662 log_in6addr(&kr->prefix), kr->prefixlen, age);
1663
1664 /* LSA header */
1665 lsa->hdr.age = htons(age)(__uint16_t)(__builtin_constant_p(age) ? (__uint16_t)(((__uint16_t
)(age) & 0xffU) << 8 | ((__uint16_t)(age) & 0xff00U
) >> 8) : __swap16md(age))
;
1666 lsa->hdr.type = htons(LSA_TYPE_EXTERNAL)(__uint16_t)(__builtin_constant_p(0x4005) ? (__uint16_t)(((__uint16_t
)(0x4005) & 0xffU) << 8 | ((__uint16_t)(0x4005) &
0xff00U) >> 8) : __swap16md(0x4005))
;
1667 lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
1668 lsa->hdr.seq_num = htonl(INIT_SEQ_NUM)(__uint32_t)(__builtin_constant_p(0x80000001U) ? (__uint32_t)
(((__uint32_t)(0x80000001U) & 0xff) << 24 | ((__uint32_t
)(0x80000001U) & 0xff00) << 8 | ((__uint32_t)(0x80000001U
) & 0xff0000) >> 8 | ((__uint32_t)(0x80000001U) &
0xff000000) >> 24) : __swap32md(0x80000001U))
;
1669 lsa->hdr.len = htons(len)(__uint16_t)(__builtin_constant_p(len) ? (__uint16_t)(((__uint16_t
)(len) & 0xffU) << 8 | ((__uint16_t)(len) & 0xff00U
) >> 8) : __swap16md(len))
;
1670
1671 lsa->data.asext.prefix.prefixlen = kr->prefixlen;
1672 memcpy((char *)lsa + sizeof(struct lsa_hdr) + sizeof(struct lsa_asext),
1673 &kr->prefix, LSA_PREFIXSIZE(kr->prefixlen)(((kr->prefixlen) + 31)/32 * 4));
1674
1675 lsa->hdr.ls_id = lsa_find_lsid(&asext_tree, comp_asext, lsa);
1676
1677 if (age == MAX_AGE3600) {
1678 /* inherit metric and ext_tag from the current LSA,
1679 * some routers don't like to get withdraws that are
1680 * different from what they have in their table.
1681 */
1682 struct vertex *v;
1683 v = lsa_find(NULL((void*)0), lsa->hdr.type, lsa->hdr.ls_id,
1684 lsa->hdr.adv_rtr);
1685 if (v != NULL((void*)0)) {
1686 kr->metric = ntohl(v->lsa->data.asext.metric)(__uint32_t)(__builtin_constant_p(v->lsa->data.asext.metric
) ? (__uint32_t)(((__uint32_t)(v->lsa->data.asext.metric
) & 0xff) << 24 | ((__uint32_t)(v->lsa->data.
asext.metric) & 0xff00) << 8 | ((__uint32_t)(v->
lsa->data.asext.metric) & 0xff0000) >> 8 | ((__uint32_t
)(v->lsa->data.asext.metric) & 0xff000000) >>
24) : __swap32md(v->lsa->data.asext.metric))
;
1687 if (kr->metric & LSA_ASEXT_T_FLAG0x01000000) {
1688 memcpy(&ext_tag, (char *)v->lsa + ext_off,
1689 sizeof(ext_tag));
1690 kr->ext_tag = ntohl(ext_tag)(__uint32_t)(__builtin_constant_p(ext_tag) ? (__uint32_t)(((__uint32_t
)(ext_tag) & 0xff) << 24 | ((__uint32_t)(ext_tag) &
0xff00) << 8 | ((__uint32_t)(ext_tag) & 0xff0000) >>
8 | ((__uint32_t)(ext_tag) & 0xff000000) >> 24) : __swap32md
(ext_tag))
;
1691 }
1692 kr->metric &= LSA_METRIC_MASK0x00ffffff;
1693 }
1694 }
1695
1696 if (kr->ext_tag) {
1697 lsa->data.asext.metric = htonl(kr->metric | LSA_ASEXT_T_FLAG)(__uint32_t)(__builtin_constant_p(kr->metric | 0x01000000)
? (__uint32_t)(((__uint32_t)(kr->metric | 0x01000000) &
0xff) << 24 | ((__uint32_t)(kr->metric | 0x01000000
) & 0xff00) << 8 | ((__uint32_t)(kr->metric | 0x01000000
) & 0xff0000) >> 8 | ((__uint32_t)(kr->metric | 0x01000000
) & 0xff000000) >> 24) : __swap32md(kr->metric |
0x01000000))
;
1698 ext_tag = htonl(kr->ext_tag)(__uint32_t)(__builtin_constant_p(kr->ext_tag) ? (__uint32_t
)(((__uint32_t)(kr->ext_tag) & 0xff) << 24 | ((__uint32_t
)(kr->ext_tag) & 0xff00) << 8 | ((__uint32_t)(kr
->ext_tag) & 0xff0000) >> 8 | ((__uint32_t)(kr->
ext_tag) & 0xff000000) >> 24) : __swap32md(kr->ext_tag
))
;
1699 memcpy((char *)lsa + ext_off, &ext_tag, sizeof(ext_tag));
1700 } else {
1701 lsa->data.asext.metric = htonl(kr->metric)(__uint32_t)(__builtin_constant_p(kr->metric) ? (__uint32_t
)(((__uint32_t)(kr->metric) & 0xff) << 24 | ((__uint32_t
)(kr->metric) & 0xff00) << 8 | ((__uint32_t)(kr->
metric) & 0xff0000) >> 8 | ((__uint32_t)(kr->metric
) & 0xff000000) >> 24) : __swap32md(kr->metric))
;
1702 }
1703
1704 lsa->hdr.ls_chksum = 0;
1705 lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET))(__uint16_t)(__builtin_constant_p(iso_cksum(lsa, len, __builtin_offsetof
(struct lsa_hdr, ls_chksum))) ? (__uint16_t)(((__uint16_t)(iso_cksum
(lsa, len, __builtin_offsetof(struct lsa_hdr, ls_chksum))) &
0xffU) << 8 | ((__uint16_t)(iso_cksum(lsa, len, __builtin_offsetof
(struct lsa_hdr, ls_chksum))) & 0xff00U) >> 8) : __swap16md
(iso_cksum(lsa, len, __builtin_offsetof(struct lsa_hdr, ls_chksum
))))
;
1706
1707 return (lsa);
1708}
1709
1710struct lsa *
1711orig_sum_lsa(struct rt_node *rte, struct area *area, u_int8_t type, int invalid)
1712{
1713#if 0 /* XXX a lot todo */
1714 struct lsa *lsa;
1715 u_int16_t len;
1716
1717 len = sizeof(struct lsa_hdr) + sizeof(struct lsa_sum);
1718 if ((lsa = calloc(1, len)) == NULL((void*)0))
1719 fatal("orig_sum_lsa");
1720
1721 /* LSA header */
1722 lsa->hdr.age = htons(invalid ? MAX_AGE : DEFAULT_AGE)(__uint16_t)(__builtin_constant_p(invalid ? 3600 : 0) ? (__uint16_t
)(((__uint16_t)(invalid ? 3600 : 0) & 0xffU) << 8 |
((__uint16_t)(invalid ? 3600 : 0) & 0xff00U) >> 8)
: __swap16md(invalid ? 3600 : 0))
;
1723 lsa->hdr.type = type;
1724 lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
1725 lsa->hdr.seq_num = htonl(INIT_SEQ_NUM)(__uint32_t)(__builtin_constant_p(0x80000001U) ? (__uint32_t)
(((__uint32_t)(0x80000001U) & 0xff) << 24 | ((__uint32_t
)(0x80000001U) & 0xff00) << 8 | ((__uint32_t)(0x80000001U
) & 0xff0000) >> 8 | ((__uint32_t)(0x80000001U) &
0xff000000) >> 24) : __swap32md(0x80000001U))
;
1726 lsa->hdr.len = htons(len)(__uint16_t)(__builtin_constant_p(len) ? (__uint16_t)(((__uint16_t
)(len) & 0xffU) << 8 | ((__uint16_t)(len) & 0xff00U
) >> 8) : __swap16md(len))
;
1727
1728 /* prefix and mask */
1729 /*
1730 * TODO ls_id must be unique, for overlapping routes this may
1731 * not be true. In this case a hack needs to be done to
1732 * make the ls_id unique.
1733 */
1734 lsa->hdr.ls_id = rte->prefix.s_addr;
1735 if (type == LSA_TYPE_SUM_NETWORK)
1736 lsa->data.sum.mask = prefixlen2mask(rte->prefixlen);
1737 else
1738 lsa->data.sum.mask = 0; /* must be zero per RFC */
1739
1740 lsa->data.sum.metric = htonl(rte->cost & LSA_METRIC_MASK)(__uint32_t)(__builtin_constant_p(rte->cost & 0x00ffffff
) ? (__uint32_t)(((__uint32_t)(rte->cost & 0x00ffffff)
& 0xff) << 24 | ((__uint32_t)(rte->cost & 0x00ffffff
) & 0xff00) << 8 | ((__uint32_t)(rte->cost &
0x00ffffff) & 0xff0000) >> 8 | ((__uint32_t)(rte->
cost & 0x00ffffff) & 0xff000000) >> 24) : __swap32md
(rte->cost & 0x00ffffff))
;
1741
1742 lsa->hdr.ls_chksum = 0;
1743 lsa->hdr.ls_chksum =
1744 htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET))(__uint16_t)(__builtin_constant_p(iso_cksum(lsa, len, __builtin_offsetof
(struct lsa_hdr, ls_chksum))) ? (__uint16_t)(((__uint16_t)(iso_cksum
(lsa, len, __builtin_offsetof(struct lsa_hdr, ls_chksum))) &
0xffU) << 8 | ((__uint16_t)(iso_cksum(lsa, len, __builtin_offsetof
(struct lsa_hdr, ls_chksum))) & 0xff00U) >> 8) : __swap16md
(iso_cksum(lsa, len, __builtin_offsetof(struct lsa_hdr, ls_chksum
))))
;
1745
1746 return (lsa);
1747#endif
1748 return NULL((void*)0);
1749}