Bug Summary

File:src/usr.sbin/bgpd/rde.c
Warning:line 1999, column 8
Although the value stored to 'error' is used in the enclosing expression, the value is never actually read from 'error'

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name rde.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 1 -pic-is-pie -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/usr.sbin/bgpd/obj -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/usr.sbin/bgpd -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -fdebug-compilation-dir=/usr/src/usr.sbin/bgpd/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c /usr/src/usr.sbin/bgpd/rde.c
1/* $OpenBSD: rde.c,v 1.532 2021/08/09 08:15:34 claudio Exp $ */
2
3/*
4 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
5 * Copyright (c) 2016 Job Snijders <job@instituut.net>
6 * Copyright (c) 2016 Peter Hessler <phessler@openbsd.org>
7 * Copyright (c) 2018 Sebastian Benoit <benno@openbsd.org>
8 *
9 * Permission to use, copy, modify, and distribute this software for any
10 * purpose with or without fee is hereby granted, provided that the above
11 * copyright notice and this permission notice appear in all copies.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 */
21
22#include <sys/types.h>
23#include <sys/time.h>
24#include <sys/resource.h>
25
26#include <errno(*__errno()).h>
27#include <pwd.h>
28#include <poll.h>
29#include <signal.h>
30#include <stdio.h>
31#include <stdlib.h>
32#include <string.h>
33#include <syslog.h>
34#include <unistd.h>
35
36#include "bgpd.h"
37#include "rde.h"
38#include "session.h"
39#include "log.h"
40
41#define PFD_PIPE_MAIN0 0
42#define PFD_PIPE_SESSION1 1
43#define PFD_PIPE_SESSION_CTL2 2
44#define PFD_PIPE_ROA3 3
45#define PFD_PIPE_COUNT4 4
46
47void rde_sighdlr(int);
48void rde_dispatch_imsg_session(struct imsgbuf *);
49void rde_dispatch_imsg_parent(struct imsgbuf *);
50void rde_dispatch_imsg_rtr(struct imsgbuf *);
51void rde_dispatch_imsg_peer(struct rde_peer *, void *);
52void rde_update_dispatch(struct rde_peer *, struct imsg *);
53int rde_update_update(struct rde_peer *, u_int32_t,
54 struct filterstate *, struct bgpd_addr *, u_int8_t);
55void rde_update_withdraw(struct rde_peer *, u_int32_t,
56 struct bgpd_addr *, u_int8_t);
57int rde_attr_parse(u_char *, u_int16_t, struct rde_peer *,
58 struct filterstate *, struct mpattr *);
59int rde_attr_add(struct filterstate *, u_char *, u_int16_t);
60u_int8_t rde_attr_missing(struct rde_aspath *, int, u_int16_t);
61int rde_get_mp_nexthop(u_char *, u_int16_t, u_int8_t,
62 struct filterstate *);
63void rde_as4byte_fixup(struct rde_peer *, struct rde_aspath *);
64void rde_reflector(struct rde_peer *, struct rde_aspath *);
65
66void rde_dump_ctx_new(struct ctl_show_rib_request *, pid_t,
67 enum imsg_type);
68void rde_dump_ctx_throttle(pid_t, int);
69void rde_dump_ctx_terminate(pid_t);
70void rde_dump_mrt_new(struct mrt *, pid_t, int);
71
72int rde_l3vpn_import(struct rde_community *, struct l3vpn *);
73static void rde_commit_pftable(void);
74void rde_reload_done(void);
75static void rde_softreconfig_in_done(void *, u_int8_t);
76static void rde_softreconfig_out_done(void *, u_int8_t);
77static void rde_softreconfig_done(void);
78static void rde_softreconfig_out(struct rib_entry *, void *);
79static void rde_softreconfig_in(struct rib_entry *, void *);
80static void rde_softreconfig_sync_reeval(struct rib_entry *, void *);
81static void rde_softreconfig_sync_fib(struct rib_entry *, void *);
82static void rde_softreconfig_sync_done(void *, u_int8_t);
83static void rde_roa_reload(void);
84int rde_update_queue_pending(void);
85void rde_update_queue_runner(void);
86void rde_update6_queue_runner(u_int8_t);
87struct rde_prefixset *rde_find_prefixset(char *, struct rde_prefixset_head *);
88void rde_mark_prefixsets_dirty(struct rde_prefixset_head *,
89 struct rde_prefixset_head *);
90u_int8_t rde_roa_validity(struct rde_prefixset *,
91 struct bgpd_addr *, u_int8_t, u_int32_t);
92
93static void rde_peer_recv_eor(struct rde_peer *, u_int8_t);
94static void rde_peer_send_eor(struct rde_peer *, u_int8_t);
95
96void network_add(struct network_config *, struct filterstate *);
97void network_delete(struct network_config *);
98static void network_dump_upcall(struct rib_entry *, void *);
99static void network_flush_upcall(struct rib_entry *, void *);
100
101void rde_shutdown(void);
102int ovs_match(struct prefix *, u_int32_t);
103
104static struct imsgbuf *ibuf_se;
105static struct imsgbuf *ibuf_se_ctl;
106static struct imsgbuf *ibuf_rtr;
107static struct imsgbuf *ibuf_main;
108static struct bgpd_config *conf, *nconf;
109static struct rde_prefixset rde_roa, roa_new;
110
111volatile sig_atomic_t rde_quit = 0;
112struct filter_head *out_rules, *out_rules_tmp;
113struct rde_memstats rdemem;
114int softreconfig;
115static int rde_eval_all;
116
117extern struct rde_peer_head peerlist;
118extern struct rde_peer *peerself;
119
120struct rde_dump_ctx {
121 LIST_ENTRY(rde_dump_ctx)struct { struct rde_dump_ctx *le_next; struct rde_dump_ctx **
le_prev; }
entry;
122 struct ctl_show_rib_request req;
123 u_int32_t peerid;
124 u_int8_t throttled;
125};
126
127LIST_HEAD(, rde_dump_ctx)struct { struct rde_dump_ctx *lh_first; } rde_dump_h = LIST_HEAD_INITIALIZER(rde_dump_h){ ((void*)0) };
128
129struct rde_mrt_ctx {
130 LIST_ENTRY(rde_mrt_ctx)struct { struct rde_mrt_ctx *le_next; struct rde_mrt_ctx **le_prev
; }
entry;
131 struct mrt mrt;
132};
133
134LIST_HEAD(, rde_mrt_ctx)struct { struct rde_mrt_ctx *lh_first; } rde_mrts = LIST_HEAD_INITIALIZER(rde_mrts){ ((void*)0) };
135u_int rde_mrt_cnt;
136
137void
138rde_sighdlr(int sig)
139{
140 switch (sig) {
141 case SIGINT2:
142 case SIGTERM15:
143 rde_quit = 1;
144 break;
145 }
146}
147
148u_int32_t peerhashsize = 1024;
149u_int32_t pathhashsize = 128 * 1024;
150u_int32_t attrhashsize = 16 * 1024;
151u_int32_t nexthophashsize = 1024;
152
153void
154rde_main(int debug, int verbose)
155{
156 struct passwd *pw;
157 struct pollfd *pfd = NULL((void*)0);
158 struct rde_mrt_ctx *mctx, *xmctx;
159 void *newp;
160 u_int pfd_elms = 0, i, j;
161 int timeout;
162 u_int8_t aid;
163
164 log_init(debug, LOG_DAEMON(3<<3));
165 log_setverbose(verbose);
166
167 log_procinit(log_procnames[PROC_RDE]);
168
169 if ((pw = getpwnam(BGPD_USER"_bgpd")) == NULL((void*)0))
170 fatal("getpwnam");
171
172 if (chroot(pw->pw_dir) == -1)
173 fatal("chroot");
174 if (chdir("/") == -1)
175 fatal("chdir(\"/\")");
176
177 setproctitle("route decision engine");
178
179 if (setgroups(1, &pw->pw_gid) ||
180 setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
181 setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
182 fatal("can't drop privileges");
183
184 if (pledge("stdio recvfd", NULL((void*)0)) == -1)
185 fatal("pledge");
186
187 signal(SIGTERM15, rde_sighdlr);
188 signal(SIGINT2, rde_sighdlr);
189 signal(SIGPIPE13, SIG_IGN(void (*)(int))1);
190 signal(SIGHUP1, SIG_IGN(void (*)(int))1);
191 signal(SIGALRM14, SIG_IGN(void (*)(int))1);
192 signal(SIGUSR130, SIG_IGN(void (*)(int))1);
193
194 if ((ibuf_main = malloc(sizeof(struct imsgbuf))) == NULL((void*)0))
195 fatal(NULL((void*)0));
196 imsg_init(ibuf_main, 3);
197
198 /* initialize the RIB structures */
199 pt_init();
200 path_init(pathhashsize);
201 aspath_init(pathhashsize);
202 communities_init(attrhashsize);
203 attr_init(attrhashsize);
204 nexthop_init(nexthophashsize);
205 peer_init(peerhashsize);
206
207 /* make sure the default RIBs are setup */
208 rib_new("Adj-RIB-In", 0, F_RIB_NOFIB0x0004 | F_RIB_NOEVALUATE0x0002);
209
210 out_rules = calloc(1, sizeof(struct filter_head));
211 if (out_rules == NULL((void*)0))
212 fatal(NULL((void*)0));
213 TAILQ_INIT(out_rules)do { (out_rules)->tqh_first = ((void*)0); (out_rules)->
tqh_last = &(out_rules)->tqh_first; } while (0)
;
214
215 conf = new_config();
216 log_info("route decision engine ready");
217
218 while (rde_quit == 0) {
219 if (pfd_elms < PFD_PIPE_COUNT4 + rde_mrt_cnt) {
220 if ((newp = reallocarray(pfd,
221 PFD_PIPE_COUNT4 + rde_mrt_cnt,
222 sizeof(struct pollfd))) == NULL((void*)0)) {
223 /* panic for now */
224 log_warn("could not resize pfd from %u -> %u"
225 " entries", pfd_elms, PFD_PIPE_COUNT4 +
226 rde_mrt_cnt);
227 fatalx("exiting");
228 }
229 pfd = newp;
230 pfd_elms = PFD_PIPE_COUNT4 + rde_mrt_cnt;
231 }
232 timeout = -1;
233 bzero(pfd, sizeof(struct pollfd) * pfd_elms);
234
235 set_pollfd(&pfd[PFD_PIPE_MAIN0], ibuf_main);
236 set_pollfd(&pfd[PFD_PIPE_SESSION1], ibuf_se);
237 set_pollfd(&pfd[PFD_PIPE_SESSION_CTL2], ibuf_se_ctl);
238 set_pollfd(&pfd[PFD_PIPE_ROA3], ibuf_rtr);
239
240 i = PFD_PIPE_COUNT4;
241 for (mctx = LIST_FIRST(&rde_mrts)((&rde_mrts)->lh_first); mctx != 0; mctx = xmctx) {
242 xmctx = LIST_NEXT(mctx, entry)((mctx)->entry.le_next);
243
244 if (i >= pfd_elms)
245 fatalx("poll pfd too small");
246 if (mctx->mrt.wbuf.queued) {
247 pfd[i].fd = mctx->mrt.wbuf.fd;
248 pfd[i].events = POLLOUT0x0004;
249 i++;
250 } else if (mctx->mrt.state == MRT_STATE_REMOVE) {
251 close(mctx->mrt.wbuf.fd);
252 LIST_REMOVE(mctx, entry)do { if ((mctx)->entry.le_next != ((void*)0)) (mctx)->entry
.le_next->entry.le_prev = (mctx)->entry.le_prev; *(mctx
)->entry.le_prev = (mctx)->entry.le_next; ; ; } while (
0)
;
253 free(mctx);
254 rde_mrt_cnt--;
255 }
256 }
257
258 if (rib_dump_pending() || rde_update_queue_pending() ||
259 nexthop_pending() || peer_imsg_pending())
260 timeout = 0;
261
262 if (poll(pfd, i, timeout) == -1) {
263 if (errno(*__errno()) != EINTR4)
264 fatal("poll error");
265 continue;
266 }
267
268 if (handle_pollfd(&pfd[PFD_PIPE_MAIN0], ibuf_main) == -1)
269 fatalx("Lost connection to parent");
270 else
271 rde_dispatch_imsg_parent(ibuf_main);
272
273 if (handle_pollfd(&pfd[PFD_PIPE_SESSION1], ibuf_se) == -1) {
274 log_warnx("RDE: Lost connection to SE");
275 msgbuf_clear(&ibuf_se->w);
276 free(ibuf_se);
277 ibuf_se = NULL((void*)0);
278 } else
279 rde_dispatch_imsg_session(ibuf_se);
280
281 if (handle_pollfd(&pfd[PFD_PIPE_SESSION_CTL2], ibuf_se_ctl) ==
282 -1) {
283 log_warnx("RDE: Lost connection to SE control");
284 msgbuf_clear(&ibuf_se_ctl->w);
285 free(ibuf_se_ctl);
286 ibuf_se_ctl = NULL((void*)0);
287 } else
288 rde_dispatch_imsg_session(ibuf_se_ctl);
289
290 if (handle_pollfd(&pfd[PFD_PIPE_ROA3], ibuf_rtr) == -1) {
291 log_warnx("RDE: Lost connection to ROA");
292 msgbuf_clear(&ibuf_rtr->w);
293 free(ibuf_rtr);
294 ibuf_rtr = NULL((void*)0);
295 } else
296 rde_dispatch_imsg_rtr(ibuf_rtr);
297
298 for (j = PFD_PIPE_COUNT4, mctx = LIST_FIRST(&rde_mrts)((&rde_mrts)->lh_first);
299 j < i && mctx != 0; j++) {
300 if (pfd[j].fd == mctx->mrt.wbuf.fd &&
301 pfd[j].revents & POLLOUT0x0004)
302 mrt_write(&mctx->mrt);
303 mctx = LIST_NEXT(mctx, entry)((mctx)->entry.le_next);
304 }
305
306 peer_foreach(rde_dispatch_imsg_peer, NULL((void*)0));
307 rib_dump_runner();
308 nexthop_runner();
309 if (ibuf_se && ibuf_se->w.queued < SESS_MSG_HIGH_MARK2000) {
310 rde_update_queue_runner();
311 for (aid = AID_INET62; aid < AID_MAX5; aid++)
312 rde_update6_queue_runner(aid);
313 }
314 /* commit pftable once per poll loop */
315 rde_commit_pftable();
316 }
317
318 /* do not clean up on shutdown on production, it takes ages. */
319 if (debug)
320 rde_shutdown();
321
322 free_config(conf);
323 free(pfd);
324
325 /* close pipes */
326 if (ibuf_se) {
327 msgbuf_clear(&ibuf_se->w);
328 close(ibuf_se->fd);
329 free(ibuf_se);
330 }
331 if (ibuf_se_ctl) {
332 msgbuf_clear(&ibuf_se_ctl->w);
333 close(ibuf_se_ctl->fd);
334 free(ibuf_se_ctl);
335 }
336 msgbuf_clear(&ibuf_main->w);
337 close(ibuf_main->fd);
338 free(ibuf_main);
339
340 while ((mctx = LIST_FIRST(&rde_mrts)((&rde_mrts)->lh_first)) != NULL((void*)0)) {
341 msgbuf_clear(&mctx->mrt.wbuf);
342 close(mctx->mrt.wbuf.fd);
343 LIST_REMOVE(mctx, entry)do { if ((mctx)->entry.le_next != ((void*)0)) (mctx)->entry
.le_next->entry.le_prev = (mctx)->entry.le_prev; *(mctx
)->entry.le_prev = (mctx)->entry.le_next; ; ; } while (
0)
;
344 free(mctx);
345 }
346
347 log_info("route decision engine exiting");
348 exit(0);
349}
350
351struct network_config netconf_s, netconf_p;
352struct filterstate netconf_state;
353struct filter_set_head session_set = TAILQ_HEAD_INITIALIZER(session_set){ ((void*)0), &(session_set).tqh_first };
354struct filter_set_head parent_set = TAILQ_HEAD_INITIALIZER(parent_set){ ((void*)0), &(parent_set).tqh_first };
355
356void
357rde_dispatch_imsg_session(struct imsgbuf *ibuf)
358{
359 struct imsg imsg;
360 struct peer p;
361 struct peer_config pconf;
362 struct ctl_show_set cset;
363 struct ctl_show_rib csr;
364 struct ctl_show_rib_request req;
365 struct rde_peer *peer;
366 struct rde_aspath *asp;
367 struct rde_hashstats rdehash;
368 struct filter_set *s;
369 struct as_set *aset;
370 struct rde_prefixset *pset;
371 u_int8_t *asdata;
372 ssize_t n;
373 size_t aslen;
374 int verbose;
375 u_int16_t len;
376
377 while (ibuf) {
378 if ((n = imsg_get(ibuf, &imsg)) == -1)
379 fatal("rde_dispatch_imsg_session: imsg_get error");
380 if (n == 0)
381 break;
382
383 switch (imsg.hdr.type) {
384 case IMSG_UPDATE:
385 case IMSG_SESSION_UP:
386 case IMSG_SESSION_DOWN:
387 case IMSG_SESSION_STALE:
388 case IMSG_SESSION_FLUSH:
389 case IMSG_SESSION_RESTARTED:
390 case IMSG_REFRESH:
391 if ((peer = peer_get(imsg.hdr.peerid)) == NULL((void*)0)) {
392 log_warnx("rde_dispatch: unknown peer id %d",
393 imsg.hdr.peerid);
394 break;
395 }
396 peer_imsg_push(peer, &imsg);
397 break;
398 case IMSG_SESSION_ADD:
399 if (imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr) != sizeof(pconf))
400 fatalx("incorrect size of session request");
401 memcpy(&pconf, imsg.data, sizeof(pconf));
402 peer_add(imsg.hdr.peerid, &pconf);
403 /* make sure rde_eval_all is on if needed. */
404 if (pconf.flags & PEERFLAG_EVALUATE_ALL0x04)
405 rde_eval_all = 1;
406 break;
407 case IMSG_NETWORK_ADD:
408 if (imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr) !=
409 sizeof(struct network_config)) {
410 log_warnx("rde_dispatch: wrong imsg len");
411 break;
412 }
413 memcpy(&netconf_s, imsg.data, sizeof(netconf_s));
414 TAILQ_INIT(&netconf_s.attrset)do { (&netconf_s.attrset)->tqh_first = ((void*)0); (&
netconf_s.attrset)->tqh_last = &(&netconf_s.attrset
)->tqh_first; } while (0)
;
415 rde_filterstate_prep(&netconf_state, NULL((void*)0), NULL((void*)0), NULL((void*)0),
416 0);
417 asp = &netconf_state.aspath;
418 asp->aspath = aspath_get(NULL((void*)0), 0);
419 asp->origin = ORIGIN_IGP0;
420 asp->flags = F_ATTR_ORIGIN0x00001 | F_ATTR_ASPATH0x00002 |
421 F_ATTR_LOCALPREF0x00008 | F_PREFIX_ANNOUNCED0x00400 |
422 F_ANN_DYNAMIC0x00800;
423 break;
424 case IMSG_NETWORK_ASPATH:
425 if (imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr) <
426 sizeof(csr)) {
427 log_warnx("rde_dispatch: wrong imsg len");
428 bzero(&netconf_s, sizeof(netconf_s));
429 break;
430 }
431 aslen = imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr) - sizeof(csr);
432 asdata = imsg.data;
433 asdata += sizeof(struct ctl_show_rib);
434 memcpy(&csr, imsg.data, sizeof(csr));
435 asp = &netconf_state.aspath;
436 asp->lpref = csr.local_pref;
437 asp->med = csr.med;
438 asp->weight = csr.weight;
439 asp->flags = csr.flags;
440 asp->origin = csr.origin;
441 asp->flags |= F_PREFIX_ANNOUNCED0x00400 | F_ANN_DYNAMIC0x00800;
442 aspath_put(asp->aspath);
443 asp->aspath = aspath_get(asdata, aslen);
444 break;
445 case IMSG_NETWORK_ATTR:
446 if (imsg.hdr.len <= IMSG_HEADER_SIZEsizeof(struct imsg_hdr)) {
447 log_warnx("rde_dispatch: wrong imsg len");
448 break;
449 }
450 /* parse optional path attributes */
451 len = imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr);
452 if (rde_attr_add(&netconf_state, imsg.data,
453 len) == -1) {
454 log_warnx("rde_dispatch: bad network "
455 "attribute");
456 rde_filterstate_clean(&netconf_state);
457 bzero(&netconf_s, sizeof(netconf_s));
458 break;
459 }
460 break;
461 case IMSG_NETWORK_DONE:
462 if (imsg.hdr.len != IMSG_HEADER_SIZEsizeof(struct imsg_hdr)) {
463 log_warnx("rde_dispatch: wrong imsg len");
464 break;
465 }
466 TAILQ_CONCAT(&netconf_s.attrset, &session_set, entry)do { if (!(((&session_set)->tqh_first) == ((void*)0)))
{ *(&netconf_s.attrset)->tqh_last = (&session_set
)->tqh_first; (&session_set)->tqh_first->entry.tqe_prev
= (&netconf_s.attrset)->tqh_last; (&netconf_s.attrset
)->tqh_last = (&session_set)->tqh_last; do { ((&
session_set))->tqh_first = ((void*)0); ((&session_set)
)->tqh_last = &((&session_set))->tqh_first; } while
(0); } } while (0)
;
467 switch (netconf_s.prefix.aid) {
468 case AID_INET1:
469 if (netconf_s.prefixlen > 32)
470 goto badnet;
471 network_add(&netconf_s, &netconf_state);
472 break;
473 case AID_INET62:
474 if (netconf_s.prefixlen > 128)
475 goto badnet;
476 network_add(&netconf_s, &netconf_state);
477 break;
478 case 0:
479 /* something failed beforehands */
480 break;
481 default:
482badnet:
483 log_warnx("request to insert invalid network");
484 break;
485 }
486 rde_filterstate_clean(&netconf_state);
487 break;
488 case IMSG_NETWORK_REMOVE:
489 if (imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr) !=
490 sizeof(struct network_config)) {
491 log_warnx("rde_dispatch: wrong imsg len");
492 break;
493 }
494 memcpy(&netconf_s, imsg.data, sizeof(netconf_s));
495 TAILQ_INIT(&netconf_s.attrset)do { (&netconf_s.attrset)->tqh_first = ((void*)0); (&
netconf_s.attrset)->tqh_last = &(&netconf_s.attrset
)->tqh_first; } while (0)
;
496
497 switch (netconf_s.prefix.aid) {
498 case AID_INET1:
499 if (netconf_s.prefixlen > 32)
500 goto badnetdel;
501 network_delete(&netconf_s);
502 break;
503 case AID_INET62:
504 if (netconf_s.prefixlen > 128)
505 goto badnetdel;
506 network_delete(&netconf_s);
507 break;
508 default:
509badnetdel:
510 log_warnx("request to remove invalid network");
511 break;
512 }
513 break;
514 case IMSG_NETWORK_FLUSH:
515 if (imsg.hdr.len != IMSG_HEADER_SIZEsizeof(struct imsg_hdr)) {
516 log_warnx("rde_dispatch: wrong imsg len");
517 break;
518 }
519 if (rib_dump_new(RIB_ADJ_IN0, AID_UNSPEC0,
520 RDE_RUNNER_ROUNDS100, NULL((void*)0), network_flush_upcall,
521 NULL((void*)0), NULL((void*)0)) == -1)
522 log_warn("rde_dispatch: IMSG_NETWORK_FLUSH");
523 break;
524 case IMSG_FILTER_SET:
525 if (imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr) !=
526 sizeof(struct filter_set)) {
527 log_warnx("rde_dispatch: wrong imsg len");
528 break;
529 }
530 if ((s = malloc(sizeof(struct filter_set))) == NULL((void*)0))
531 fatal(NULL((void*)0));
532 memcpy(s, imsg.data, sizeof(struct filter_set));
533 if (s->type == ACTION_SET_NEXTHOP) {
534 s->action.nh_ref =
535 nexthop_get(&s->action.nexthop);
536 s->type = ACTION_SET_NEXTHOP_REF;
537 }
538 TAILQ_INSERT_TAIL(&session_set, s, entry)do { (s)->entry.tqe_next = ((void*)0); (s)->entry.tqe_prev
= (&session_set)->tqh_last; *(&session_set)->tqh_last
= (s); (&session_set)->tqh_last = &(s)->entry.
tqe_next; } while (0)
;
539 break;
540 case IMSG_CTL_SHOW_NETWORK:
541 case IMSG_CTL_SHOW_RIB:
542 case IMSG_CTL_SHOW_RIB_PREFIX:
543 if (imsg.hdr.len != IMSG_HEADER_SIZEsizeof(struct imsg_hdr) + sizeof(req)) {
544 log_warnx("rde_dispatch: wrong imsg len");
545 break;
546 }
547 memcpy(&req, imsg.data, sizeof(req));
548 rde_dump_ctx_new(&req, imsg.hdr.pid, imsg.hdr.type);
549 break;
550 case IMSG_CTL_SHOW_NEIGHBOR:
551 if (imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr) !=
552 sizeof(struct peer)) {
553 log_warnx("rde_dispatch: wrong imsg len");
554 break;
555 }
556 memcpy(&p, imsg.data, sizeof(struct peer));
557 peer = peer_get(p.conf.id);
558 if (peer != NULL((void*)0)) {
559 p.stats.prefix_cnt = peer->prefix_cnt;
560 p.stats.prefix_out_cnt = peer->prefix_out_cnt;
561 p.stats.prefix_rcvd_update =
562 peer->prefix_rcvd_update;
563 p.stats.prefix_rcvd_withdraw =
564 peer->prefix_rcvd_withdraw;
565 p.stats.prefix_rcvd_eor =
566 peer->prefix_rcvd_eor;
567 p.stats.prefix_sent_update =
568 peer->prefix_sent_update;
569 p.stats.prefix_sent_withdraw =
570 peer->prefix_sent_withdraw;
571 p.stats.prefix_sent_eor =
572 peer->prefix_sent_eor;
573 }
574 imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_NEIGHBOR, 0,
575 imsg.hdr.pid, -1, &p, sizeof(struct peer));
576 break;
577 case IMSG_CTL_SHOW_RIB_MEM:
578 imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_RIB_MEM, 0,
579 imsg.hdr.pid, -1, &rdemem, sizeof(rdemem));
580 path_hash_stats(&rdehash);
581 imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_RIB_HASH, 0,
582 imsg.hdr.pid, -1, &rdehash, sizeof(rdehash));
583 aspath_hash_stats(&rdehash);
584 imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_RIB_HASH, 0,
585 imsg.hdr.pid, -1, &rdehash, sizeof(rdehash));
586 communities_hash_stats(&rdehash);
587 imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_RIB_HASH, 0,
588 imsg.hdr.pid, -1, &rdehash, sizeof(rdehash));
589 attr_hash_stats(&rdehash);
590 imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_RIB_HASH, 0,
591 imsg.hdr.pid, -1, &rdehash, sizeof(rdehash));
592 imsg_compose(ibuf_se_ctl, IMSG_CTL_END, 0, imsg.hdr.pid,
593 -1, NULL((void*)0), 0);
594 break;
595 case IMSG_CTL_SHOW_SET:
596 /* first roa set */
597 pset = &rde_roa;
598 memset(&cset, 0, sizeof(cset));
599 cset.type = ROA_SET;
600 strlcpy(cset.name, "RPKI ROA", sizeof(cset.name));
601 cset.lastchange = pset->lastchange;
602 cset.v4_cnt = pset->th.v4_cnt;
603 cset.v6_cnt = pset->th.v6_cnt;
604 imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_SET, 0,
605 imsg.hdr.pid, -1, &cset, sizeof(cset));
606
607 SIMPLEQ_FOREACH(aset, &conf->as_sets, entry)for((aset) = ((&conf->as_sets)->sqh_first); (aset) !=
((void*)0); (aset) = ((aset)->entry.sqe_next))
{
608 memset(&cset, 0, sizeof(cset));
609 cset.type = ASNUM_SET;
610 strlcpy(cset.name, aset->name,
611 sizeof(cset.name));
612 cset.lastchange = aset->lastchange;
613 cset.as_cnt = set_nmemb(aset->set);
614 imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_SET, 0,
615 imsg.hdr.pid, -1, &cset, sizeof(cset));
616 }
617 SIMPLEQ_FOREACH(pset, &conf->rde_prefixsets, entry)for((pset) = ((&conf->rde_prefixsets)->sqh_first); (
pset) != ((void*)0); (pset) = ((pset)->entry.sqe_next))
{
618 memset(&cset, 0, sizeof(cset));
619 cset.type = PREFIX_SET;
620 strlcpy(cset.name, pset->name,
621 sizeof(cset.name));
622 cset.lastchange = pset->lastchange;
623 cset.v4_cnt = pset->th.v4_cnt;
624 cset.v6_cnt = pset->th.v6_cnt;
625 imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_SET, 0,
626 imsg.hdr.pid, -1, &cset, sizeof(cset));
627 }
628 SIMPLEQ_FOREACH(pset, &conf->rde_originsets, entry)for((pset) = ((&conf->rde_originsets)->sqh_first); (
pset) != ((void*)0); (pset) = ((pset)->entry.sqe_next))
{
629 memset(&cset, 0, sizeof(cset));
630 cset.type = ORIGIN_SET;
631 strlcpy(cset.name, pset->name,
632 sizeof(cset.name));
633 cset.lastchange = pset->lastchange;
634 cset.v4_cnt = pset->th.v4_cnt;
635 cset.v6_cnt = pset->th.v6_cnt;
636 imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_SET, 0,
637 imsg.hdr.pid, -1, &cset, sizeof(cset));
638 }
639 imsg_compose(ibuf_se_ctl, IMSG_CTL_END, 0, imsg.hdr.pid,
640 -1, NULL((void*)0), 0);
641 break;
642 case IMSG_CTL_LOG_VERBOSE:
643 /* already checked by SE */
644 memcpy(&verbose, imsg.data, sizeof(verbose));
645 log_setverbose(verbose);
646 break;
647 case IMSG_CTL_END:
648 imsg_compose(ibuf_se_ctl, IMSG_CTL_END, 0, imsg.hdr.pid,
649 -1, NULL((void*)0), 0);
650 break;
651 case IMSG_CTL_TERMINATE:
652 rde_dump_ctx_terminate(imsg.hdr.pid);
653 break;
654 case IMSG_XON:
655 if (imsg.hdr.peerid) {
656 peer = peer_get(imsg.hdr.peerid);
657 if (peer)
658 peer->throttled = 0;
659 } else {
660 rde_dump_ctx_throttle(imsg.hdr.pid, 0);
661 }
662 break;
663 case IMSG_XOFF:
664 if (imsg.hdr.peerid) {
665 peer = peer_get(imsg.hdr.peerid);
666 if (peer)
667 peer->throttled = 1;
668 } else {
669 rde_dump_ctx_throttle(imsg.hdr.pid, 1);
670 }
671 break;
672 case IMSG_RECONF_DRAIN:
673 imsg_compose(ibuf_se, IMSG_RECONF_DRAIN, 0, 0,
674 -1, NULL((void*)0), 0);
675 break;
676 default:
677 break;
678 }
679 imsg_free(&imsg);
680 }
681}
682
683void
684rde_dispatch_imsg_parent(struct imsgbuf *ibuf)
685{
686 static struct rde_prefixset *last_prefixset;
687 static struct as_set *last_as_set;
688 static struct l3vpn *vpn;
689 struct imsg imsg;
690 struct mrt xmrt;
691 struct roa roa;
692 struct rde_rib rr;
693 struct filterstate state;
694 struct imsgbuf *i;
695 struct filter_head *nr;
696 struct filter_rule *r;
697 struct filter_set *s;
698 struct rib *rib;
699 struct rde_prefixset *ps;
700 struct rde_aspath *asp;
701 struct prefixset_item psi;
702 char *name;
703 size_t nmemb;
704 int n, fd, rv;
705 u_int16_t rid;
706
707 while (ibuf) {
708 if ((n = imsg_get(ibuf, &imsg)) == -1)
709 fatal("rde_dispatch_imsg_parent: imsg_get error");
710 if (n == 0)
711 break;
712
713 switch (imsg.hdr.type) {
714 case IMSG_SOCKET_CONN:
715 case IMSG_SOCKET_CONN_CTL:
716 case IMSG_SOCKET_CONN_RTR:
717 if ((fd = imsg.fd) == -1) {
718 log_warnx("expected to receive imsg fd "
719 "but didn't receive any");
720 break;
721 }
722 if ((i = malloc(sizeof(struct imsgbuf))) == NULL((void*)0))
723 fatal(NULL((void*)0));
724 imsg_init(i, fd);
725 switch (imsg.hdr.type) {
726 case IMSG_SOCKET_CONN:
727 if (ibuf_se) {
728 log_warnx("Unexpected imsg connection "
729 "to SE received");
730 msgbuf_clear(&ibuf_se->w);
731 free(ibuf_se);
732 }
733 ibuf_se = i;
734 break;
735 case IMSG_SOCKET_CONN_CTL:
736 if (ibuf_se_ctl) {
737 log_warnx("Unexpected imsg ctl "
738 "connection to SE received");
739 msgbuf_clear(&ibuf_se_ctl->w);
740 free(ibuf_se_ctl);
741 }
742 ibuf_se_ctl = i;
743 break;
744 case IMSG_SOCKET_CONN_RTR:
745 if (ibuf_rtr) {
746 log_warnx("Unexpected imsg ctl "
747 "connection to ROA received");
748 msgbuf_clear(&ibuf_rtr->w);
749 free(ibuf_rtr);
750 }
751 ibuf_rtr = i;
752 break;
753 }
754 break;
755 case IMSG_NETWORK_ADD:
756 if (imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr) !=
757 sizeof(struct network_config)) {
758 log_warnx("rde_dispatch: wrong imsg len");
759 break;
760 }
761 memcpy(&netconf_p, imsg.data, sizeof(netconf_p));
762 TAILQ_INIT(&netconf_p.attrset)do { (&netconf_p.attrset)->tqh_first = ((void*)0); (&
netconf_p.attrset)->tqh_last = &(&netconf_p.attrset
)->tqh_first; } while (0)
;
763 break;
764 case IMSG_NETWORK_DONE:
765 TAILQ_CONCAT(&netconf_p.attrset, &parent_set, entry)do { if (!(((&parent_set)->tqh_first) == ((void*)0))) {
*(&netconf_p.attrset)->tqh_last = (&parent_set)->
tqh_first; (&parent_set)->tqh_first->entry.tqe_prev
= (&netconf_p.attrset)->tqh_last; (&netconf_p.attrset
)->tqh_last = (&parent_set)->tqh_last; do { ((&
parent_set))->tqh_first = ((void*)0); ((&parent_set))->
tqh_last = &((&parent_set))->tqh_first; } while (0
); } } while (0)
;
766
767 rde_filterstate_prep(&state, NULL((void*)0), NULL((void*)0), NULL((void*)0), 0);
768 asp = &state.aspath;
769 asp->aspath = aspath_get(NULL((void*)0), 0);
770 asp->origin = ORIGIN_IGP0;
771 asp->flags = F_ATTR_ORIGIN0x00001 | F_ATTR_ASPATH0x00002 |
772 F_ATTR_LOCALPREF0x00008 | F_PREFIX_ANNOUNCED0x00400;
773
774 network_add(&netconf_p, &state);
775 rde_filterstate_clean(&state);
776 break;
777 case IMSG_NETWORK_REMOVE:
778 if (imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr) !=
779 sizeof(struct network_config)) {
780 log_warnx("rde_dispatch: wrong imsg len");
781 break;
782 }
783 memcpy(&netconf_p, imsg.data, sizeof(netconf_p));
784 TAILQ_INIT(&netconf_p.attrset)do { (&netconf_p.attrset)->tqh_first = ((void*)0); (&
netconf_p.attrset)->tqh_last = &(&netconf_p.attrset
)->tqh_first; } while (0)
;
785 network_delete(&netconf_p);
786 break;
787 case IMSG_RECONF_CONF:
788 if (imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr) !=
789 sizeof(struct bgpd_config))
790 fatalx("IMSG_RECONF_CONF bad len");
791 out_rules_tmp = calloc(1, sizeof(struct filter_head));
792 if (out_rules_tmp == NULL((void*)0))
793 fatal(NULL((void*)0));
794 TAILQ_INIT(out_rules_tmp)do { (out_rules_tmp)->tqh_first = ((void*)0); (out_rules_tmp
)->tqh_last = &(out_rules_tmp)->tqh_first; } while (
0)
;
795 nconf = new_config();
796 copy_config(nconf, imsg.data);
797
798 for (rid = 0; rid < rib_size; rid++) {
799 if ((rib = rib_byid(rid)) == NULL((void*)0))
800 continue;
801 rib->state = RECONF_DELETE;
802 rib->fibstate = RECONF_NONE;
803 }
804 break;
805 case IMSG_RECONF_RIB:
806 if (imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr) !=
807 sizeof(struct rde_rib))
808 fatalx("IMSG_RECONF_RIB bad len");
809 memcpy(&rr, imsg.data, sizeof(rr));
810 rib = rib_byid(rib_find(rr.name));
811 if (rib == NULL((void*)0)) {
812 rib = rib_new(rr.name, rr.rtableid, rr.flags);
813 } else if (rib->flags == rr.flags &&
814 rib->rtableid == rr.rtableid) {
815 /* no change to rib apart from filters */
816 rib->state = RECONF_KEEP;
817 } else {
818 /* reload rib because somehing changed */
819 rib->flags_tmp = rr.flags;
820 rib->rtableid_tmp = rr.rtableid;
821 rib->state = RECONF_RELOAD;
822 }
823 break;
824 case IMSG_RECONF_FILTER:
825 if (imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr) !=
826 sizeof(struct filter_rule))
827 fatalx("IMSG_RECONF_FILTER bad len");
828 if ((r = malloc(sizeof(struct filter_rule))) == NULL((void*)0))
829 fatal(NULL((void*)0));
830 memcpy(r, imsg.data, sizeof(struct filter_rule));
831 if (r->match.prefixset.name[0] != '\0') {
832 r->match.prefixset.ps =
833 rde_find_prefixset(r->match.prefixset.name,
834 &nconf->rde_prefixsets);
835 if (r->match.prefixset.ps == NULL((void*)0))
836 log_warnx("%s: no prefixset for %s",
837 __func__, r->match.prefixset.name);
838 }
839 if (r->match.originset.name[0] != '\0') {
840 r->match.originset.ps =
841 rde_find_prefixset(r->match.originset.name,
842 &nconf->rde_originsets);
843 if (r->match.originset.ps == NULL((void*)0))
844 log_warnx("%s: no origin-set for %s",
845 __func__, r->match.originset.name);
846 }
847 if (r->match.as.flags & AS_FLAG_AS_SET_NAME0x02) {
848 struct as_set * aset;
849
850 aset = as_sets_lookup(&nconf->as_sets,
851 r->match.as.name);
852 if (aset == NULL((void*)0)) {
853 log_warnx("%s: no as-set for %s",
854 __func__, r->match.as.name);
855 } else {
856 r->match.as.flags = AS_FLAG_AS_SET0x04;
857 r->match.as.aset = aset;
858 }
859 }
860 TAILQ_INIT(&r->set)do { (&r->set)->tqh_first = ((void*)0); (&r->
set)->tqh_last = &(&r->set)->tqh_first; } while
(0)
;
861 TAILQ_CONCAT(&r->set, &parent_set, entry)do { if (!(((&parent_set)->tqh_first) == ((void*)0))) {
*(&r->set)->tqh_last = (&parent_set)->tqh_first
; (&parent_set)->tqh_first->entry.tqe_prev = (&
r->set)->tqh_last; (&r->set)->tqh_last = (&
parent_set)->tqh_last; do { ((&parent_set))->tqh_first
= ((void*)0); ((&parent_set))->tqh_last = &((&
parent_set))->tqh_first; } while (0); } } while (0)
;
862 if ((rib = rib_byid(rib_find(r->rib))) == NULL((void*)0)) {
863 log_warnx("IMSG_RECONF_FILTER: filter rule "
864 "for nonexistent rib %s", r->rib);
865 free(r);
866 break;
867 }
868 r->peer.ribid = rib->id;
869 if (r->dir == DIR_IN) {
870 nr = rib->in_rules_tmp;
871 if (nr == NULL((void*)0)) {
872 nr = calloc(1,
873 sizeof(struct filter_head));
874 if (nr == NULL((void*)0))
875 fatal(NULL((void*)0));
876 TAILQ_INIT(nr)do { (nr)->tqh_first = ((void*)0); (nr)->tqh_last = &
(nr)->tqh_first; } while (0)
;
877 rib->in_rules_tmp = nr;
878 }
879 TAILQ_INSERT_TAIL(nr, r, entry)do { (r)->entry.tqe_next = ((void*)0); (r)->entry.tqe_prev
= (nr)->tqh_last; *(nr)->tqh_last = (r); (nr)->tqh_last
= &(r)->entry.tqe_next; } while (0)
;
880 } else
881 TAILQ_INSERT_TAIL(out_rules_tmp, r, entry)do { (r)->entry.tqe_next = ((void*)0); (r)->entry.tqe_prev
= (out_rules_tmp)->tqh_last; *(out_rules_tmp)->tqh_last
= (r); (out_rules_tmp)->tqh_last = &(r)->entry.tqe_next
; } while (0)
;
882 break;
883 case IMSG_RECONF_PREFIX_SET:
884 case IMSG_RECONF_ORIGIN_SET:
885 if (imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr) !=
886 sizeof(ps->name))
887 fatalx("IMSG_RECONF_PREFIX_SET bad len");
888 ps = calloc(1, sizeof(struct rde_prefixset));
889 if (ps == NULL((void*)0))
890 fatal(NULL((void*)0));
891 memcpy(ps->name, imsg.data, sizeof(ps->name));
892 if (imsg.hdr.type == IMSG_RECONF_ORIGIN_SET) {
893 SIMPLEQ_INSERT_TAIL(&nconf->rde_originsets, ps,do { (ps)->entry.sqe_next = ((void*)0); *(&nconf->rde_originsets
)->sqh_last = (ps); (&nconf->rde_originsets)->sqh_last
= &(ps)->entry.sqe_next; } while (0)
894 entry)do { (ps)->entry.sqe_next = ((void*)0); *(&nconf->rde_originsets
)->sqh_last = (ps); (&nconf->rde_originsets)->sqh_last
= &(ps)->entry.sqe_next; } while (0)
;
895 } else {
896 SIMPLEQ_INSERT_TAIL(&nconf->rde_prefixsets, ps,do { (ps)->entry.sqe_next = ((void*)0); *(&nconf->rde_prefixsets
)->sqh_last = (ps); (&nconf->rde_prefixsets)->sqh_last
= &(ps)->entry.sqe_next; } while (0)
897 entry)do { (ps)->entry.sqe_next = ((void*)0); *(&nconf->rde_prefixsets
)->sqh_last = (ps); (&nconf->rde_prefixsets)->sqh_last
= &(ps)->entry.sqe_next; } while (0)
;
898 }
899 last_prefixset = ps;
900 break;
901 case IMSG_RECONF_ROA_ITEM:
902 if (imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr) != sizeof(roa))
903 fatalx("IMSG_RECONF_ROA_ITEM bad len");
904 memcpy(&roa, imsg.data, sizeof(roa));
905 rv = trie_roa_add(&last_prefixset->th, &roa);
906 break;
907 case IMSG_RECONF_PREFIX_SET_ITEM:
908 if (imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr) != sizeof(psi))
909 fatalx("IMSG_RECONF_PREFIX_SET_ITEM bad len");
910 memcpy(&psi, imsg.data, sizeof(psi));
911 if (last_prefixset == NULL((void*)0))
912 fatalx("King Bula has no prefixset");
913 rv = trie_add(&last_prefixset->th,
914 &psi.p.addr, psi.p.len,
915 psi.p.len_min, psi.p.len_max);
916 if (rv == -1)
917 log_warnx("trie_add(%s) %s/%u failed",
918 last_prefixset->name, log_addr(&psi.p.addr),
919 psi.p.len);
920 break;
921 case IMSG_RECONF_AS_SET:
922 if (imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr) !=
923 sizeof(nmemb) + SET_NAME_LEN128)
924 fatalx("IMSG_RECONF_AS_SET bad len");
925 memcpy(&nmemb, imsg.data, sizeof(nmemb));
926 name = (char *)imsg.data + sizeof(nmemb);
927 if (as_sets_lookup(&nconf->as_sets, name) != NULL((void*)0))
928 fatalx("duplicate as-set %s", name);
929 last_as_set = as_sets_new(&nconf->as_sets, name, nmemb,
930 sizeof(u_int32_t));
931 break;
932 case IMSG_RECONF_AS_SET_ITEMS:
933 nmemb = imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr);
934 nmemb /= sizeof(u_int32_t);
935 if (set_add(last_as_set->set, imsg.data, nmemb) != 0)
936 fatal(NULL((void*)0));
937 break;
938 case IMSG_RECONF_AS_SET_DONE:
939 set_prep(last_as_set->set);
940 last_as_set = NULL((void*)0);
941 break;
942 case IMSG_RECONF_VPN:
943 if (imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr) !=
944 sizeof(struct l3vpn))
945 fatalx("IMSG_RECONF_VPN bad len");
946 if ((vpn = malloc(sizeof(struct l3vpn))) == NULL((void*)0))
947 fatal(NULL((void*)0));
948 memcpy(vpn, imsg.data, sizeof(struct l3vpn));
949 TAILQ_INIT(&vpn->import)do { (&vpn->import)->tqh_first = ((void*)0); (&
vpn->import)->tqh_last = &(&vpn->import)->
tqh_first; } while (0)
;
950 TAILQ_INIT(&vpn->export)do { (&vpn->export)->tqh_first = ((void*)0); (&
vpn->export)->tqh_last = &(&vpn->export)->
tqh_first; } while (0)
;
951 TAILQ_INIT(&vpn->net_l)do { (&vpn->net_l)->tqh_first = ((void*)0); (&vpn
->net_l)->tqh_last = &(&vpn->net_l)->tqh_first
; } while (0)
;
952 SIMPLEQ_INSERT_TAIL(&nconf->l3vpns, vpn, entry)do { (vpn)->entry.sqe_next = ((void*)0); *(&nconf->
l3vpns)->sqh_last = (vpn); (&nconf->l3vpns)->sqh_last
= &(vpn)->entry.sqe_next; } while (0)
;
953 break;
954 case IMSG_RECONF_VPN_EXPORT:
955 if (vpn == NULL((void*)0)) {
956 log_warnx("rde_dispatch_imsg_parent: "
957 "IMSG_RECONF_VPN_EXPORT unexpected");
958 break;
959 }
960 TAILQ_CONCAT(&vpn->export, &parent_set, entry)do { if (!(((&parent_set)->tqh_first) == ((void*)0))) {
*(&vpn->export)->tqh_last = (&parent_set)->
tqh_first; (&parent_set)->tqh_first->entry.tqe_prev
= (&vpn->export)->tqh_last; (&vpn->export)->
tqh_last = (&parent_set)->tqh_last; do { ((&parent_set
))->tqh_first = ((void*)0); ((&parent_set))->tqh_last
= &((&parent_set))->tqh_first; } while (0); } } while
(0)
;
961 break;
962 case IMSG_RECONF_VPN_IMPORT:
963 if (vpn == NULL((void*)0)) {
964 log_warnx("rde_dispatch_imsg_parent: "
965 "IMSG_RECONF_VPN_IMPORT unexpected");
966 break;
967 }
968 TAILQ_CONCAT(&vpn->import, &parent_set, entry)do { if (!(((&parent_set)->tqh_first) == ((void*)0))) {
*(&vpn->import)->tqh_last = (&parent_set)->
tqh_first; (&parent_set)->tqh_first->entry.tqe_prev
= (&vpn->import)->tqh_last; (&vpn->import)->
tqh_last = (&parent_set)->tqh_last; do { ((&parent_set
))->tqh_first = ((void*)0); ((&parent_set))->tqh_last
= &((&parent_set))->tqh_first; } while (0); } } while
(0)
;
969 break;
970 case IMSG_RECONF_VPN_DONE:
971 break;
972 case IMSG_RECONF_DRAIN:
973 imsg_compose(ibuf_main, IMSG_RECONF_DRAIN, 0, 0,
974 -1, NULL((void*)0), 0);
975 break;
976 case IMSG_RECONF_DONE:
977 if (nconf == NULL((void*)0))
978 fatalx("got IMSG_RECONF_DONE but no config");
979 last_prefixset = NULL((void*)0);
980
981 rde_reload_done();
982 break;
983 case IMSG_NEXTHOP_UPDATE:
984 nexthop_update(imsg.data);
985 break;
986 case IMSG_FILTER_SET:
987 if (imsg.hdr.len > IMSG_HEADER_SIZEsizeof(struct imsg_hdr) +
988 sizeof(struct filter_set))
989 fatalx("IMSG_FILTER_SET bad len");
990 if ((s = malloc(sizeof(struct filter_set))) == NULL((void*)0))
991 fatal(NULL((void*)0));
992 memcpy(s, imsg.data, sizeof(struct filter_set));
993 if (s->type == ACTION_SET_NEXTHOP) {
994 s->action.nh_ref =
995 nexthop_get(&s->action.nexthop);
996 s->type = ACTION_SET_NEXTHOP_REF;
997 }
998 TAILQ_INSERT_TAIL(&parent_set, s, entry)do { (s)->entry.tqe_next = ((void*)0); (s)->entry.tqe_prev
= (&parent_set)->tqh_last; *(&parent_set)->tqh_last
= (s); (&parent_set)->tqh_last = &(s)->entry.tqe_next
; } while (0)
;
999 break;
1000 case IMSG_MRT_OPEN:
1001 case IMSG_MRT_REOPEN:
1002 if (imsg.hdr.len > IMSG_HEADER_SIZEsizeof(struct imsg_hdr) +
1003 sizeof(struct mrt)) {
1004 log_warnx("wrong imsg len");
1005 break;
1006 }
1007 memcpy(&xmrt, imsg.data, sizeof(xmrt));
1008 if ((fd = imsg.fd) == -1)
1009 log_warnx("expected to receive fd for mrt dump "
1010 "but didn't receive any");
1011 else if (xmrt.type == MRT_TABLE_DUMP ||
1012 xmrt.type == MRT_TABLE_DUMP_MP ||
1013 xmrt.type == MRT_TABLE_DUMP_V2) {
1014 rde_dump_mrt_new(&xmrt, imsg.hdr.pid, fd);
1015 } else
1016 close(fd);
1017 break;
1018 case IMSG_MRT_CLOSE:
1019 /* ignore end message because a dump is atomic */
1020 break;
1021 default:
1022 fatalx("unhandled IMSG %u", imsg.hdr.type);
1023 }
1024 imsg_free(&imsg);
1025 }
1026}
1027
1028void
1029rde_dispatch_imsg_rtr(struct imsgbuf *ibuf)
1030{
1031 struct imsg imsg;
1032 struct roa roa;
1033 int n;
1034
1035 while (ibuf) {
1036 if ((n = imsg_get(ibuf, &imsg)) == -1)
1037 fatal("rde_dispatch_imsg_parent: imsg_get error");
1038 if (n == 0)
1039 break;
1040
1041 switch (imsg.hdr.type) {
1042 case IMSG_RECONF_ROA_SET:
1043 /* start of update */
1044 break;
1045 case IMSG_RECONF_ROA_ITEM:
1046 if (imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr) !=
1047 sizeof(roa))
1048 fatalx("IMSG_RECONF_ROA_ITEM bad len");
1049 memcpy(&roa, imsg.data, sizeof(roa));
1050 if (trie_roa_add(&roa_new.th, &roa) != 0) {
1051 struct bgpd_addr p = {
1052 .aid = roa.aid,
1053 .v6ba.v6 = roa.prefix.inet6
1054 };
1055 log_warnx("trie_roa_add %s/%u failed",
1056 log_addr(&p), roa.prefixlen);
1057 }
1058 break;
1059 case IMSG_RECONF_DONE:
1060 /* end of update */
1061 rde_roa_reload();
1062 break;
1063 }
1064 imsg_free(&imsg);
1065 }
1066}
1067
1068void
1069rde_dispatch_imsg_peer(struct rde_peer *peer, void *bula)
1070{
1071 struct route_refresh rr;
1072 struct session_up sup;
1073 struct imsg imsg;
1074 u_int8_t aid;
1075
1076 if (!peer_imsg_pop(peer, &imsg))
1077 return;
1078
1079 switch (imsg.hdr.type) {
1080 case IMSG_UPDATE:
1081 if (peer->state != PEER_UP)
1082 break;
1083 rde_update_dispatch(peer, &imsg);
1084 break;
1085 case IMSG_SESSION_UP:
1086 if (imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr) != sizeof(sup))
1087 fatalx("incorrect size of session request");
1088 memcpy(&sup, imsg.data, sizeof(sup));
1089 if (peer_up(peer, &sup) == -1) {
1090 peer->state = PEER_DOWN;
1091 imsg_compose(ibuf_se, IMSG_SESSION_DOWN, peer->conf.id,
1092 0, -1, NULL((void*)0), 0);
1093 }
1094 break;
1095 case IMSG_SESSION_DOWN:
1096 peer_down(peer, NULL((void*)0));
1097 break;
1098 case IMSG_SESSION_STALE:
1099 case IMSG_SESSION_FLUSH:
1100 case IMSG_SESSION_RESTARTED:
1101 if (imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr) != sizeof(aid)) {
1102 log_warnx("%s: wrong imsg len", __func__);
1103 break;
1104 }
1105 memcpy(&aid, imsg.data, sizeof(aid));
1106 if (aid >= AID_MAX5) {
1107 log_warnx("%s: bad AID", __func__);
1108 break;
1109 }
1110
1111 switch (imsg.hdr.type) {
1112 case IMSG_SESSION_STALE:
1113 peer_stale(peer, aid);
1114 break;
1115 case IMSG_SESSION_FLUSH:
1116 peer_flush(peer, aid, peer->staletime[aid]);
1117 break;
1118 case IMSG_SESSION_RESTARTED:
1119 if (peer->staletime[aid])
1120 peer_flush(peer, aid, peer->staletime[aid]);
1121 break;
1122 }
1123 break;
1124 case IMSG_REFRESH:
1125 if (imsg.hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr) != sizeof(rr)) {
1126 log_warnx("%s: wrong imsg len", __func__);
1127 break;
1128 }
1129 memcpy(&rr, imsg.data, sizeof(rr));
1130 if (rr.aid >= AID_MAX5) {
1131 log_warnx("%s: bad AID", __func__);
1132 break;
1133 }
1134 switch (rr.subtype) {
1135 case ROUTE_REFRESH_REQUEST0:
1136 peer_dump(peer, rr.aid);
1137 break;
1138 case ROUTE_REFRESH_BEGIN_RR1:
1139 /* check if graceful restart EOR was received */
1140 if ((peer->recv_eor & (1 << rr.aid)) == 0) {
1141 log_peer_warnx(&peer->conf,
1142 "received %s BoRR before EoR",
1143 aid2str(rr.aid));
1144 break;
1145 }
1146 peer_begin_rrefresh(peer, rr.aid);
1147 break;
1148 case ROUTE_REFRESH_END_RR2:
1149 if ((peer->recv_eor & (1 << rr.aid)) != 0 &&
1150 peer->staletime[rr.aid])
1151 peer_flush(peer, rr.aid,
1152 peer->staletime[rr.aid]);
1153 else
1154 log_peer_warnx(&peer->conf,
1155 "received unexpected %s EoRR",
1156 aid2str(rr.aid));
1157 break;
1158 default:
1159 log_warnx("%s: bad subtype %d", __func__, rr.subtype);
1160 break;
1161 }
1162 break;
1163 default:
1164 log_warnx("%s: unhandled imsg type %d", __func__,
1165 imsg.hdr.type);
1166 break;
1167 }
1168
1169 imsg_free(&imsg);
1170}
1171
1172/* handle routing updates from the session engine. */
1173void
1174rde_update_dispatch(struct rde_peer *peer, struct imsg *imsg)
1175{
1176 struct filterstate state;
1177 struct bgpd_addr prefix;
1178 struct mpattr mpa;
1179 u_char *p, *mpp = NULL((void*)0);
1180 int pos = 0;
1181 u_int16_t afi, len, mplen;
1182 u_int16_t withdrawn_len;
1183 u_int16_t attrpath_len;
1184 u_int16_t nlri_len;
1185 u_int8_t aid, prefixlen, safi, subtype;
1186 u_int32_t fas, pathid;
1187
1188 p = imsg->data;
1189
1190 if (imsg->hdr.len < IMSG_HEADER_SIZEsizeof(struct imsg_hdr) + 2) {
1191 rde_update_err(peer, ERR_UPDATE, ERR_UPD_ATTRLIST, NULL((void*)0), 0);
1192 return;
1193 }
1194
1195 memcpy(&len, p, 2);
1196 withdrawn_len = ntohs(len)(__uint16_t)(__builtin_constant_p(len) ? (__uint16_t)(((__uint16_t
)(len) & 0xffU) << 8 | ((__uint16_t)(len) & 0xff00U
) >> 8) : __swap16md(len))
;
1197 p += 2;
1198 if (imsg->hdr.len < IMSG_HEADER_SIZEsizeof(struct imsg_hdr) + 2 + withdrawn_len + 2) {
1199 rde_update_err(peer, ERR_UPDATE, ERR_UPD_ATTRLIST, NULL((void*)0), 0);
1200 return;
1201 }
1202
1203 p += withdrawn_len;
1204 memcpy(&len, p, 2);
1205 attrpath_len = len = ntohs(len)(__uint16_t)(__builtin_constant_p(len) ? (__uint16_t)(((__uint16_t
)(len) & 0xffU) << 8 | ((__uint16_t)(len) & 0xff00U
) >> 8) : __swap16md(len))
;
1206 p += 2;
1207 if (imsg->hdr.len <
1208 IMSG_HEADER_SIZEsizeof(struct imsg_hdr) + 2 + withdrawn_len + 2 + attrpath_len) {
1209 rde_update_err(peer, ERR_UPDATE, ERR_UPD_ATTRLIST, NULL((void*)0), 0);
1210 return;
1211 }
1212
1213 nlri_len =
1214 imsg->hdr.len - IMSG_HEADER_SIZEsizeof(struct imsg_hdr) - 4 - withdrawn_len - attrpath_len;
1215
1216 if (attrpath_len == 0) {
1217 /* 0 = no NLRI information in this message */
1218 if (nlri_len != 0) {
1219 /* crap at end of update which should not be there */
1220 rde_update_err(peer, ERR_UPDATE,
1221 ERR_UPD_ATTRLIST, NULL((void*)0), 0);
1222 return;
1223 }
1224 if (withdrawn_len == 0) {
1225 /* EoR marker */
1226 rde_peer_recv_eor(peer, AID_INET1);
1227 return;
1228 }
1229 }
1230
1231 bzero(&mpa, sizeof(mpa));
1232 rde_filterstate_prep(&state, NULL((void*)0), NULL((void*)0), NULL((void*)0), 0);
1233 if (attrpath_len != 0) { /* 0 = no NLRI information in this message */
1234 /* parse path attributes */
1235 while (len > 0) {
1236 if ((pos = rde_attr_parse(p, len, peer, &state,
1237 &mpa)) < 0)
1238 goto done;
1239 p += pos;
1240 len -= pos;
1241 }
1242
1243 /* check for missing but necessary attributes */
1244 if ((subtype = rde_attr_missing(&state.aspath, peer->conf.ebgp,
1245 nlri_len))) {
1246 rde_update_err(peer, ERR_UPDATE, ERR_UPD_MISSNG_WK_ATTR,
1247 &subtype, sizeof(u_int8_t));
1248 goto done;
1249 }
1250
1251 rde_as4byte_fixup(peer, &state.aspath);
1252
1253 /* enforce remote AS if requested */
1254 if (state.aspath.flags & F_ATTR_ASPATH0x00002 &&
1255 peer->conf.enforce_as == ENFORCE_AS_ON) {
1256 fas = aspath_neighbor(state.aspath.aspath);
1257 if (peer->conf.remote_as != fas) {
1258 log_peer_warnx(&peer->conf, "bad path, "
1259 "starting with %s, "
1260 "enforce neighbor-as enabled", log_as(fas));
1261 rde_update_err(peer, ERR_UPDATE, ERR_UPD_ASPATH,
1262 NULL((void*)0), 0);
1263 goto done;
1264 }
1265 }
1266
1267 /* aspath needs to be loop free. This is not a hard error. */
1268 if (state.aspath.flags & F_ATTR_ASPATH0x00002 &&
1269 peer->conf.ebgp &&
1270 peer->conf.enforce_local_as == ENFORCE_AS_ON &&
1271 !aspath_loopfree(state.aspath.aspath, peer->conf.local_as))
1272 state.aspath.flags |= F_ATTR_LOOP0x00200;
1273
1274 rde_reflector(peer, &state.aspath);
1275 }
1276
1277 p = imsg->data;
1278 len = withdrawn_len;
1279 p += 2;
1280
1281 /* withdraw prefix */
1282 while (len > 0) {
1283 if (peer->capa.mp[AID_INET1] == 0) {
1284 log_peer_warnx(&peer->conf,
1285 "bad withdraw, %s disabled", aid2str(AID_INET1));
1286 rde_update_err(peer, ERR_UPDATE, ERR_UPD_OPTATTR,
1287 NULL((void*)0), 0);
1288 goto done;
1289 }
1290
1291 if (peer_has_add_path(peer, AID_INET1, CAPA_AP_RECV0x01)) {
1292 if (len <= sizeof(pathid)) {
1293 log_peer_warnx(&peer->conf,
1294 "bad withdraw prefix");
1295 rde_update_err(peer, ERR_UPDATE,
1296 ERR_UPD_NETWORK, NULL((void*)0), 0);
1297 goto done;
1298 }
1299 memcpy(&pathid, p, sizeof(pathid));
1300 pathid = ntohl(pathid)(__uint32_t)(__builtin_constant_p(pathid) ? (__uint32_t)(((__uint32_t
)(pathid) & 0xff) << 24 | ((__uint32_t)(pathid) &
0xff00) << 8 | ((__uint32_t)(pathid) & 0xff0000) >>
8 | ((__uint32_t)(pathid) & 0xff000000) >> 24) : __swap32md
(pathid))
;
1301 p += sizeof(pathid);
1302 len -= sizeof(pathid);
1303 } else
1304 pathid = 0;
1305
1306 if ((pos = nlri_get_prefix(p, len, &prefix,
1307 &prefixlen)) == -1) {
1308 /*
1309 * the RFC does not mention what we should do in
1310 * this case. Let's do the same as in the NLRI case.
1311 */
1312 log_peer_warnx(&peer->conf, "bad withdraw prefix");
1313 rde_update_err(peer, ERR_UPDATE, ERR_UPD_NETWORK,
1314 NULL((void*)0), 0);
1315 goto done;
1316 }
1317 p += pos;
1318 len -= pos;
1319
1320 rde_update_withdraw(peer, pathid, &prefix, prefixlen);
1321 }
1322
1323 /* withdraw MP_UNREACH_NLRI if available */
1324 if (mpa.unreach_len != 0) {
1325 mpp = mpa.unreach;
1326 mplen = mpa.unreach_len;
1327 memcpy(&afi, mpp, 2);
1328 mpp += 2;
1329 mplen -= 2;
1330 afi = ntohs(afi)(__uint16_t)(__builtin_constant_p(afi) ? (__uint16_t)(((__uint16_t
)(afi) & 0xffU) << 8 | ((__uint16_t)(afi) & 0xff00U
) >> 8) : __swap16md(afi))
;
1331 safi = *mpp++;
1332 mplen--;
1333
1334 if (afi2aid(afi, safi, &aid) == -1) {
1335 log_peer_warnx(&peer->conf,
1336 "bad AFI/SAFI pair in withdraw");
1337 rde_update_err(peer, ERR_UPDATE, ERR_UPD_OPTATTR,
1338 NULL((void*)0), 0);
1339 goto done;
1340 }
1341
1342 if (peer->capa.mp[aid] == 0) {
1343 log_peer_warnx(&peer->conf,
1344 "bad withdraw, %s disabled", aid2str(aid));
1345 rde_update_err(peer, ERR_UPDATE, ERR_UPD_OPTATTR,
1346 NULL((void*)0), 0);
1347 goto done;
1348 }
1349
1350 if ((state.aspath.flags & ~F_ATTR_MP_UNREACH0x00080) == 0 &&
1351 mplen == 0) {
1352 /* EoR marker */
1353 rde_peer_recv_eor(peer, aid);
1354 }
1355
1356 while (mplen > 0) {
1357 if (peer_has_add_path(peer, aid, CAPA_AP_RECV0x01)) {
1358 if (mplen <= sizeof(pathid)) {
1359 log_peer_warnx(&peer->conf,
1360 "bad %s withdraw prefix",
1361 aid2str(aid));
1362 rde_update_err(peer, ERR_UPDATE,
1363 ERR_UPD_OPTATTR,
1364 mpa.unreach, mpa.unreach_len);
1365 goto done;
1366 }
1367 memcpy(&pathid, mpp, sizeof(pathid));
1368 pathid = ntohl(pathid)(__uint32_t)(__builtin_constant_p(pathid) ? (__uint32_t)(((__uint32_t
)(pathid) & 0xff) << 24 | ((__uint32_t)(pathid) &
0xff00) << 8 | ((__uint32_t)(pathid) & 0xff0000) >>
8 | ((__uint32_t)(pathid) & 0xff000000) >> 24) : __swap32md
(pathid))
;
1369 mpp += sizeof(pathid);
1370 mplen -= sizeof(pathid);
1371 } else
1372 pathid = 0;
1373
1374 switch (aid) {
1375 case AID_INET62:
1376 if ((pos = nlri_get_prefix6(mpp, mplen,
1377 &prefix, &prefixlen)) == -1) {
1378 log_peer_warnx(&peer->conf,
1379 "bad IPv6 withdraw prefix");
1380 rde_update_err(peer, ERR_UPDATE,
1381 ERR_UPD_OPTATTR,
1382 mpa.unreach, mpa.unreach_len);
1383 goto done;
1384 }
1385 break;
1386 case AID_VPN_IPv43:
1387 if ((pos = nlri_get_vpn4(mpp, mplen,
1388 &prefix, &prefixlen, 1)) == -1) {
1389 log_peer_warnx(&peer->conf,
1390 "bad VPNv4 withdraw prefix");
1391 rde_update_err(peer, ERR_UPDATE,
1392 ERR_UPD_OPTATTR,
1393 mpa.unreach, mpa.unreach_len);
1394 goto done;
1395 }
1396 break;
1397 case AID_VPN_IPv64:
1398 if ((pos = nlri_get_vpn6(mpp, mplen,
1399 &prefix, &prefixlen, 1)) == -1) {
1400 log_peer_warnx(&peer->conf,
1401 "bad VPNv6 withdraw prefix");
1402 rde_update_err(peer, ERR_UPDATE,
1403 ERR_UPD_OPTATTR, mpa.unreach,
1404 mpa.unreach_len);
1405 goto done;
1406 }
1407 break;
1408 default:
1409 /* ignore unsupported multiprotocol AF */
1410 break;
1411 }
1412
1413 mpp += pos;
1414 mplen -= pos;
1415
1416 rde_update_withdraw(peer, pathid, &prefix, prefixlen);
1417 }
1418
1419 if ((state.aspath.flags & ~F_ATTR_MP_UNREACH0x00080) == 0)
1420 goto done;
1421 }
1422
1423 /* shift to NLRI information */
1424 p += 2 + attrpath_len;
1425
1426 /* parse nlri prefix */
1427 while (nlri_len > 0) {
1428 if (peer->capa.mp[AID_INET1] == 0) {
1429 log_peer_warnx(&peer->conf,
1430 "bad update, %s disabled", aid2str(AID_INET1));
1431 rde_update_err(peer, ERR_UPDATE, ERR_UPD_OPTATTR,
1432 NULL((void*)0), 0);
1433 goto done;
1434 }
1435
1436 if (peer_has_add_path(peer, AID_INET1, CAPA_AP_RECV0x01)) {
1437 if (nlri_len <= sizeof(pathid)) {
1438 log_peer_warnx(&peer->conf,
1439 "bad nlri prefix");
1440 rde_update_err(peer, ERR_UPDATE,
1441 ERR_UPD_NETWORK, NULL((void*)0), 0);
1442 goto done;
1443 }
1444 memcpy(&pathid, p, sizeof(pathid));
1445 pathid = ntohl(pathid)(__uint32_t)(__builtin_constant_p(pathid) ? (__uint32_t)(((__uint32_t
)(pathid) & 0xff) << 24 | ((__uint32_t)(pathid) &
0xff00) << 8 | ((__uint32_t)(pathid) & 0xff0000) >>
8 | ((__uint32_t)(pathid) & 0xff000000) >> 24) : __swap32md
(pathid))
;
1446 p += sizeof(pathid);
1447 nlri_len -= sizeof(pathid);
1448 } else
1449 pathid = 0;
1450
1451 if ((pos = nlri_get_prefix(p, nlri_len, &prefix,
1452 &prefixlen)) == -1) {
1453 log_peer_warnx(&peer->conf, "bad nlri prefix");
1454 rde_update_err(peer, ERR_UPDATE, ERR_UPD_NETWORK,
1455 NULL((void*)0), 0);
1456 goto done;
1457 }
1458 p += pos;
1459 nlri_len -= pos;
1460
1461 if (rde_update_update(peer, pathid, &state,
1462 &prefix, prefixlen) == -1)
1463 goto done;
1464
1465 }
1466
1467 /* add MP_REACH_NLRI if available */
1468 if (mpa.reach_len != 0) {
1469 mpp = mpa.reach;
1470 mplen = mpa.reach_len;
1471 memcpy(&afi, mpp, 2);
1472 mpp += 2;
1473 mplen -= 2;
1474 afi = ntohs(afi)(__uint16_t)(__builtin_constant_p(afi) ? (__uint16_t)(((__uint16_t
)(afi) & 0xffU) << 8 | ((__uint16_t)(afi) & 0xff00U
) >> 8) : __swap16md(afi))
;
1475 safi = *mpp++;
1476 mplen--;
1477
1478 if (afi2aid(afi, safi, &aid) == -1) {
1479 log_peer_warnx(&peer->conf,
1480 "bad AFI/SAFI pair in update");
1481 rde_update_err(peer, ERR_UPDATE, ERR_UPD_OPTATTR,
1482 NULL((void*)0), 0);
1483 goto done;
1484 }
1485
1486 if (peer->capa.mp[aid] == 0) {
1487 log_peer_warnx(&peer->conf,
1488 "bad update, %s disabled", aid2str(aid));
1489 rde_update_err(peer, ERR_UPDATE, ERR_UPD_OPTATTR,
1490 NULL((void*)0), 0);
1491 goto done;
1492 }
1493
1494 /* unlock the previously locked nexthop, it is no longer used */
1495 nexthop_unref(state.nexthop);
1496 state.nexthop = NULL((void*)0);
1497 if ((pos = rde_get_mp_nexthop(mpp, mplen, aid, &state)) == -1) {
1498 log_peer_warnx(&peer->conf, "bad nlri nexthop");
1499 rde_update_err(peer, ERR_UPDATE, ERR_UPD_OPTATTR,
1500 mpa.reach, mpa.reach_len);
1501 goto done;
1502 }
1503 mpp += pos;
1504 mplen -= pos;
1505
1506 while (mplen > 0) {
1507 if (peer_has_add_path(peer, aid, CAPA_AP_RECV0x01)) {
1508 if (mplen <= sizeof(pathid)) {
1509 log_peer_warnx(&peer->conf,
1510 "bad %s nlri prefix", aid2str(aid));
1511 rde_update_err(peer, ERR_UPDATE,
1512 ERR_UPD_OPTATTR,
1513 mpa.reach, mpa.reach_len);
1514 goto done;
1515 }
1516 memcpy(&pathid, mpp, sizeof(pathid));
1517 pathid = ntohl(pathid)(__uint32_t)(__builtin_constant_p(pathid) ? (__uint32_t)(((__uint32_t
)(pathid) & 0xff) << 24 | ((__uint32_t)(pathid) &
0xff00) << 8 | ((__uint32_t)(pathid) & 0xff0000) >>
8 | ((__uint32_t)(pathid) & 0xff000000) >> 24) : __swap32md
(pathid))
;
1518 mpp += sizeof(pathid);
1519 mplen -= sizeof(pathid);
1520 } else
1521 pathid = 0;
1522
1523 switch (aid) {
1524 case AID_INET62:
1525 if ((pos = nlri_get_prefix6(mpp, mplen,
1526 &prefix, &prefixlen)) == -1) {
1527 log_peer_warnx(&peer->conf,
1528 "bad IPv6 nlri prefix");
1529 rde_update_err(peer, ERR_UPDATE,
1530 ERR_UPD_OPTATTR,
1531 mpa.reach, mpa.reach_len);
1532 goto done;
1533 }
1534 break;
1535 case AID_VPN_IPv43:
1536 if ((pos = nlri_get_vpn4(mpp, mplen,
1537 &prefix, &prefixlen, 0)) == -1) {
1538 log_peer_warnx(&peer->conf,
1539 "bad VPNv4 nlri prefix");
1540 rde_update_err(peer, ERR_UPDATE,
1541 ERR_UPD_OPTATTR,
1542 mpa.reach, mpa.reach_len);
1543 goto done;
1544 }
1545 break;
1546 case AID_VPN_IPv64:
1547 if ((pos = nlri_get_vpn6(mpp, mplen,
1548 &prefix, &prefixlen, 0)) == -1) {
1549 log_peer_warnx(&peer->conf,
1550 "bad VPNv6 nlri prefix");
1551 rde_update_err(peer, ERR_UPDATE,
1552 ERR_UPD_OPTATTR,
1553 mpa.reach, mpa.reach_len);
1554 goto done;
1555 }
1556 break;
1557 default:
1558 /* ignore unsupported multiprotocol AF */
1559 break;
1560 }
1561
1562 mpp += pos;
1563 mplen -= pos;
1564
1565 if (rde_update_update(peer, pathid, &state,
1566 &prefix, prefixlen) == -1)
1567 goto done;
1568 }
1569 }
1570
1571done:
1572 rde_filterstate_clean(&state);
1573}
1574
1575int
1576rde_update_update(struct rde_peer *peer, u_int32_t path_id,
1577 struct filterstate *in, struct bgpd_addr *prefix, u_int8_t prefixlen)
1578{
1579 struct filterstate state;
1580 enum filter_actions action;
1581 u_int8_t vstate;
1582 u_int16_t i;
1583 const char *wmsg = "filtered, withdraw";
1584
1585 peer->prefix_rcvd_update++;
1586 vstate = rde_roa_validity(&rde_roa, prefix, prefixlen,
1587 aspath_origin(in->aspath.aspath));
1588
1589 /* add original path to the Adj-RIB-In */
1590 if (prefix_update(rib_byid(RIB_ADJ_IN0), peer, path_id, in,
1591 prefix, prefixlen, vstate) == 1)
1592 peer->prefix_cnt++;
1593
1594 /* max prefix checker */
1595 if (peer->conf.max_prefix && peer->prefix_cnt > peer->conf.max_prefix) {
1596 log_peer_warnx(&peer->conf, "prefix limit reached (>%u/%u)",
1597 peer->prefix_cnt, peer->conf.max_prefix);
1598 rde_update_err(peer, ERR_CEASE, ERR_CEASE_MAX_PREFIX, NULL((void*)0), 0);
1599 return (-1);
1600 }
1601
1602 if (in->aspath.flags & F_ATTR_PARSE_ERR0x10000)
1603 wmsg = "path invalid, withdraw";
1604
1605 for (i = RIB_LOC_START1; i < rib_size; i++) {
1606 struct rib *rib = rib_byid(i);
1607 if (rib == NULL((void*)0))
1608 continue;
1609 rde_filterstate_prep(&state, &in->aspath, &in->communities,
1610 in->nexthop, in->nhflags);
1611 /* input filter */
1612 action = rde_filter(rib->in_rules, peer, peer, prefix,
1613 prefixlen, vstate, &state);
1614
1615 if (action == ACTION_ALLOW) {
1616 rde_update_log("update", i, peer,
1617 &state.nexthop->exit_nexthop, prefix,
1618 prefixlen);
1619 prefix_update(rib, peer, path_id, &state, prefix,
1620 prefixlen, vstate);
1621 } else if (prefix_withdraw(rib, peer, path_id, prefix,
1622 prefixlen)) {
1623 rde_update_log(wmsg, i, peer,
1624 NULL((void*)0), prefix, prefixlen);
1625 }
1626
1627 /* clear state */
1628 rde_filterstate_clean(&state);
1629 }
1630 return (0);
1631}
1632
1633void
1634rde_update_withdraw(struct rde_peer *peer, u_int32_t path_id,
1635 struct bgpd_addr *prefix, u_int8_t prefixlen)
1636{
1637 u_int16_t i;
1638
1639 for (i = RIB_LOC_START1; i < rib_size; i++) {
1640 struct rib *rib = rib_byid(i);
1641 if (rib == NULL((void*)0))
1642 continue;
1643 if (prefix_withdraw(rib, peer, path_id, prefix, prefixlen))
1644 rde_update_log("withdraw", i, peer, NULL((void*)0), prefix,
1645 prefixlen);
1646 }
1647
1648	/* remove original path from the Adj-RIB-In */
1649 if (prefix_withdraw(rib_byid(RIB_ADJ_IN0), peer, path_id,
1650 prefix, prefixlen))
1651 peer->prefix_cnt--;
1652
1653 peer->prefix_rcvd_withdraw++;
1654}
1655
1656/*
1657 * BGP UPDATE parser functions
1658 */
1659
1660/* attribute parser specific macros */
1661#define UPD_READ(t, p, plen, n) \
1662 do { \
1663 memcpy(t, p, n); \
1664 p += n; \
1665 plen += n; \
1666 } while (0)
1667
1668#define CHECK_FLAGS(s, t, m) \
1669 (((s) & ~(ATTR_DEFMASK(0x0f | 0x10) | (m))) == (t))
1670
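The CHECK_FLAGS macro accepts a flag octet when, after masking out the bits the check ignores (ATTR_DEFMASK, i.e. the unused low flag bits plus ATTR_EXTLEN) and any explicitly tolerated bits such as ATTR_PARTIAL, exactly the expected bits remain. A minimal standalone sketch of that behavior (flag constants per RFC 4271; the test values are illustrative):

#include <stdio.h>

#define ATTR_OPTIONAL	0x80
#define ATTR_TRANSITIVE	0x40
#define ATTR_PARTIAL	0x20
#define ATTR_EXTLEN	0x10
#define ATTR_DEFMASK	(0x0f | ATTR_EXTLEN)	/* bits the check ignores */

#define CHECK_FLAGS(s, t, m) \
	(((s) & ~(ATTR_DEFMASK | (m))) == (t))

int
main(void)
{
	/* COMMUNITIES: must be optional+transitive, partial tolerated */
	printf("%d\n", CHECK_FLAGS(ATTR_OPTIONAL|ATTR_TRANSITIVE,
	    ATTR_OPTIONAL|ATTR_TRANSITIVE, ATTR_PARTIAL));	/* 1 */
	printf("%d\n", CHECK_FLAGS(ATTR_OPTIONAL|ATTR_TRANSITIVE|
	    ATTR_PARTIAL|ATTR_EXTLEN,
	    ATTR_OPTIONAL|ATTR_TRANSITIVE, ATTR_PARTIAL));	/* 1 */
	printf("%d\n", CHECK_FLAGS(ATTR_OPTIONAL,
	    ATTR_OPTIONAL|ATTR_TRANSITIVE, ATTR_PARTIAL));	/* 0: bad flags */
	return 0;
}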
1671int
1672rde_attr_parse(u_char *p, u_int16_t len, struct rde_peer *peer,
1673 struct filterstate *state, struct mpattr *mpa)
1674{
1675 struct bgpd_addr nexthop;
1676 struct rde_aspath *a = &state->aspath;
1677 u_char *op = p, *npath;
1678 u_int32_t tmp32, zero = 0;
1679 int error;
1680 u_int16_t attr_len, nlen;
1681 u_int16_t plen = 0;
1682 u_int8_t flags;
1683 u_int8_t type;
1684 u_int8_t tmp8;
1685
1686 if (len < 3) {
1687bad_len:
1688 rde_update_err(peer, ERR_UPDATE, ERR_UPD_ATTRLEN, op, len);
1689 return (-1);
1690 }
1691
1692 UPD_READ(&flags, p, plen, 1);
1693 UPD_READ(&type, p, plen, 1);
1694
1695 if (flags & ATTR_EXTLEN0x10) {
1696 if (len - plen < 2)
1697 goto bad_len;
1698 UPD_READ(&attr_len, p, plen, 2);
1699		attr_len = ntohs(attr_len);
1700 } else {
1701 UPD_READ(&tmp8, p, plen, 1);
1702 attr_len = tmp8;
1703 }
1704
1705 if (len - plen < attr_len)
1706 goto bad_len;
1707
1708 /* adjust len to the actual attribute size including header */
1709 len = plen + attr_len;
1710
1711 switch (type) {
1712 case ATTR_UNDEF:
1713 /* ignore and drop path attributes with a type code of 0 */
1714 plen += attr_len;
1715 break;
1716 case ATTR_ORIGIN:
1717 if (attr_len != 1)
1718 goto bad_len;
1719
1720 if (!CHECK_FLAGS(flags, ATTR_WELL_KNOWN0x40, 0)) {
1721bad_flags:
1722 rde_update_err(peer, ERR_UPDATE, ERR_UPD_ATTRFLAGS,
1723 op, len);
1724 return (-1);
1725 }
1726
1727 UPD_READ(&a->origin, p, plen, 1);
1728 if (a->origin > ORIGIN_INCOMPLETE2) {
1729 rde_update_err(peer, ERR_UPDATE, ERR_UPD_ORIGIN,
1730 op, len);
1731 return (-1);
1732 }
1733 if (a->flags & F_ATTR_ORIGIN0x00001)
1734 goto bad_list;
1735 a->flags |= F_ATTR_ORIGIN0x00001;
1736 break;
1737 case ATTR_ASPATH:
1738 if (!CHECK_FLAGS(flags, ATTR_WELL_KNOWN0x40, 0))
1739 goto bad_flags;
1740 error = aspath_verify(p, attr_len, peer_has_as4byte(peer),
1741 peer_accept_no_as_set(peer));
1742 if (error == AS_ERR_SOFT-4) {
1743 /*
1744 * soft errors like unexpected segment types are
1745 * not considered fatal and the path is just
1746 * marked invalid.
1747 */
1748 a->flags |= F_ATTR_PARSE_ERR0x10000;
1749 } else if (error != 0) {
1750 rde_update_err(peer, ERR_UPDATE, ERR_UPD_ASPATH,
1751 NULL((void*)0), 0);
1752 return (-1);
1753 }
1754 if (a->flags & F_ATTR_ASPATH0x00002)
1755 goto bad_list;
1756 if (peer_has_as4byte(peer)) {
1757 npath = p;
1758 nlen = attr_len;
1759 } else {
1760 npath = aspath_inflate(p, attr_len, &nlen);
1761 if (npath == NULL((void*)0))
1762 fatal("aspath_inflate");
1763 }
1764 if (error == AS_ERR_SOFT-4) {
1765 char *str;
1766
1767 aspath_asprint(&str, npath, nlen);
1768 log_peer_warnx(&peer->conf, "bad ASPATH %s, "
1769 "path invalidated and prefix withdrawn",
1770 str ? str : "(bad aspath)");
1771 free(str);
1772 }
1773 a->flags |= F_ATTR_ASPATH0x00002;
1774 a->aspath = aspath_get(npath, nlen);
1775 if (npath != p)
1776 free(npath);
1777 plen += attr_len;
1778 break;
1779 case ATTR_NEXTHOP:
1780 if (attr_len != 4)
1781 goto bad_len;
1782 if (!CHECK_FLAGS(flags, ATTR_WELL_KNOWN0x40, 0))
1783 goto bad_flags;
1784 if (a->flags & F_ATTR_NEXTHOP0x00004)
1785 goto bad_list;
1786 a->flags |= F_ATTR_NEXTHOP0x00004;
1787
1788 bzero(&nexthop, sizeof(nexthop));
1789 nexthop.aid = AID_INET1;
1790 UPD_READ(&nexthop.v4ba.v4.s_addr, p, plen, 4);
1791 /*
1792 * Check if the nexthop is a valid IP address. We consider
1793 * multicast and experimental addresses as invalid.
1794 */
1795		tmp32 = ntohl(nexthop.v4.s_addr);
1796		if (IN_MULTICAST(tmp32) || IN_BADCLASS(tmp32)) {
1797 rde_update_err(peer, ERR_UPDATE, ERR_UPD_NEXTHOP,
1798 op, len);
1799 return (-1);
1800 }
1801 nexthop_unref(state->nexthop); /* just to be sure */
1802 state->nexthop = nexthop_get(&nexthop);
1803 break;
1804 case ATTR_MED:
1805 if (attr_len != 4)
1806 goto bad_len;
1807 if (!CHECK_FLAGS(flags, ATTR_OPTIONAL0x80, 0))
1808 goto bad_flags;
1809 if (a->flags & F_ATTR_MED0x00010)
1810 goto bad_list;
1811 a->flags |= F_ATTR_MED0x00010;
1812
1813 UPD_READ(&tmp32, p, plen, 4);
1814		a->med = ntohl(tmp32);
1815 break;
1816 case ATTR_LOCALPREF:
1817 if (attr_len != 4)
1818 goto bad_len;
1819 if (!CHECK_FLAGS(flags, ATTR_WELL_KNOWN0x40, 0))
1820 goto bad_flags;
1821 if (peer->conf.ebgp) {
1822			/* ignore local-pref attr on non-ibgp peers */
1823 plen += 4;
1824 break;
1825 }
1826 if (a->flags & F_ATTR_LOCALPREF0x00008)
1827 goto bad_list;
1828 a->flags |= F_ATTR_LOCALPREF0x00008;
1829
1830 UPD_READ(&tmp32, p, plen, 4);
1831		a->lpref = ntohl(tmp32);
1832 break;
1833 case ATTR_ATOMIC_AGGREGATE:
1834 if (attr_len != 0)
1835 goto bad_len;
1836 if (!CHECK_FLAGS(flags, ATTR_WELL_KNOWN0x40, 0))
1837 goto bad_flags;
1838 goto optattr;
1839 case ATTR_AGGREGATOR:
1840 if ((!peer_has_as4byte(peer) && attr_len != 6) ||
1841 (peer_has_as4byte(peer) && attr_len != 8)) {
1842 /*
1843 * ignore attribute in case of error as per
1844 * RFC 7606
1845 */
1846 log_peer_warnx(&peer->conf, "bad AGGREGATOR, "
1847 "partial attribute ignored");
1848 plen += attr_len;
1849 break;
1850 }
1851 if (!CHECK_FLAGS(flags, ATTR_OPTIONAL0x80|ATTR_TRANSITIVE0x40,
1852 ATTR_PARTIAL0x20))
1853 goto bad_flags;
1854 if (!peer_has_as4byte(peer)) {
1855 /* need to inflate aggregator AS to 4-byte */
1856 u_char t[8];
1857 t[0] = t[1] = 0;
1858 UPD_READ(&t[2], p, plen, 2);
1859 UPD_READ(&t[4], p, plen, 4);
1860 if (memcmp(t, &zero, sizeof(u_int32_t)) == 0) {
1861 /* As per RFC7606 use "attribute discard". */
1862 log_peer_warnx(&peer->conf, "bad AGGREGATOR, "
1863 "AS 0 not allowed, attribute discarded");
1864 break;
1865 }
1866 if (attr_optadd(a, flags, type, t,
1867 sizeof(t)) == -1)
1868 goto bad_list;
1869 break;
1870 }
1871		/* 4-byte capable peers take the default path */
1872 if (memcmp(p, &zero, sizeof(u_int32_t)) == 0) {
1873 /* As per RFC7606 use "attribute discard" here. */
1874 char *pfmt = log_fmt_peer(&peer->conf);
1875 log_debug("%s: bad AGGREGATOR, "
1876 "AS 0 not allowed, attribute discarded", pfmt);
1877 free(pfmt);
1878 plen += attr_len;
1879 break;
1880 }
1881 goto optattr;
1882 case ATTR_COMMUNITIES:
1883 if (!CHECK_FLAGS(flags, ATTR_OPTIONAL0x80|ATTR_TRANSITIVE0x40,
1884 ATTR_PARTIAL0x20))
1885 goto bad_flags;
1886 if (community_add(&state->communities, flags, p,
1887 attr_len) == -1) {
1888 /*
1889 * mark update as bad and withdraw all routes as per
1890 * RFC 7606
1891 */
1892 a->flags |= F_ATTR_PARSE_ERR0x10000;
1893 log_peer_warnx(&peer->conf, "bad COMMUNITIES, "
1894 "path invalidated and prefix withdrawn");
1895 break;
1896 }
1897 plen += attr_len;
1898 break;
1899 case ATTR_LARGE_COMMUNITIES:
1900 if (!CHECK_FLAGS(flags, ATTR_OPTIONAL0x80|ATTR_TRANSITIVE0x40,
1901 ATTR_PARTIAL0x20))
1902 goto bad_flags;
1903 if (community_large_add(&state->communities, flags, p,
1904 attr_len) == -1) {
1905 /*
1906 * mark update as bad and withdraw all routes as per
1907 * RFC 7606
1908 */
1909 a->flags |= F_ATTR_PARSE_ERR0x10000;
1910 log_peer_warnx(&peer->conf, "bad LARGE COMMUNITIES, "
1911 "path invalidated and prefix withdrawn");
1912 break;
1913 }
1914 plen += attr_len;
1915 break;
1916 case ATTR_EXT_COMMUNITIES:
1917 if (!CHECK_FLAGS(flags, ATTR_OPTIONAL0x80|ATTR_TRANSITIVE0x40,
1918 ATTR_PARTIAL0x20))
1919 goto bad_flags;
1920 if (community_ext_add(&state->communities, flags, p,
1921 attr_len) == -1) {
1922 /*
1923 * mark update as bad and withdraw all routes as per
1924 * RFC 7606
1925 */
1926 a->flags |= F_ATTR_PARSE_ERR0x10000;
1927 log_peer_warnx(&peer->conf, "bad EXT_COMMUNITIES, "
1928 "path invalidated and prefix withdrawn");
1929 break;
1930 }
1931 plen += attr_len;
1932 break;
1933 case ATTR_ORIGINATOR_ID:
1934 if (attr_len != 4)
1935 goto bad_len;
1936 if (!CHECK_FLAGS(flags, ATTR_OPTIONAL0x80, 0))
1937 goto bad_flags;
1938 goto optattr;
1939 case ATTR_CLUSTER_LIST:
1940 if (attr_len % 4 != 0)
1941 goto bad_len;
1942 if (!CHECK_FLAGS(flags, ATTR_OPTIONAL0x80, 0))
1943 goto bad_flags;
1944 goto optattr;
1945 case ATTR_MP_REACH_NLRI:
1946 if (attr_len < 4)
1947 goto bad_len;
1948 if (!CHECK_FLAGS(flags, ATTR_OPTIONAL0x80, 0))
1949 goto bad_flags;
1950 /* the validity is checked in rde_update_dispatch() */
1951 if (a->flags & F_ATTR_MP_REACH0x00040)
1952 goto bad_list;
1953 a->flags |= F_ATTR_MP_REACH0x00040;
1954
1955 mpa->reach = p;
1956 mpa->reach_len = attr_len;
1957 plen += attr_len;
1958 break;
1959 case ATTR_MP_UNREACH_NLRI:
1960 if (attr_len < 3)
1961 goto bad_len;
1962 if (!CHECK_FLAGS(flags, ATTR_OPTIONAL0x80, 0))
1963 goto bad_flags;
1964 /* the validity is checked in rde_update_dispatch() */
1965 if (a->flags & F_ATTR_MP_UNREACH0x00080)
1966 goto bad_list;
1967 a->flags |= F_ATTR_MP_UNREACH0x00080;
1968
1969 mpa->unreach = p;
1970 mpa->unreach_len = attr_len;
1971 plen += attr_len;
1972 break;
1973 case ATTR_AS4_AGGREGATOR:
1974 if (attr_len != 8) {
1975 /* see ATTR_AGGREGATOR ... */
1976 if ((flags & ATTR_PARTIAL0x20) == 0)
1977 goto bad_len;
1978 log_peer_warnx(&peer->conf, "bad AS4_AGGREGATOR, "
1979 "partial attribute ignored");
1980 plen += attr_len;
1981 break;
1982 }
1983 if (!CHECK_FLAGS(flags, ATTR_OPTIONAL0x80|ATTR_TRANSITIVE0x40,
1984 ATTR_PARTIAL0x20))
1985 goto bad_flags;
1986 if (memcmp(p, &zero, sizeof(u_int32_t)) == 0) {
1987 /* As per RFC6793 use "attribute discard" here. */
1988 log_peer_warnx(&peer->conf, "bad AS4_AGGREGATOR, "
1989 "AS 0 not allowed, attribute discarded");
1990 plen += attr_len;
1991 break;
1992 }
1993 a->flags |= F_ATTR_AS4BYTE_NEW0x00100;
1994 goto optattr;
1995 case ATTR_AS4_PATH:
1996 if (!CHECK_FLAGS(flags, ATTR_OPTIONAL0x80|ATTR_TRANSITIVE0x40,
1997 ATTR_PARTIAL0x20))
1998 goto bad_flags;
1999 if ((error = aspath_verify(p, attr_len, 1,
Although the value stored to 'error' is used in the enclosing expression, the value is never actually read from 'error'
2000 peer_accept_no_as_set(peer))) != 0) {
2001 /* As per RFC6793 use "attribute discard" here. */
2002 log_peer_warnx(&peer->conf, "bad AS4_PATH, "
2003 "attribute discarded");
2004 plen += attr_len;
2005 break;
2006 }
2007 a->flags |= F_ATTR_AS4BYTE_NEW0x00100;
2008 goto optattr;
2009 default:
2010 if ((flags & ATTR_OPTIONAL0x80) == 0) {
2011 rde_update_err(peer, ERR_UPDATE, ERR_UPD_UNKNWN_WK_ATTR,
2012 op, len);
2013 return (-1);
2014 }
2015optattr:
2016 if (attr_optadd(a, flags, type, p, attr_len) == -1) {
2017bad_list:
2018 rde_update_err(peer, ERR_UPDATE, ERR_UPD_ATTRLIST,
2019 NULL((void*)0), 0);
2020 return (-1);
2021 }
2022
2023 plen += attr_len;
2024 break;
2025 }
2026
2027 return (plen);
2028}
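rde_attr_parse() consumes exactly one path attribute per call: a flags octet, a type octet, then a one- or two-octet length selected by ATTR_EXTLEN (RFC 4271, section 4.3). A self-contained sketch of just that header decoding; attr_hdr and its parameter names are hypothetical, not part of rde.c:

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

#define ATTR_EXTLEN	0x10

/* Sketch: decode a path attribute header; returns the number of
 * header octets consumed, or -1 on truncation. Illustrative only. */
static int
attr_hdr(const uint8_t *p, uint16_t len, uint8_t *flags, uint8_t *type,
    uint16_t *attr_len)
{
	if (len < 3)
		return -1;
	*flags = p[0];
	*type = p[1];
	if (*flags & ATTR_EXTLEN) {
		uint16_t tmp;

		if (len < 4)
			return -1;
		memcpy(&tmp, p + 2, sizeof(tmp));
		*attr_len = ntohs(tmp);	/* attribute length is big-endian */
		return 4;
	}
	*attr_len = p[2];
	return 3;
}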
2029
2030int
2031rde_attr_add(struct filterstate *state, u_char *p, u_int16_t len)
2032{
2033 u_int16_t attr_len;
2034 u_int16_t plen = 0;
2035 u_int8_t flags;
2036 u_int8_t type;
2037 u_int8_t tmp8;
2038
2039 if (len < 3)
2040 return (-1);
2041
2042 UPD_READ(&flags, p, plen, 1);
2043 UPD_READ(&type, p, plen, 1);
2044
2045 if (flags & ATTR_EXTLEN0x10) {
2046 if (len - plen < 2)
2047 return (-1);
2048 UPD_READ(&attr_len, p, plen, 2);
2049		attr_len = ntohs(attr_len);
2050 } else {
2051 UPD_READ(&tmp8, p, plen, 1);
2052 attr_len = tmp8;
2053 }
2054
2055 if (len - plen < attr_len)
2056 return (-1);
2057
2058 switch (type) {
2059 case ATTR_COMMUNITIES:
2060 return community_add(&state->communities, flags, p, attr_len);
2061 case ATTR_LARGE_COMMUNITIES:
2062 return community_large_add(&state->communities, flags, p,
2063 attr_len);
2064 case ATTR_EXT_COMMUNITIES:
2065 return community_ext_add(&state->communities, flags, p,
2066 attr_len);
2067 }
2068
2069 if (attr_optadd(&state->aspath, flags, type, p, attr_len) == -1)
2070 return (-1);
2071 return (0);
2072}
2073
2074#undef UPD_READ
2075#undef CHECK_FLAGS
2076
2077u_int8_t
2078rde_attr_missing(struct rde_aspath *a, int ebgp, u_int16_t nlrilen)
2079{
2080 /* ATTR_MP_UNREACH_NLRI may be sent alone */
2081 if (nlrilen == 0 && a->flags & F_ATTR_MP_UNREACH0x00080 &&
2082 (a->flags & F_ATTR_MP_REACH0x00040) == 0)
2083 return (0);
2084
2085 if ((a->flags & F_ATTR_ORIGIN0x00001) == 0)
2086 return (ATTR_ORIGIN);
2087 if ((a->flags & F_ATTR_ASPATH0x00002) == 0)
2088 return (ATTR_ASPATH);
2089 if ((a->flags & F_ATTR_MP_REACH0x00040) == 0 &&
2090 (a->flags & F_ATTR_NEXTHOP0x00004) == 0)
2091 return (ATTR_NEXTHOP);
2092 if (!ebgp)
2093 if ((a->flags & F_ATTR_LOCALPREF0x00008) == 0)
2094 return (ATTR_LOCALPREF);
2095 return (0);
2096}
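A path carrying NLRI must include ORIGIN, AS_PATH and NEXTHOP, and on iBGP sessions also LOCAL_PREF (RFC 4271, section 5); rde_attr_missing() returns the type code of the first mandatory attribute that is absent so the caller can report it in the notification data. The same bookkeeping reduced to a flag word, as a sketch with illustrative constants:

#include <stdint.h>

#define HAS_ORIGIN	0x01
#define HAS_ASPATH	0x02
#define HAS_NEXTHOP	0x04
#define HAS_LOCALPREF	0x08

/* Sketch: check that all mandatory attributes were seen. */
static int
update_complete(uint32_t flags, int ebgp)
{
	uint32_t need = HAS_ORIGIN | HAS_ASPATH | HAS_NEXTHOP;

	if (!ebgp)		/* LOCAL_PREF is mandatory on iBGP only */
		need |= HAS_LOCALPREF;
	return (flags & need) == need;
}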
2097
2098int
2099rde_get_mp_nexthop(u_char *data, u_int16_t len, u_int8_t aid,
2100 struct filterstate *state)
2101{
2102 struct bgpd_addr nexthop;
2103 u_int8_t totlen, nhlen;
2104
2105 if (len == 0)
2106 return (-1);
2107
2108 nhlen = *data++;
2109 totlen = 1;
2110 len--;
2111
2112 if (nhlen > len)
2113 return (-1);
2114
2115 bzero(&nexthop, sizeof(nexthop));
2116 nexthop.aid = aid;
2117 switch (aid) {
2118 case AID_INET62:
2119 /*
2120 * RFC2545 describes that there may be a link-local
2121 * address carried in nexthop. Yikes!
2122		 * This is not only silly, it is wrong, and we just ignore
2123		 * this link-local nexthop. The bgpd session doesn't run
2124		 * over the link-local address, so why should all other
2125		 * traffic?
2126 */
2127 if (nhlen != 16 && nhlen != 32) {
2128 log_warnx("bad multiprotocol nexthop, bad size");
2129 return (-1);
2130 }
2131 memcpy(&nexthop.v6ba.v6.s6_addr__u6_addr.__u6_addr8, data, 16);
2132 break;
2133 case AID_VPN_IPv64:
2134 if (nhlen != 24) {
2135 log_warnx("bad multiprotocol nexthop, bad size %d",
2136 nhlen);
2137 return (-1);
2138 }
2139 memcpy(&nexthop.v6ba.v6, data + sizeof(u_int64_t),
2140 sizeof(nexthop.v6ba.v6));
2141 nexthop.aid = AID_INET62;
2142 break;
2143 case AID_VPN_IPv43:
2144 /*
2145 * Neither RFC4364 nor RFC3107 specify the format of the
2146		 * nexthop in an explicit way. The quality of the RFCs went down
2147		 * the toilet the larger the number got.
2148		 * RFC4364 is very confusing about the VPN-IPv4 address and the
2149		 * VPN-IPv4 prefix that also carries an MPLS label.
2150 * So the nexthop is a 12-byte address with a 64bit RD and
2151 * an IPv4 address following. In the nexthop case the RD can
2152 * be ignored.
2153 * Since the nexthop has to be in the main IPv4 table just
2154 * create an AID_INET nexthop. So we don't need to handle
2155 * AID_VPN_IPv4 in nexthop and kroute.
2156 */
2157 if (nhlen != 12) {
2158 log_warnx("bad multiprotocol nexthop, bad size");
2159 return (-1);
2160 }
2161 nexthop.aid = AID_INET1;
2162 memcpy(&nexthop.v4ba.v4, data + sizeof(u_int64_t),
2163 sizeof(nexthop.v4ba.v4));
2164 break;
2165 default:
2166 log_warnx("bad multiprotocol nexthop, bad AID");
2167 return (-1);
2168 }
2169
2170 nexthop_unref(state->nexthop); /* just to be sure */
2171 state->nexthop = nexthop_get(&nexthop);
2172
2173 /* ignore reserved (old SNPA) field as per RFC4760 */
2174 totlen += nhlen + 1;
2175 data += nhlen + 1;
2176
2177 return (totlen);
2178}
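The MP_REACH_NLRI value starts with a one-octet nexthop length, the nexthop itself, and one reserved (historic SNPA) octet before the NLRI begins (RFC 4760). A sketch of that framing for the IPv6 case only; mp_nexthop6 is a hypothetical name:

#include <stdint.h>
#include <string.h>

/* Sketch: [nh len:1][nexthop:nhlen][reserved:1][NLRI...]
 * Returns octets consumed, or -1 on malformed input. */
static int
mp_nexthop6(const uint8_t *data, uint16_t len, uint8_t nh[16])
{
	uint8_t nhlen;

	if (len < 1)
		return -1;
	nhlen = data[0];
	if (nhlen + 2 > len)		/* nexthop plus reserved octet */
		return -1;
	if (nhlen != 16 && nhlen != 32)	/* global, or global+link-local */
		return -1;
	memcpy(nh, data + 1, 16);	/* keep only the global address */
	return 1 + nhlen + 1;
}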
2179
2180void
2181rde_update_err(struct rde_peer *peer, u_int8_t error, u_int8_t suberr,
2182 void *data, u_int16_t size)
2183{
2184 struct ibuf *wbuf;
2185
2186 if ((wbuf = imsg_create(ibuf_se, IMSG_UPDATE_ERR, peer->conf.id, 0,
2187 size + sizeof(error) + sizeof(suberr))) == NULL((void*)0))
2188 fatal("%s %d imsg_create error", __func__, __LINE__2188);
2189 if (imsg_add(wbuf, &error, sizeof(error)) == -1 ||
2190 imsg_add(wbuf, &suberr, sizeof(suberr)) == -1 ||
2191 imsg_add(wbuf, data, size) == -1)
2192 fatal("%s %d imsg_add error", __func__, __LINE__2192);
2193 imsg_close(ibuf_se, wbuf);
2194 peer->state = PEER_ERR;
2195}
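The imsg sent to the session engine carries a flat payload of one error octet, one suberror octet, and then the offending data; the SE turns it into a BGP NOTIFICATION. A sketch of the equivalent serialization into a plain buffer (names hypothetical):

#include <stdint.h>
#include <string.h>

/* Sketch: the [error:1][suberr:1][data:size] layout built above via
 * imsg_add(); returns octets written, 0 if buf is too small. */
static size_t
update_err_payload(uint8_t *buf, size_t buflen, uint8_t error,
    uint8_t suberr, const void *data, uint16_t size)
{
	if (buflen < (size_t)size + 2)
		return 0;
	buf[0] = error;
	buf[1] = suberr;
	if (size > 0)
		memcpy(buf + 2, data, size);
	return (size_t)size + 2;
}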
2196
2197void
2198rde_update_log(const char *message, u_int16_t rid,
2199 const struct rde_peer *peer, const struct bgpd_addr *next,
2200 const struct bgpd_addr *prefix, u_int8_t prefixlen)
2201{
2202 char *l = NULL((void*)0);
2203 char *n = NULL((void*)0);
2204 char *p = NULL((void*)0);
2205
2206 if (!((conf->log & BGPD_LOG_UPDATES0x0001) ||
2207 (peer->flags & PEERFLAG_LOG_UPDATES0x02)))
2208 return;
2209
2210 if (next != NULL((void*)0))
2211 if (asprintf(&n, " via %s", log_addr(next)) == -1)
2212 n = NULL((void*)0);
2213 if (asprintf(&p, "%s/%u", log_addr(prefix), prefixlen) == -1)
2214 p = NULL((void*)0);
2215 l = log_fmt_peer(&peer->conf);
2216 log_info("Rib %s: %s AS%s: %s %s%s", rib_byid(rid)->name,
2217 l, log_as(peer->conf.remote_as), message,
2218 p ? p : "out of memory", n ? n : "");
2219
2220 free(l);
2221 free(n);
2222 free(p);
2223}
2224
2225/*
2226 * 4-Byte ASN helper function.
2227 * Two scenarios need to be considered:
2228 * - NEW session with NEW attributes present -> just remove the attributes
2229 * - OLD session with NEW attributes present -> try to merge them
2230 */
2231void
2232rde_as4byte_fixup(struct rde_peer *peer, struct rde_aspath *a)
2233{
2234 struct attr *nasp, *naggr, *oaggr;
2235 u_int32_t as;
2236
2237 /*
2238 * if either ATTR_AS4_AGGREGATOR or ATTR_AS4_PATH is present
2239 * try to fixup the attributes.
2240 * Do not fixup if F_ATTR_PARSE_ERR is set.
2241 */
2242 if (!(a->flags & F_ATTR_AS4BYTE_NEW0x00100) || a->flags & F_ATTR_PARSE_ERR0x10000)
2243 return;
2244
2245 /* first get the attributes */
2246 nasp = attr_optget(a, ATTR_AS4_PATH);
2247 naggr = attr_optget(a, ATTR_AS4_AGGREGATOR);
2248
2249 if (peer_has_as4byte(peer)) {
2250 /* NEW session using 4-byte ASNs */
2251 if (nasp) {
2252 log_peer_warnx(&peer->conf, "uses 4-byte ASN "
2253 "but sent AS4_PATH attribute.");
2254 attr_free(a, nasp);
2255 }
2256 if (naggr) {
2257 log_peer_warnx(&peer->conf, "uses 4-byte ASN "
2258 "but sent AS4_AGGREGATOR attribute.");
2259 attr_free(a, naggr);
2260 }
2261 return;
2262 }
2263 /* OLD session using 2-byte ASNs */
2264 /* try to merge the new attributes into the old ones */
2265 if ((oaggr = attr_optget(a, ATTR_AGGREGATOR))) {
2266 memcpy(&as, oaggr->data, sizeof(as));
2267		if (ntohl(as) != AS_TRANS23456) {
2268 /* per RFC ignore AS4_PATH and AS4_AGGREGATOR */
2269 if (nasp)
2270 attr_free(a, nasp);
2271 if (naggr)
2272 attr_free(a, naggr);
2273 return;
2274 }
2275 if (naggr) {
2276 /* switch over to new AGGREGATOR */
2277 attr_free(a, oaggr);
2278 if (attr_optadd(a, ATTR_OPTIONAL0x80 | ATTR_TRANSITIVE0x40,
2279 ATTR_AGGREGATOR, naggr->data, naggr->len))
2280 fatalx("attr_optadd failed but impossible");
2281 }
2282 }
2283 /* there is no need for AS4_AGGREGATOR any more */
2284 if (naggr)
2285 attr_free(a, naggr);
2286
2287 /* merge AS4_PATH with ASPATH */
2288 if (nasp)
2289 aspath_merge(a, nasp);
2290}
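The merge rule applied above comes from RFC 6793: on a session that negotiated only 2-byte ASNs, AS4_PATH/AS4_AGGREGATOR may only be merged when the plain AGGREGATOR carries the placeholder AS_TRANS (23456); any other aggregator AS means a later 2-byte speaker rewrote the aggregation and the AS4_* attributes are stale. That decision in isolation, as a sketch:

#include <arpa/inet.h>
#include <stdint.h>

#define AS_TRANS	23456

/* Sketch: decide whether AS4_* attributes may be merged on a 2-byte
 * ASN session. aggr_as is the AGGREGATOR AS exactly as it appears on
 * the wire (network byte order). */
static int
as4_merge_ok(int has_aggregator, uint32_t aggr_as)
{
	if (!has_aggregator)
		return 1;	/* no AGGREGATOR: merge AS4_PATH directly */
	return ntohl(aggr_as) == AS_TRANS;
}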
2291
2292
2293/*
2294 * route reflector helper function
2295 */
2296void
2297rde_reflector(struct rde_peer *peer, struct rde_aspath *asp)
2298{
2299 struct attr *a;
2300 u_int8_t *p;
2301 u_int16_t len;
2302 u_int32_t id;
2303
2304 /* do not consider updates with parse errors */
2305 if (asp->flags & F_ATTR_PARSE_ERR0x10000)
2306 return;
2307
2308	/* check the originator id; if it equals our router_id, drop */
2309 if ((a = attr_optget(asp, ATTR_ORIGINATOR_ID)) != NULL((void*)0)) {
2310 if (memcmp(&conf->bgpid, a->data, sizeof(conf->bgpid)) == 0) {
2311 /* this is coming from myself */
2312 asp->flags |= F_ATTR_LOOP0x00200;
2313 return;
2314 }
2315 } else if (conf->flags & BGPD_FLAG_REFLECTOR0x0004) {
2316 if (peer->conf.ebgp)
2317 id = conf->bgpid;
2318 else
2319			id = htonl(peer->remote_bgpid);
2320 if (attr_optadd(asp, ATTR_OPTIONAL0x80, ATTR_ORIGINATOR_ID,
2321 &id, sizeof(u_int32_t)) == -1)
2322 fatalx("attr_optadd failed but impossible");
2323 }
2324
2325 /* check for own id in the cluster list */
2326 if (conf->flags & BGPD_FLAG_REFLECTOR0x0004) {
2327 if ((a = attr_optget(asp, ATTR_CLUSTER_LIST)) != NULL((void*)0)) {
2328 for (len = 0; len < a->len;
2329 len += sizeof(conf->clusterid))
2330 /* check if coming from my cluster */
2331 if (memcmp(&conf->clusterid, a->data + len,
2332 sizeof(conf->clusterid)) == 0) {
2333 asp->flags |= F_ATTR_LOOP0x00200;
2334 return;
2335 }
2336
2337 /* prepend own clusterid by replacing attribute */
2338 len = a->len + sizeof(conf->clusterid);
2339 if (len < a->len)
2340 fatalx("rde_reflector: cluster-list overflow");
2341 if ((p = malloc(len)) == NULL((void*)0))
2342 fatal("rde_reflector");
2343 memcpy(p, &conf->clusterid, sizeof(conf->clusterid));
2344 memcpy(p + sizeof(conf->clusterid), a->data, a->len);
2345 attr_free(asp, a);
2346 if (attr_optadd(asp, ATTR_OPTIONAL0x80, ATTR_CLUSTER_LIST,
2347 p, len) == -1)
2348 fatalx("attr_optadd failed but impossible");
2349 free(p);
2350 } else if (attr_optadd(asp, ATTR_OPTIONAL0x80, ATTR_CLUSTER_LIST,
2351 &conf->clusterid, sizeof(conf->clusterid)) == -1)
2352 fatalx("attr_optadd failed but impossible");
2353 }
2354}
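The cluster-list handling implements RFC 4456: a route reflector must drop a path whose CLUSTER_LIST already contains its own cluster ID, and otherwise prepends that ID before reflecting. The loop check in isolation, as a self-contained sketch:

#include <stdint.h>
#include <string.h>

/* Sketch: return 1 if clusterid already appears in the raw
 * CLUSTER_LIST attribute data (a sequence of 4-octet IDs). */
static int
cluster_list_loop(const uint8_t *data, uint16_t len, uint32_t clusterid)
{
	uint16_t off;

	for (off = 0; off + sizeof(clusterid) <= len;
	    off += sizeof(clusterid))
		if (memcmp(data + off, &clusterid, sizeof(clusterid)) == 0)
			return 1;	/* own cluster ID found: loop */
	return 0;
}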
2355
2356/*
2357 * control specific functions
2358 */
2359static void
2360rde_dump_rib_as(struct prefix *p, struct rde_aspath *asp, pid_t pid, int flags,
2361 int adjout)
2362{
2363 struct ctl_show_rib rib;
2364 struct ibuf *wbuf;
2365 struct attr *a;
2366 struct nexthop *nexthop;
2367 struct rib_entry *re;
2368 struct rde_peer *peer;
2369 void *bp;
2370 time_t staletime;
2371 size_t aslen;
2372 u_int8_t l;
2373
2374 nexthop = prefix_nexthop(p);
2375 peer = prefix_peer(p);
2376 bzero(&rib, sizeof(rib));
2377 rib.age = getmonotime() - p->lastchange;
2378 rib.local_pref = asp->lpref;
2379 rib.med = asp->med;
2380 rib.weight = asp->weight;
2381 strlcpy(rib.descr, peer->conf.descr, sizeof(rib.descr));
2382 memcpy(&rib.remote_addr, &peer->remote_addr,
2383 sizeof(rib.remote_addr));
2384 rib.remote_id = peer->remote_bgpid;
2385 if (nexthop != NULL((void*)0)) {
2386 memcpy(&rib.true_nexthop, &nexthop->true_nexthop,
2387 sizeof(rib.true_nexthop));
2388 memcpy(&rib.exit_nexthop, &nexthop->exit_nexthop,
2389 sizeof(rib.exit_nexthop));
2390 } else {
2391 /* announced network may have a NULL nexthop */
2392 bzero(&rib.true_nexthop, sizeof(rib.true_nexthop));
2393 bzero(&rib.exit_nexthop, sizeof(rib.exit_nexthop));
2394 rib.true_nexthop.aid = p->pt->aid;
2395 rib.exit_nexthop.aid = p->pt->aid;
2396 }
2397 pt_getaddr(p->pt, &rib.prefix);
2398 rib.prefixlen = p->pt->prefixlen;
2399 rib.origin = asp->origin;
2400 rib.validation_state = p->validation_state;
2401 rib.flags = 0;
2402 re = prefix_re(p);
2403 if (re != NULL((void*)0) && re->active == p)
2404 rib.flags |= F_PREF_ACTIVE0x02;
2405 if (!peer->conf.ebgp)
2406 rib.flags |= F_PREF_INTERNAL0x04;
2407 if (asp->flags & F_PREFIX_ANNOUNCED0x00400)
2408 rib.flags |= F_PREF_ANNOUNCE0x08;
2409 if (nexthop == NULL((void*)0) || nexthop->state == NEXTHOP_REACH)
2410 rib.flags |= F_PREF_ELIGIBLE0x01;
2411 if (asp->flags & F_ATTR_LOOP0x00200)
2412 rib.flags &= ~F_PREF_ELIGIBLE0x01;
2413 if (asp->flags & F_ATTR_PARSE_ERR0x10000)
2414 rib.flags |= F_PREF_INVALID0x20;
2415 staletime = peer->staletime[p->pt->aid];
2416 if (staletime && p->lastchange <= staletime)
2417 rib.flags |= F_PREF_STALE0x10;
2418 if (!adjout) {
2419 if (peer_has_add_path(peer, p->pt->aid, CAPA_AP_RECV0x01)) {
2420 rib.path_id = p->path_id;
2421 rib.flags |= F_PREF_PATH_ID0x40;
2422 }
2423 } else {
2424 if (peer_has_add_path(peer, p->pt->aid, CAPA_AP_SEND0x02)) {
2425 rib.path_id = 0; /* XXX add-path send */
2426 rib.flags |= F_PREF_PATH_ID0x40;
2427 }
2428 }
2429 aslen = aspath_length(asp->aspath);
2430
2431 if ((wbuf = imsg_create(ibuf_se_ctl, IMSG_CTL_SHOW_RIB, 0, pid,
2432 sizeof(rib) + aslen)) == NULL((void*)0))
2433 return;
2434 if (imsg_add(wbuf, &rib, sizeof(rib)) == -1 ||
2435 imsg_add(wbuf, aspath_dump(asp->aspath), aslen) == -1)
2436 return;
2437 imsg_close(ibuf_se_ctl, wbuf);
2438
2439 if (flags & F_CTL_DETAIL0x1000) {
2440 struct rde_community *comm = prefix_communities(p);
2441 size_t len = comm->nentries * sizeof(struct community);
2442 if (comm->nentries > 0) {
2443 if ((wbuf = imsg_create(ibuf_se_ctl,
2444 IMSG_CTL_SHOW_RIB_COMMUNITIES, 0, pid,
2445 len)) == NULL((void*)0))
2446 return;
2447 if ((bp = ibuf_reserve(wbuf, len)) == NULL((void*)0)) {
2448 ibuf_free(wbuf);
2449 return;
2450 }
2451 memcpy(bp, comm->communities, len);
2452 imsg_close(ibuf_se_ctl, wbuf);
2453 }
2454 for (l = 0; l < asp->others_len; l++) {
2455 if ((a = asp->others[l]) == NULL((void*)0))
2456 break;
2457 if ((wbuf = imsg_create(ibuf_se_ctl,
2458 IMSG_CTL_SHOW_RIB_ATTR, 0, pid,
2459 attr_optlen(a)((a)->len > 255 ? (a)->len + 4 : (a)->len + 3))) == NULL((void*)0))
2460 return;
2461 if ((bp = ibuf_reserve(wbuf, attr_optlen(a)((a)->len > 255 ? (a)->len + 4 : (a)->len + 3))) == NULL((void*)0)) {
2462 ibuf_free(wbuf);
2463 return;
2464 }
2465 if (attr_write(bp, attr_optlen(a)((a)->len > 255 ? (a)->len + 4 : (a)->len + 3), a->flags,
2466 a->type, a->data, a->len) == -1) {
2467 ibuf_free(wbuf);
2468 return;
2469 }
2470 imsg_close(ibuf_se_ctl, wbuf);
2471 }
2472 }
2473}
2474
2475int
2476rde_match_peer(struct rde_peer *p, struct ctl_neighbor *n)
2477{
2478 char *s;
2479
2480 if (n && n->addr.aid) {
2481 if (memcmp(&p->conf.remote_addr, &n->addr,
2482 sizeof(p->conf.remote_addr)))
2483 return 0;
2484 } else if (n && n->descr[0]) {
2485 s = n->is_group ? p->conf.group : p->conf.descr;
2486 if (strcmp(s, n->descr))
2487 return 0;
2488 }
2489 return 1;
2490}
2491
2492static void
2493rde_dump_filter(struct prefix *p, struct ctl_show_rib_request *req, int adjout)
2494{
2495 struct rde_aspath *asp;
2496 struct rib_entry *re;
2497
2498 if (!rde_match_peer(prefix_peer(p), &req->neighbor))
2499 return;
2500
2501 asp = prefix_aspath(p);
2502 re = prefix_re(p);
2503 if (asp == NULL((void*)0)) /* skip pending withdraw in Adj-RIB-Out */
2504 return;
2505 if ((req->flags & F_CTL_ACTIVE0x8000) && re != NULL((void*)0) && re->active != p)
2506 return;
2507 if ((req->flags & F_CTL_INVALID0x40000) &&
2508 (asp->flags & F_ATTR_PARSE_ERR0x10000) == 0)
2509 return;
2510 /*
2511	 * XXX handle Adj-RIB-Out specially since there we want to match
2512	 * against our own path ids.
2513 */
2514 if ((req->flags & F_CTL_HAS_PATHID0x800000) && req->path_id != p->path_id)
2515 return;
2516 if (req->as.type != AS_UNDEF &&
2517 !aspath_match(asp->aspath, &req->as, 0))
2518 return;
2519 if (req->community.flags != 0) {
2520 if (!community_match(prefix_communities(p), &req->community,
2521 NULL((void*)0)))
2522 return;
2523 }
2524 if (!ovs_match(p, req->flags))
2525 return;
2526 rde_dump_rib_as(p, asp, req->pid, req->flags, adjout);
2527}
2528
2529static void
2530rde_dump_upcall(struct rib_entry *re, void *ptr)
2531{
2532 struct rde_dump_ctx *ctx = ptr;
2533 struct prefix *p;
2534
2535	LIST_FOREACH(p, &re->prefix_h, entry.list.rib)
2536 rde_dump_filter(p, &ctx->req, 0);
2537}
2538
2539static void
2540rde_dump_prefix_upcall(struct rib_entry *re, void *ptr)
2541{
2542 struct rde_dump_ctx *ctx = ptr;
2543 struct prefix *p;
2544 struct pt_entry *pt;
2545 struct bgpd_addr addr;
2546
2547 pt = re->prefix;
2548 pt_getaddr(pt, &addr);
2549 if (addr.aid != ctx->req.prefix.aid)
2550 return;
2551 if (ctx->req.flags & F_LONGER0x0200) {
2552 if (ctx->req.prefixlen > pt->prefixlen)
2553 return;
2554 if (!prefix_compare(&ctx->req.prefix, &addr,
2555 ctx->req.prefixlen))
2556			LIST_FOREACH(p, &re->prefix_h, entry.list.rib)
2557 rde_dump_filter(p, &ctx->req, 0);
2558 } else {
2559 if (ctx->req.prefixlen < pt->prefixlen)
2560 return;
2561 if (!prefix_compare(&addr, &ctx->req.prefix,
2562 pt->prefixlen))
2563			LIST_FOREACH(p, &re->prefix_h, entry.list.rib)
2564 rde_dump_filter(p, &ctx->req, 0);
2565 }
2566}
2567
2568static void
2569rde_dump_adjout_upcall(struct prefix *p, void *ptr)
2570{
2571 struct rde_dump_ctx *ctx = ptr;
2572
2573 if (p->flags & (PREFIX_FLAG_WITHDRAW0x01 | PREFIX_FLAG_DEAD0x04))
2574 return;
2575 rde_dump_filter(p, &ctx->req, 1);
2576}
2577
2578static void
2579rde_dump_adjout_prefix_upcall(struct prefix *p, void *ptr)
2580{
2581 struct rde_dump_ctx *ctx = ptr;
2582 struct bgpd_addr addr;
2583
2584 if (p->flags & (PREFIX_FLAG_WITHDRAW0x01 | PREFIX_FLAG_DEAD0x04))
2585 return;
2586
2587 pt_getaddr(p->pt, &addr);
2588 if (addr.aid != ctx->req.prefix.aid)
2589 return;
2590 if (ctx->req.flags & F_LONGER0x0200) {
2591 if (ctx->req.prefixlen > p->pt->prefixlen)
2592 return;
2593 if (!prefix_compare(&ctx->req.prefix, &addr,
2594 ctx->req.prefixlen))
2595 rde_dump_filter(p, &ctx->req, 1);
2596 } else {
2597 if (ctx->req.prefixlen < p->pt->prefixlen)
2598 return;
2599 if (!prefix_compare(&addr, &ctx->req.prefix,
2600 p->pt->prefixlen))
2601 rde_dump_filter(p, &ctx->req, 1);
2602 }
2603}
2604
2605static int
2606rde_dump_throttled(void *arg)
2607{
2608 struct rde_dump_ctx *ctx = arg;
2609
2610 return (ctx->throttled != 0);
2611}
2612
2613static void
2614rde_dump_done(void *arg, u_int8_t aid)
2615{
2616 struct rde_dump_ctx *ctx = arg;
2617 struct rde_peer *peer;
2618 u_int error;
2619
2620 if (ctx->req.flags & F_CTL_ADJ_OUT0x4000) {
2621 peer = peer_match(&ctx->req.neighbor, ctx->peerid);
2622 if (peer == NULL((void*)0))
2623 goto done;
2624 ctx->peerid = peer->conf.id;
2625 switch (ctx->req.type) {
2626 case IMSG_CTL_SHOW_RIB:
2627 if (prefix_dump_new(peer, ctx->req.aid,
2628 CTL_MSG_HIGH_MARK500, ctx, rde_dump_adjout_upcall,
2629 rde_dump_done, rde_dump_throttled) == -1)
2630 goto nomem;
2631 break;
2632 case IMSG_CTL_SHOW_RIB_PREFIX:
2633 if (prefix_dump_new(peer, ctx->req.aid,
2634 CTL_MSG_HIGH_MARK500, ctx,
2635 rde_dump_adjout_prefix_upcall,
2636 rde_dump_done, rde_dump_throttled) == -1)
2637 goto nomem;
2638 break;
2639 default:
2640 fatalx("%s: unsupported imsg type", __func__);
2641 }
2642 return;
2643 }
2644done:
2645 imsg_compose(ibuf_se_ctl, IMSG_CTL_END, 0, ctx->req.pid, -1, NULL((void*)0), 0);
2646	LIST_REMOVE(ctx, entry);
2647 free(ctx);
2648 return;
2649
2650nomem:
2651 log_warn(__func__);
2652 error = CTL_RES_NOMEM;
2653 imsg_compose(ibuf_se_ctl, IMSG_CTL_RESULT, 0, ctx->req.pid, -1, &error,
2654 sizeof(error));
2655 return;
2656}
2657
2658void
2659rde_dump_ctx_new(struct ctl_show_rib_request *req, pid_t pid,
2660 enum imsg_type type)
2661{
2662 struct rde_dump_ctx *ctx;
2663 struct rib_entry *re;
2664 struct prefix *p;
2665 u_int error;
2666 u_int8_t hostplen;
2667 u_int16_t rid;
2668
2669 if ((ctx = calloc(1, sizeof(*ctx))) == NULL((void*)0)) {
2670 nomem:
2671 log_warn(__func__);
2672 error = CTL_RES_NOMEM;
2673 imsg_compose(ibuf_se_ctl, IMSG_CTL_RESULT, 0, pid, -1, &error,
2674 sizeof(error));
2675 return;
2676 }
2677
2678 memcpy(&ctx->req, req, sizeof(struct ctl_show_rib_request));
2679 ctx->req.pid = pid;
2680 ctx->req.type = type;
2681
2682 if (req->flags & (F_CTL_ADJ_IN0x2000 | F_CTL_INVALID0x40000)) {
2683 rid = RIB_ADJ_IN0;
2684 } else if (req->flags & F_CTL_ADJ_OUT0x4000) {
2685 struct rde_peer *peer;
2686
2687 peer = peer_match(&req->neighbor, 0);
2688 if (peer == NULL((void*)0)) {
2689 error = CTL_RES_NOSUCHPEER;
2690 imsg_compose(ibuf_se_ctl, IMSG_CTL_RESULT, 0, pid, -1,
2691 &error, sizeof(error));
2692 free(ctx);
2693 return;
2694 }
2695 ctx->peerid = peer->conf.id;
2696 switch (ctx->req.type) {
2697 case IMSG_CTL_SHOW_RIB:
2698 if (prefix_dump_new(peer, ctx->req.aid,
2699 CTL_MSG_HIGH_MARK500, ctx, rde_dump_adjout_upcall,
2700 rde_dump_done, rde_dump_throttled) == -1)
2701 goto nomem;
2702 break;
2703 case IMSG_CTL_SHOW_RIB_PREFIX:
2704 if (req->flags & (F_LONGER0x0200|F_SHORTER0x0400)) {
2705 if (prefix_dump_new(peer, ctx->req.aid,
2706 CTL_MSG_HIGH_MARK500, ctx,
2707 rde_dump_adjout_prefix_upcall,
2708 rde_dump_done, rde_dump_throttled) == -1)
2709 goto nomem;
2710 break;
2711 }
2712 switch (req->prefix.aid) {
2713 case AID_INET1:
2714 case AID_VPN_IPv43:
2715 hostplen = 32;
2716 break;
2717 case AID_INET62:
2718 case AID_VPN_IPv64:
2719 hostplen = 128;
2720 break;
2721 default:
2722 fatalx("%s: unknown af", __func__);
2723 }
2724
2725 do {
2726 if (req->prefixlen == hostplen)
2727 p = prefix_match(peer, &req->prefix);
2728 else
2729 p = prefix_lookup(peer, &req->prefix,
2730 req->prefixlen);
2731 if (p)
2732 rde_dump_adjout_upcall(p, ctx);
2733 } while ((peer = peer_match(&req->neighbor,
2734 peer->conf.id)));
2735
2736 imsg_compose(ibuf_se_ctl, IMSG_CTL_END, 0, ctx->req.pid,
2737 -1, NULL((void*)0), 0);
2738 free(ctx);
2739 return;
2740 default:
2741 fatalx("%s: unsupported imsg type", __func__);
2742 }
2743
2744		LIST_INSERT_HEAD(&rde_dump_h, ctx, entry);
2745 return;
2746 } else if ((rid = rib_find(req->rib)) == RIB_NOTFOUND0xffff) {
2747 log_warnx("%s: no such rib %s", __func__, req->rib);
2748 error = CTL_RES_NOSUCHRIB;
2749 imsg_compose(ibuf_se_ctl, IMSG_CTL_RESULT, 0, pid, -1, &error,
2750 sizeof(error));
2751 free(ctx);
2752 return;
2753 }
2754
2755 switch (ctx->req.type) {
2756 case IMSG_CTL_SHOW_NETWORK:
2757 if (rib_dump_new(rid, ctx->req.aid, CTL_MSG_HIGH_MARK500, ctx,
2758 network_dump_upcall, rde_dump_done,
2759 rde_dump_throttled) == -1)
2760 goto nomem;
2761 break;
2762 case IMSG_CTL_SHOW_RIB:
2763 if (rib_dump_new(rid, ctx->req.aid, CTL_MSG_HIGH_MARK500, ctx,
2764 rde_dump_upcall, rde_dump_done, rde_dump_throttled) == -1)
2765 goto nomem;
2766 break;
2767 case IMSG_CTL_SHOW_RIB_PREFIX:
2768 if (req->flags & (F_LONGER0x0200|F_SHORTER0x0400)) {
2769 if (rib_dump_new(rid, ctx->req.aid,
2770 CTL_MSG_HIGH_MARK500, ctx, rde_dump_prefix_upcall,
2771 rde_dump_done, rde_dump_throttled) == -1)
2772 goto nomem;
2773 break;
2774 }
2775 switch (req->prefix.aid) {
2776 case AID_INET1:
2777 case AID_VPN_IPv43:
2778 hostplen = 32;
2779 break;
2780 case AID_INET62:
2781 case AID_VPN_IPv64:
2782 hostplen = 128;
2783 break;
2784 default:
2785 fatalx("%s: unknown af", __func__);
2786 }
2787 if (req->prefixlen == hostplen)
2788 re = rib_match(rib_byid(rid), &req->prefix);
2789 else
2790 re = rib_get(rib_byid(rid), &req->prefix,
2791 req->prefixlen);
2792 if (re)
2793 rde_dump_upcall(re, ctx);
2794 imsg_compose(ibuf_se_ctl, IMSG_CTL_END, 0, ctx->req.pid,
2795 -1, NULL((void*)0), 0);
2796 free(ctx);
2797 return;
2798 default:
2799 fatalx("%s: unsupported imsg type", __func__);
2800 }
2801	LIST_INSERT_HEAD(&rde_dump_h, ctx, entry);
2802}
2803
2804void
2805rde_dump_ctx_throttle(pid_t pid, int throttle)
2806{
2807 struct rde_dump_ctx *ctx;
2808
2809	LIST_FOREACH(ctx, &rde_dump_h, entry) {
2810 if (ctx->req.pid == pid) {
2811 ctx->throttled = throttle;
2812 return;
2813 }
2814 }
2815}
2816
2817void
2818rde_dump_ctx_terminate(pid_t pid)
2819{
2820 struct rde_dump_ctx *ctx;
2821
2822	LIST_FOREACH(ctx, &rde_dump_h, entry) {
2823 if (ctx->req.pid == pid) {
2824 rib_dump_terminate(ctx);
2825 return;
2826 }
2827 }
2828}
2829
2830static int
2831rde_mrt_throttled(void *arg)
2832{
2833 struct mrt *mrt = arg;
2834
2835 return (mrt->wbuf.queued > SESS_MSG_LOW_MARK500);
2836}
2837
2838static void
2839rde_mrt_done(void *ptr, u_int8_t aid)
2840{
2841 mrt_done(ptr);
2842}
2843
2844void
2845rde_dump_mrt_new(struct mrt *mrt, pid_t pid, int fd)
2846{
2847 struct rde_mrt_ctx *ctx;
2848 u_int16_t rid;
2849
2850 if ((ctx = calloc(1, sizeof(*ctx))) == NULL((void*)0)) {
2851 log_warn("rde_dump_mrt_new");
2852 return;
2853 }
2854 memcpy(&ctx->mrt, mrt, sizeof(struct mrt));
2855	TAILQ_INIT(&ctx->mrt.wbuf.bufs);
2856 ctx->mrt.wbuf.fd = fd;
2857 ctx->mrt.state = MRT_STATE_RUNNING;
2858 rid = rib_find(ctx->mrt.rib);
2859 if (rid == RIB_NOTFOUND0xffff) {
2860 log_warnx("non existing RIB %s for mrt dump", ctx->mrt.rib);
2861 free(ctx);
2862 return;
2863 }
2864
2865 if (ctx->mrt.type == MRT_TABLE_DUMP_V2)
2866 mrt_dump_v2_hdr(&ctx->mrt, conf, &peerlist);
2867
2868 if (rib_dump_new(rid, AID_UNSPEC0, CTL_MSG_HIGH_MARK500, &ctx->mrt,
2869 mrt_dump_upcall, rde_mrt_done, rde_mrt_throttled) == -1)
2870 fatal("%s: rib_dump_new", __func__);
2871
2872	LIST_INSERT_HEAD(&rde_mrts, ctx, entry);
2873 rde_mrt_cnt++;
2874}
2875
2876/*
2877 * kroute specific functions
2878 */
2879int
2880rde_l3vpn_import(struct rde_community *comm, struct l3vpn *rd)
2881{
2882 struct filter_set *s;
2883
2884	TAILQ_FOREACH(s, &rd->import, entry) {
2885 if (community_match(comm, &s->action.community, 0))
2886 return (1);
2887 }
2888 return (0);
2889}
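rde_l3vpn_import() accepts a VPN prefix into a routing domain as soon as any of the domain's configured import filter sets matches the path's communities (in practice the route-target extended community). The same first-match scan reduced to a sketch; import_rule and the rt field are illustrative stand-ins:

#include <sys/queue.h>

struct import_rule {
	TAILQ_ENTRY(import_rule) entry;
	unsigned long		 rt;	/* stand-in for a route target */
};
TAILQ_HEAD(import_list, import_rule);

/* Sketch: first matching rule wins, as in the TAILQ_FOREACH above. */
static int
import_ok(struct import_list *rules, unsigned long rt)
{
	struct import_rule *r;

	TAILQ_FOREACH(r, rules, entry)
		if (r->rt == rt)	/* community_match() stand-in */
			return 1;
	return 0;
}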
2890
2891void
2892rde_send_kroute_flush(struct rib *rib)
2893{
2894 if (imsg_compose(ibuf_main, IMSG_KROUTE_FLUSH, rib->rtableid, 0, -1,
2895 NULL((void*)0), 0) == -1)
2896 fatal("%s %d imsg_compose error", __func__, __LINE__2896);
2897}
2898
2899void
2900rde_send_kroute(struct rib *rib, struct prefix *new, struct prefix *old)
2901{
2902 struct kroute_full kr;
2903 struct bgpd_addr addr;
2904 struct prefix *p;
2905 struct rde_aspath *asp;
2906 struct l3vpn *vpn;
2907 enum imsg_type type;
2908
2909 /*
2910	 * Make sure that self-announced prefixes are not committed to the
2911 * FIB. If both prefixes are unreachable no update is needed.
2912 */
2913 if ((old == NULL((void*)0) || prefix_aspath(old)->flags & F_PREFIX_ANNOUNCED0x00400) &&
2914 (new == NULL((void*)0) || prefix_aspath(new)->flags & F_PREFIX_ANNOUNCED0x00400))
2915 return;
2916
2917 if (new == NULL((void*)0) || prefix_aspath(new)->flags & F_PREFIX_ANNOUNCED0x00400) {
2918 type = IMSG_KROUTE_DELETE;
2919 p = old;
2920 } else {
2921 type = IMSG_KROUTE_CHANGE;
2922 p = new;
2923 }
2924
2925 asp = prefix_aspath(p);
2926 pt_getaddr(p->pt, &addr);
2927 bzero(&kr, sizeof(kr));
2928 memcpy(&kr.prefix, &addr, sizeof(kr.prefix));
2929 kr.prefixlen = p->pt->prefixlen;
2930 if (prefix_nhflags(p) == NEXTHOP_REJECT0x02)
2931 kr.flags |= F_REJECT0x0080;
2932 if (prefix_nhflags(p) == NEXTHOP_BLACKHOLE0x04)
2933 kr.flags |= F_BLACKHOLE0x0100;
2934 if (type == IMSG_KROUTE_CHANGE)
2935 memcpy(&kr.nexthop, &prefix_nexthop(p)->true_nexthop,
2936 sizeof(kr.nexthop));
2937 strlcpy(kr.label, rtlabel_id2name(asp->rtlabelid), sizeof(kr.label));
2938
2939 switch (addr.aid) {
2940 case AID_VPN_IPv43:
2941 case AID_VPN_IPv64:
2942 if (!(rib->flags & F_RIB_LOCAL0x0001))
2943 /* not Loc-RIB, no update for VPNs */
2944 break;
2945
2946		SIMPLEQ_FOREACH(vpn, &conf->l3vpns, entry) {
2947 if (!rde_l3vpn_import(prefix_communities(p), vpn))
2948 continue;
2949 /* must send exit_nexthop so that correct MPLS tunnel
2950 * is chosen
2951 */
2952 if (type == IMSG_KROUTE_CHANGE)
2953 memcpy(&kr.nexthop,
2954 &prefix_nexthop(p)->exit_nexthop,
2955 sizeof(kr.nexthop));
2956 /* XXX not ideal but this will change */
2957 kr.ifindex = if_nametoindex(vpn->ifmpe);
2958 if (imsg_compose(ibuf_main, type, vpn->rtableid, 0, -1,
2959 &kr, sizeof(kr)) == -1)
2960 fatal("%s %d imsg_compose error", __func__,
2961 __LINE__2961);
2962 }
2963 break;
2964 default:
2965 if (imsg_compose(ibuf_main, type, rib->rtableid, 0, -1,
2966 &kr, sizeof(kr)) == -1)
2967 fatal("%s %d imsg_compose error", __func__, __LINE__2967);
2968 break;
2969 }
2970}
2971
2972/*
2973 * update specific functions
2974 */
2975int
2976rde_evaluate_all(void)
2977{
2978 return rde_eval_all;
2979}
2980
2981static int
2982rde_skip_peer(struct rde_peer *peer, u_int16_t rib_id, u_int8_t aid)
2983{
2984 /* skip ourself */
2985 if (peer == peerself)
2986 return 1;
2987 if (peer->state != PEER_UP)
2988 return 1;
2989 /* skip peers using a different rib */
2990 if (peer->loc_rib_id != rib_id)
2991 return 1;
2992 /* check if peer actually supports the address family */
2993 if (peer->capa.mp[aid] == 0)
2994 return 1;
2995 /* skip peers with special export types */
2996 if (peer->export_type == EXPORT_NONE ||
2997 peer->export_type == EXPORT_DEFAULT_ROUTE)
2998 return 1;
2999
3000 return 0;
3001}
3002
3003void
3004rde_generate_updates(struct rib *rib, struct prefix *new, struct prefix *old,
3005 int eval_all)
3006{
3007 struct rde_peer *peer;
3008 u_int8_t aid;
3009
3010 /*
3011 * If old is != NULL we know it was active and should be removed.
3012 * If new is != NULL we know it is reachable and then we should
3013 * generate an update.
3014 */
3015 if (old == NULL((void*)0) && new == NULL((void*)0))
3016 return;
3017
3018 if (!eval_all && (rib->flags & F_RIB_NOFIB0x0004) == 0)
3019 rde_send_kroute(rib, new, old);
3020
3021 if (new)
3022 aid = new->pt->aid;
3023 else
3024 aid = old->pt->aid;
3025
3026	LIST_FOREACH(peer, &peerlist, peer_l) {
3027 if (rde_skip_peer(peer, rib->id, aid))
3028 continue;
3029 /* skip regular peers if the best path didn't change */
3030 if ((peer->flags & PEERFLAG_EVALUATE_ALL0x04) == 0 && eval_all)
3031 continue;
3032
3033 up_generate_updates(out_rules, peer, new, old);
3034 }
3035}
3036
3037/* flush Adj-RIB-Out by withdrawing all prefixes */
3038static void
3039rde_up_flush_upcall(struct prefix *p, void *ptr)
3040{
3041 struct rde_peer *peer = ptr;
3042
3043 up_generate_updates(out_rules, peer, NULL((void*)0), p);
3044}
3045
3046u_char queue_buf[4096];
3047
3048int
3049rde_update_queue_pending(void)
3050{
3051 struct rde_peer *peer;
3052 u_int8_t aid;
3053
3054 if (ibuf_se && ibuf_se->w.queued >= SESS_MSG_HIGH_MARK2000)
3055 return 0;
3056
3057	LIST_FOREACH(peer, &peerlist, peer_l) {
3058 if (peer->conf.id == 0)
3059 continue;
3060 if (peer->state != PEER_UP)
3061 continue;
3062 if (peer->throttled)
3063 continue;
3064 for (aid = 0; aid < AID_MAX5; aid++) {
3065 if (!RB_EMPTY(&peer->updates[aid])((&peer->updates[aid])->rbh_root == ((void*)0)) ||
3066 !RB_EMPTY(&peer->withdraws[aid])((&peer->withdraws[aid])->rbh_root == ((void*)0)))
3067 return 1;
3068 }
3069 }
3070 return 0;
3071}
3072
3073void
3074rde_update_queue_runner(void)
3075{
3076 struct rde_peer *peer;
3077 int r, sent, max = RDE_RUNNER_ROUNDS100, eor;
3078 u_int16_t len, wpos;
3079
3080 len = sizeof(queue_buf) - MSGSIZE_HEADER19;
3081 do {
3082 sent = 0;
3083		LIST_FOREACH(peer, &peerlist, peer_l) {
3084 if (peer->conf.id == 0)
3085 continue;
3086 if (peer->state != PEER_UP)
3087 continue;
3088 if (peer->throttled)
3089 continue;
3090 eor = 0;
3091 wpos = 0;
3092 /* first withdraws, save 2 bytes for path attributes */
3093 if ((r = up_dump_withdraws(queue_buf, len - 2, peer,
3094 AID_INET1)) == -1)
3095 continue;
3096 wpos += r;
3097
3098 /* now bgp path attributes unless it is the EoR mark */
3099 if (up_is_eor(peer, AID_INET1)) {
3100 eor = 1;
3101 bzero(queue_buf + wpos, 2);
3102 wpos += 2;
3103 } else {
3104 r = up_dump_attrnlri(queue_buf + wpos,
3105 len - wpos, peer);
3106 wpos += r;
3107 }
3108
3109 /* finally send message to SE */
3110 if (wpos > 4) {
3111 if (imsg_compose(ibuf_se, IMSG_UPDATE,
3112 peer->conf.id, 0, -1, queue_buf,
3113 wpos) == -1)
3114 fatal("%s %d imsg_compose error",
3115 __func__, __LINE__3115);
3116 sent++;
3117 }
3118 if (eor) {
3119 int sent_eor = peer->sent_eor & (1 << AID_INET1);
3120 if (peer->capa.grestart.restart && !sent_eor)
3121 rde_peer_send_eor(peer, AID_INET1);
3122 if (peer->capa.enhanced_rr && sent_eor)
3123 rde_peer_send_rrefresh(peer, AID_INET1,
3124 ROUTE_REFRESH_END_RR2);
3125 }
3126 }
3127 max -= sent;
3128 } while (sent != 0 && max > 0);
3129}
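The End-of-RIB marker sent via rde_peer_send_eor() is just an UPDATE whose body consists of two zero length fields: withdrawn routes length 0 and total path attribute length 0 (RFC 4724). The runner above already has the zeroed withdraw section in queue_buf and appends two more zero octets for the attribute length. The four-octet body in isolation, as a sketch:

#include <stdint.h>
#include <string.h>

/* Sketch: [withdrawn routes len = 0:2][total path attr len = 0:2] */
static size_t
eor_body(uint8_t *buf, size_t buflen)
{
	if (buflen < 4)
		return 0;
	memset(buf, 0, 4);
	return 4;
}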
3130
3131void
3132rde_update6_queue_runner(u_int8_t aid)
3133{
3134 struct rde_peer *peer;
3135 int r, sent, max = RDE_RUNNER_ROUNDS100 / 2;
3136 u_int16_t len;
3137
3138 /* first withdraws ... */
3139 do {
3140 sent = 0;
3141		LIST_FOREACH(peer, &peerlist, peer_l) {
3142 if (peer->conf.id == 0)
3143 continue;
3144 if (peer->state != PEER_UP)
3145 continue;
3146 if (peer->throttled)
3147 continue;
3148 len = sizeof(queue_buf) - MSGSIZE_HEADER19;
3149 r = up_dump_mp_unreach(queue_buf, len, peer, aid);
3150 if (r == -1)
3151 continue;
3152 /* finally send message to SE */
3153 if (imsg_compose(ibuf_se, IMSG_UPDATE, peer->conf.id,
3154 0, -1, queue_buf, r) == -1)
3155 fatal("%s %d imsg_compose error", __func__,
3156 __LINE__3156);
3157 sent++;
3158 }
3159 max -= sent;
3160 } while (sent != 0 && max > 0);
3161
3162 /* ... then updates */
3163 max = RDE_RUNNER_ROUNDS100 / 2;
3164 do {
3165 sent = 0;
3166		LIST_FOREACH(peer, &peerlist, peer_l) {
3167 if (peer->conf.id == 0)
3168 continue;
3169 if (peer->state != PEER_UP)
3170 continue;
3171 if (peer->throttled)
3172 continue;
3173 len = sizeof(queue_buf) - MSGSIZE_HEADER19;
3174 if (up_is_eor(peer, aid)) {
3175 int sent_eor = peer->sent_eor & (1 << aid);
3176 if (peer->capa.grestart.restart && !sent_eor)
3177 rde_peer_send_eor(peer, aid);
3178 if (peer->capa.enhanced_rr && sent_eor)
3179 rde_peer_send_rrefresh(peer, aid,
3180 ROUTE_REFRESH_END_RR2);
3181 continue;
3182 }
3183 r = up_dump_mp_reach(queue_buf, len, peer, aid);
3184 if (r == 0)
3185 continue;
3186
3187 /* finally send message to SE */
3188 if (imsg_compose(ibuf_se, IMSG_UPDATE, peer->conf.id,
3189 0, -1, queue_buf, r) == -1)
3190 fatal("%s %d imsg_compose error", __func__,
3191 __LINE__3191);
3192 sent++;
3193 }
3194 max -= sent;
3195 } while (sent != 0 && max > 0);
3196}
3197
3198/*
3199 * pf table specific functions
3200 */
3201struct rde_pftable_node {
3202	RB_ENTRY(rde_pftable_node)	entry;
3203 struct pt_entry *prefix;
3204 int refcnt;
3205 u_int16_t id;
3206};
3207RB_HEAD(rde_pftable_tree, rde_pftable_node)struct rde_pftable_tree { struct rde_pftable_node *rbh_root; };
3208
3209static inline int
3210rde_pftable_cmp(struct rde_pftable_node *a, struct rde_pftable_node *b)
3211{
3212 if (a->prefix > b->prefix)
3213 return 1;
3214 if (a->prefix < b->prefix)
3215 return -1;
3216 return (a->id - b->id);
3217}
3218
3219RB_GENERATE_STATIC(rde_pftable_tree, rde_pftable_node, entry, rde_pftable_cmp)
entry.rbe_right; else if ((elm)->entry.rbe_right == ((void
*)0)) child = (elm)->entry.rbe_left; else { struct rde_pftable_node
*left; elm = (elm)->entry.rbe_right; while ((left = (elm)
->entry.rbe_left)) elm = left; child = (elm)->entry.rbe_right
; parent = (elm)->entry.rbe_parent; color = (elm)->entry
.rbe_color; if (child) (child)->entry.rbe_parent = parent;
if (parent) { if ((parent)->entry.rbe_left == elm) (parent
)->entry.rbe_left = child; else (parent)->entry.rbe_right
= child; do {} while (0); } else (head)->rbh_root = child
; if ((elm)->entry.rbe_parent == old) parent = elm; (elm)->
entry = (old)->entry; if ((old)->entry.rbe_parent) { if
(((old)->entry.rbe_parent)->entry.rbe_left == old) ((old
)->entry.rbe_parent)->entry.rbe_left = elm; else ((old)
->entry.rbe_parent)->entry.rbe_right = elm; do {} while
(0); } else (head)->rbh_root = elm; ((old)->entry.rbe_left
)->entry.rbe_parent = elm; if ((old)->entry.rbe_right) (
(old)->entry.rbe_right)->entry.rbe_parent = elm; if (parent
) { left = parent; do { do {} while (0); } while ((left = (left
)->entry.rbe_parent)); } goto color; } parent = (elm)->
entry.rbe_parent; color = (elm)->entry.rbe_color; if (child
) (child)->entry.rbe_parent = parent; if (parent) { if ((parent
)->entry.rbe_left == elm) (parent)->entry.rbe_left = child
; else (parent)->entry.rbe_right = child; do {} while (0);
} else (head)->rbh_root = child; color: if (color == 0) rde_pftable_tree_RB_REMOVE_COLOR
(head, parent, child); return (old); } __attribute__((__unused__
)) static struct rde_pftable_node * rde_pftable_tree_RB_INSERT
(struct rde_pftable_tree *head, struct rde_pftable_node *elm)
{ struct rde_pftable_node *tmp; struct rde_pftable_node *parent
= ((void*)0); int comp = 0; tmp = (head)->rbh_root; while
(tmp) { parent = tmp; comp = (rde_pftable_cmp)(elm, parent);
if (comp < 0) tmp = (tmp)->entry.rbe_left; else if (comp
> 0) tmp = (tmp)->entry.rbe_right; else return (tmp); }
do { (elm)->entry.rbe_parent = parent; (elm)->entry.rbe_left
= (elm)->entry.rbe_right = ((void*)0); (elm)->entry.rbe_color
= 1; } while (0); if (parent != ((void*)0)) { if (comp < 0
) (parent)->entry.rbe_left = elm; else (parent)->entry.
rbe_right = elm; do {} while (0); } else (head)->rbh_root =
elm; rde_pftable_tree_RB_INSERT_COLOR(head, elm); return (((
void*)0)); } __attribute__((__unused__)) static struct rde_pftable_node
* rde_pftable_tree_RB_FIND(struct rde_pftable_tree *head, struct
rde_pftable_node *elm) { struct rde_pftable_node *tmp = (head
)->rbh_root; int comp; while (tmp) { comp = rde_pftable_cmp
(elm, tmp); if (comp < 0) tmp = (tmp)->entry.rbe_left; else
if (comp > 0) tmp = (tmp)->entry.rbe_right; else return
(tmp); } return (((void*)0)); } __attribute__((__unused__)) static
struct rde_pftable_node * rde_pftable_tree_RB_NFIND(struct rde_pftable_tree
*head, struct rde_pftable_node *elm) { struct rde_pftable_node
*tmp = (head)->rbh_root; struct rde_pftable_node *res = (
(void*)0); int comp; while (tmp) { comp = rde_pftable_cmp(elm
, tmp); if (comp < 0) { res = tmp; tmp = (tmp)->entry.rbe_left
; } else if (comp > 0) tmp = (tmp)->entry.rbe_right; else
return (tmp); } return (res); } __attribute__((__unused__)) static
struct rde_pftable_node * rde_pftable_tree_RB_NEXT(struct rde_pftable_node
*elm) { if ((elm)->entry.rbe_right) { elm = (elm)->entry
.rbe_right; while ((elm)->entry.rbe_left) elm = (elm)->
entry.rbe_left; } else { if ((elm)->entry.rbe_parent &&
(elm == ((elm)->entry.rbe_parent)->entry.rbe_left)) elm
= (elm)->entry.rbe_parent; else { while ((elm)->entry.
rbe_parent && (elm == ((elm)->entry.rbe_parent)->
entry.rbe_right)) elm = (elm)->entry.rbe_parent; elm = (elm
)->entry.rbe_parent; } } return (elm); } __attribute__((__unused__
)) static struct rde_pftable_node * rde_pftable_tree_RB_PREV(
struct rde_pftable_node *elm) { if ((elm)->entry.rbe_left)
{ elm = (elm)->entry.rbe_left; while ((elm)->entry.rbe_right
) elm = (elm)->entry.rbe_right; } else { if ((elm)->entry
.rbe_parent && (elm == ((elm)->entry.rbe_parent)->
entry.rbe_right)) elm = (elm)->entry.rbe_parent; else { while
((elm)->entry.rbe_parent && (elm == ((elm)->entry
.rbe_parent)->entry.rbe_left)) elm = (elm)->entry.rbe_parent
; elm = (elm)->entry.rbe_parent; } } return (elm); } __attribute__
((__unused__)) static struct rde_pftable_node * rde_pftable_tree_RB_MINMAX
(struct rde_pftable_tree *head, int val) { struct rde_pftable_node
*tmp = (head)->rbh_root; struct rde_pftable_node *parent =
((void*)0); while (tmp) { parent = tmp; if (val < 0) tmp =
(tmp)->entry.rbe_left; else tmp = (tmp)->entry.rbe_right
; } return (parent); }
;
3220
struct rde_pftable_tree pftable_tree = RB_INITIALIZER(&pftable_tree);
int need_commit;

static void
rde_pftable_send(u_int16_t id, struct pt_entry *pt, int del)
{
	struct pftable_msg pfm;

	if (id == 0)
		return;

	/* do not run while cleaning up */
	if (rde_quit)
		return;

	bzero(&pfm, sizeof(pfm));
	strlcpy(pfm.pftable, pftable_id2name(id), sizeof(pfm.pftable));
	pt_getaddr(pt, &pfm.addr);
	pfm.len = pt->prefixlen;

	if (imsg_compose(ibuf_main,
	    del ? IMSG_PFTABLE_REMOVE : IMSG_PFTABLE_ADD,
	    0, 0, -1, &pfm, sizeof(pfm)) == -1)
		fatal("%s %d imsg_compose error", __func__, __LINE__);

	need_commit = 1;
}

void
rde_pftable_add(u_int16_t id, struct prefix *p)
{
	struct rde_pftable_node *pfn, node;

	memset(&node, 0, sizeof(node));
	node.prefix = p->pt;
	node.id = id;

	pfn = RB_FIND(rde_pftable_tree, &pftable_tree, &node);
	if (pfn == NULL) {
		if ((pfn = calloc(1, sizeof(*pfn))) == NULL)
			fatal("%s", __func__);
		pfn->prefix = pt_ref(p->pt);
		pfn->id = id;

		if (RB_INSERT(rde_pftable_tree, &pftable_tree, pfn) != NULL)
			fatalx("%s: tree corrupt", __func__);

		rde_pftable_send(id, p->pt, 0);
	}
	pfn->refcnt++;
}

void
rde_pftable_del(u_int16_t id, struct prefix *p)
{
	struct rde_pftable_node *pfn, node;

	memset(&node, 0, sizeof(node));
	node.prefix = p->pt;
	node.id = id;

	pfn = RB_FIND(rde_pftable_tree, &pftable_tree, &node);
	if (pfn == NULL)
		return;

	if (--pfn->refcnt <= 0) {
		rde_pftable_send(id, p->pt, 1);

		if (RB_REMOVE(rde_pftable_tree, &pftable_tree, pfn) == NULL)
			fatalx("%s: tree corrupt", __func__);

		pt_unref(pfn->prefix);
		free(pfn);
	}
}

void
rde_commit_pftable(void)
{
	/* do not run while cleaning up */
	if (rde_quit)
		return;

	if (!need_commit)
		return;

	if (imsg_compose(ibuf_main, IMSG_PFTABLE_COMMIT, 0, 0, -1, NULL, 0) ==
	    -1)
		fatal("%s %d imsg_compose error", __func__, __LINE__);

	need_commit = 0;
}

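/*
 * Editor's illustration (hypothetical prefixes p1/p2, not part of rde.c):
 * the refcounted tree above deduplicates identical (prefix, pftable id)
 * pairs, so the parent process only sees the first add and the last
 * delete, followed by a single commit:
 *
 *	rde_pftable_add(id, p1);	refcnt 0 -> 1, sends IMSG_PFTABLE_ADD
 *	rde_pftable_add(id, p2);	refcnt 1 -> 2, no imsg
 *	rde_pftable_del(id, p1);	refcnt 2 -> 1, no imsg
 *	rde_pftable_del(id, p2);	refcnt 1 -> 0, sends IMSG_PFTABLE_REMOVE
 *	rde_commit_pftable();		IMSG_PFTABLE_COMMIT flushes the batch
 */
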
/*
 * nexthop specific functions
 */
void
rde_send_nexthop(struct bgpd_addr *next, int insert)
{
	int type;

	if (insert)
		type = IMSG_NEXTHOP_ADD;
	else
		type = IMSG_NEXTHOP_REMOVE;

	if (imsg_compose(ibuf_main, type, 0, 0, -1, next,
	    sizeof(struct bgpd_addr)) == -1)
		fatal("%s %d imsg_compose error", __func__, __LINE__);
}

/*
 * soft reconfig specific functions
 */
void
rde_reload_done(void)
{
	struct rde_peer		*peer;
	struct filter_head	*fh;
	struct rde_prefixset_head prefixsets_old;
	struct rde_prefixset_head originsets_old;
	struct as_set_head	 as_sets_old;
	u_int16_t		 rid;
	int			 reload = 0;

	softreconfig = 0;

	SIMPLEQ_INIT(&prefixsets_old);
	SIMPLEQ_INIT(&originsets_old);
	SIMPLEQ_INIT(&as_sets_old);
	SIMPLEQ_CONCAT(&prefixsets_old, &conf->rde_prefixsets);
	SIMPLEQ_CONCAT(&originsets_old, &conf->rde_originsets);
	SIMPLEQ_CONCAT(&as_sets_old, &conf->as_sets);

	/* merge the main config */
	copy_config(conf, nconf);

	/* need to copy the sets and roa table and clear them in nconf */
	SIMPLEQ_CONCAT(&conf->rde_prefixsets, &nconf->rde_prefixsets);
	SIMPLEQ_CONCAT(&conf->rde_originsets, &nconf->rde_originsets);
	SIMPLEQ_CONCAT(&conf->as_sets, &nconf->as_sets);

	/* apply new set of l3vpn, sync will be done later */
	free_l3vpns(&conf->l3vpns);
	SIMPLEQ_CONCAT(&conf->l3vpns, &nconf->l3vpns);
	/* XXX WHERE IS THE SYNC ??? */

	free_config(nconf);
	nconf = NULL;

	/* sync peerself with conf */
	peerself->remote_bgpid = ntohl(conf->bgpid);
	peerself->conf.local_as = conf->as;
	peerself->conf.remote_as = conf->as;
	peerself->conf.remote_addr.aid = AID_INET;
	peerself->conf.remote_addr.v4.s_addr = conf->bgpid;
	peerself->conf.remote_masklen = 32;
	peerself->short_as = conf->short_as;

	rde_mark_prefixsets_dirty(&prefixsets_old, &conf->rde_prefixsets);
	rde_mark_prefixsets_dirty(&originsets_old, &conf->rde_originsets);
	as_sets_mark_dirty(&as_sets_old, &conf->as_sets);

	/*
	 * Make the new filter rules the active ones but keep the old set
	 * around for softreconfig.  This is needed so that changes happening
	 * during the reload still use the right filters.
	 */
	fh = out_rules;
	out_rules = out_rules_tmp;
	out_rules_tmp = fh;

	rde_filter_calc_skip_steps(out_rules);

	/* make sure that rde_eval_all is correctly set after a config change */
	rde_eval_all = 0;

	/* check if filter changed */
	LIST_FOREACH(peer, &peerlist, peer_l) {
		if (peer->conf.id == 0)	/* ignore peerself */
			continue;
		peer->reconf_out = 0;
		peer->reconf_rib = 0;
		if (peer->export_type != peer->conf.export_type) {
			log_peer_info(&peer->conf, "export type change, "
			    "reloading");
			peer->reconf_rib = 1;
		}
		if ((peer->flags & PEERFLAG_EVALUATE_ALL) !=
		    (peer->conf.flags & PEERFLAG_EVALUATE_ALL)) {
			log_peer_info(&peer->conf, "rde evaluate change, "
			    "reloading");
			peer->reconf_rib = 1;
		}
		if ((peer->flags & PEERFLAG_TRANS_AS) !=
		    (peer->conf.flags & PEERFLAG_TRANS_AS)) {
			log_peer_info(&peer->conf, "transparent-as change, "
			    "reloading");
			peer->reconf_rib = 1;
		}
		if (peer->loc_rib_id != rib_find(peer->conf.rib)) {
			log_peer_info(&peer->conf, "rib change, reloading");
			peer->loc_rib_id = rib_find(peer->conf.rib);
			if (peer->loc_rib_id == RIB_NOTFOUND)
				fatalx("King Bula's peer met an unknown RIB");
			peer->reconf_rib = 1;
		}
		peer->export_type = peer->conf.export_type;
		peer->flags = peer->conf.flags;
		if (peer->flags & PEERFLAG_EVALUATE_ALL)
			rde_eval_all = 1;

		if (peer->reconf_rib) {
			if (prefix_dump_new(peer, AID_UNSPEC,
			    RDE_RUNNER_ROUNDS, peer, rde_up_flush_upcall,
			    rde_softreconfig_in_done, NULL) == -1)
				fatal("%s: prefix_dump_new", __func__);
			log_peer_info(&peer->conf, "flushing Adj-RIB-Out");
			softreconfig++;	/* account for the running flush */
			continue;
		}
		if (!rde_filter_equal(out_rules, out_rules_tmp, peer)) {
			char *p = log_fmt_peer(&peer->conf);
			log_debug("out filter change: reloading peer %s", p);
			free(p);
			peer->reconf_out = 1;
		}
	}

	/* bring ribs in sync */
	for (rid = 0; rid < rib_size; rid++) {
		struct rib *rib = rib_byid(rid);
		if (rib == NULL)
			continue;
		rde_filter_calc_skip_steps(rib->in_rules_tmp);

		/* flip rules, make new active */
		fh = rib->in_rules;
		rib->in_rules = rib->in_rules_tmp;
		rib->in_rules_tmp = fh;

		switch (rib->state) {
		case RECONF_DELETE:
			rib_free(rib);
			break;
		case RECONF_RELOAD:
			rib_update(rib);
			rib->state = RECONF_KEEP;
			/* FALLTHROUGH */
		case RECONF_KEEP:
			if (rde_filter_equal(rib->in_rules,
			    rib->in_rules_tmp, NULL))
				/* rib is in sync */
				break;
			log_debug("in filter change: reloading RIB %s",
			    rib->name);
			rib->state = RECONF_RELOAD;
			reload++;
			break;
		case RECONF_REINIT:
			/* new rib */
			rib->state = RECONF_RELOAD;
			reload++;
			break;
		case RECONF_NONE:
			break;
		}
		filterlist_free(rib->in_rules_tmp);
		rib->in_rules_tmp = NULL;
	}

	filterlist_free(out_rules_tmp);
	out_rules_tmp = NULL;
	/* old filters removed, free all sets */
	free_rde_prefixsets(&prefixsets_old);
	free_rde_prefixsets(&originsets_old);
	as_sets_free(&as_sets_old);

	log_info("RDE reconfigured");

	softreconfig++;
	if (reload > 0) {
		if (rib_dump_new(RIB_ADJ_IN, AID_UNSPEC, RDE_RUNNER_ROUNDS,
		    NULL, rde_softreconfig_in, rde_softreconfig_in_done,
		    NULL) == -1)
			fatal("%s: rib_dump_new", __func__);
		log_info("running softreconfig in");
	} else {
		rde_softreconfig_in_done((void *)1, AID_UNSPEC);
	}
}

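/*
 * Editor's note on the flow below: `softreconfig` is a counter of
 * outstanding rib/prefix dumps.  Every stage that starts a dump bumps it
 * and the matching done-callback drops it; only when it reaches zero does
 * the next stage run.  Roughly:
 *
 *	rde_reload_done()               start Adj-RIB-In pass (softreconfig in)
 *	  -> rde_softreconfig_in_done()    start fib sync / re-evaluation and
 *	                                   Adj-RIB-Out (softreconfig out) dumps
 *	    -> rde_softreconfig_out_done(),
 *	       rde_softreconfig_sync_done()
 *	      -> rde_softreconfig_done()   report IMSG_RECONF_DONE to the parent
 */
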
static void
rde_softreconfig_in_done(void *arg, u_int8_t dummy)
{
	struct rde_peer	*peer;
	u_int16_t	 i;

	softreconfig--;
	/* one guy done but other dumps are still running */
	if (softreconfig > 0)
		return;

	if (arg == NULL)
		log_info("softreconfig in done");

	/* now do the Adj-RIB-Out sync and a possible FIB sync */
	softreconfig = 0;
	for (i = 0; i < rib_size; i++) {
		struct rib *rib = rib_byid(i);
		if (rib == NULL)
			continue;
		rib->state = RECONF_NONE;
		if (rib->fibstate == RECONF_RELOAD) {
			if (rib_dump_new(i, AID_UNSPEC, RDE_RUNNER_ROUNDS,
			    rib, rde_softreconfig_sync_fib,
			    rde_softreconfig_sync_done, NULL) == -1)
				fatal("%s: rib_dump_new", __func__);
			softreconfig++;
			log_info("starting fib sync for rib %s",
			    rib->name);
		} else if (rib->fibstate == RECONF_REINIT) {
			if (rib_dump_new(i, AID_UNSPEC, RDE_RUNNER_ROUNDS,
			    rib, rde_softreconfig_sync_reeval,
			    rde_softreconfig_sync_done, NULL) == -1)
				fatal("%s: rib_dump_new", __func__);
			softreconfig++;
			log_info("starting re-evaluation of rib %s",
			    rib->name);
		}
	}

	LIST_FOREACH(peer, &peerlist, peer_l) {
		u_int8_t aid;

		if (peer->reconf_out) {
			if (peer->export_type == EXPORT_NONE) {
				/* nothing to do here */
				peer->reconf_out = 0;
			} else if (peer->export_type == EXPORT_DEFAULT_ROUTE) {
				/* just resend the default route */
				for (aid = 0; aid < AID_MAX; aid++) {
					if (peer->capa.mp[aid])
						up_generate_default(out_rules,
						    peer, aid);
				}
				peer->reconf_out = 0;
			} else
				rib_byid(peer->loc_rib_id)->state =
				    RECONF_RELOAD;
		} else if (peer->reconf_rib) {
			/* dump the full table to neighbors that changed rib */
			for (aid = 0; aid < AID_MAX; aid++) {
				if (peer->capa.mp[aid])
					peer_dump(peer, aid);
			}
		}
	}

	for (i = 0; i < rib_size; i++) {
		struct rib *rib = rib_byid(i);
		if (rib == NULL)
			continue;
		if (rib->state == RECONF_RELOAD) {
			if (rib_dump_new(i, AID_UNSPEC, RDE_RUNNER_ROUNDS,
			    rib, rde_softreconfig_out,
			    rde_softreconfig_out_done, NULL) == -1)
				fatal("%s: rib_dump_new", __func__);
			softreconfig++;
			log_info("starting softreconfig out for rib %s",
			    rib->name);
		}
	}

	/* if nothing to do move to last stage */
	if (softreconfig == 0)
		rde_softreconfig_done();
}

static void
rde_softreconfig_out_done(void *arg, u_int8_t aid)
{
	struct rib	*rib = arg;

	/* this RIB dump is done */
	log_info("softreconfig out done for %s", rib->name);

	/* check if other dumps are still running */
	if (--softreconfig == 0)
		rde_softreconfig_done();
}

static void
rde_softreconfig_done(void)
{
	u_int16_t	i;

	for (i = 0; i < rib_size; i++) {
		struct rib *rib = rib_byid(i);
		if (rib == NULL)
			continue;
		rib->state = RECONF_NONE;
	}

	log_info("RDE soft reconfiguration done");
	imsg_compose(ibuf_main, IMSG_RECONF_DONE, 0, 0,
	    -1, NULL, 0);
}

static void
rde_softreconfig_in(struct rib_entry *re, void *bula)
{
	struct filterstate	 state;
	struct rib		*rib;
	struct prefix		*p;
	struct pt_entry		*pt;
	struct rde_peer		*peer;
	struct rde_aspath	*asp;
	enum filter_actions	 action;
	struct bgpd_addr	 prefix;
	u_int16_t		 i;

	pt = re->prefix;
	pt_getaddr(pt, &prefix);
	LIST_FOREACH(p, &re->prefix_h, entry.list.rib) {
		asp = prefix_aspath(p);
		peer = prefix_peer(p);

		/* skip announced networks, they are never filtered */
		if (asp->flags & F_PREFIX_ANNOUNCED)
			continue;

		for (i = RIB_LOC_START; i < rib_size; i++) {
			rib = rib_byid(i);
			if (rib == NULL)
				continue;

			if (rib->state != RECONF_RELOAD)
				continue;

			rde_filterstate_prep(&state, asp, prefix_communities(p),
			    prefix_nexthop(p), prefix_nhflags(p));
			action = rde_filter(rib->in_rules, peer, peer, &prefix,
			    pt->prefixlen, p->validation_state, &state);

			if (action == ACTION_ALLOW) {
				/* update Local-RIB */
				prefix_update(rib, peer, p->path_id, &state,
				    &prefix, pt->prefixlen,
				    p->validation_state);
			} else if (action == ACTION_DENY) {
				/* remove from Local-RIB */
				prefix_withdraw(rib, peer, p->path_id, &prefix,
				    pt->prefixlen);
			}

			rde_filterstate_clean(&state);
		}
	}
}

static void
rde_softreconfig_out(struct rib_entry *re, void *bula)
{
	struct prefix		*p = re->active;
	struct rde_peer		*peer;
	u_int8_t		 aid = re->prefix->aid;

	if (p == NULL)
		/* no valid path for prefix */
		return;

	LIST_FOREACH(peer, &peerlist, peer_l) {
		if (rde_skip_peer(peer, re->rib_id, aid))
			continue;
		/* skip peers which don't need to reconfigure */
		if (peer->reconf_out == 0)
			continue;

		/* Regenerate all updates. */
		up_generate_updates(out_rules, peer, p, p);
	}
}

static void
rde_softreconfig_sync_reeval(struct rib_entry *re, void *arg)
{
	struct prefix_list	 prefixes;
	struct prefix		*p, *next;
	struct rib		*rib = arg;

	if (rib->flags & F_RIB_NOEVALUATE) {
		/*
		 * evaluation process is turned off
		 * so remove all prefixes from adj-rib-out
		 * also unlink nexthop if it was linked
		 */
		LIST_FOREACH(p, &re->prefix_h, entry.list.rib) {
			if (p->flags & PREFIX_NEXTHOP_LINKED)
				nexthop_unlink(p);
		}
		if (re->active) {
			rde_generate_updates(rib, NULL, re->active, 0);
			re->active = NULL;
		}
		return;
	}

	/* evaluation process is turned on, so evaluate all prefixes again */
	re->active = NULL;
	prefixes = re->prefix_h;
	LIST_INIT(&re->prefix_h);

	LIST_FOREACH_SAFE(p, &prefixes, entry.list.rib, next) {
		/* need to re-link the nexthop if not already linked */
		if ((p->flags & PREFIX_NEXTHOP_LINKED) == 0)
			nexthop_link(p);
		prefix_evaluate(re, p, p);
	}
}

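/*
 * Editor's note on the re-evaluation above: the prefix list is detached
 * from the rib_entry (LIST_INIT on re->prefix_h) before the walk because
 * prefix_evaluate() re-inserts each prefix in decision-process order;
 * iterating and re-inserting on the same list head would break the
 * traversal, hence LIST_FOREACH_SAFE over the detached copy.
 */
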
static void
rde_softreconfig_sync_fib(struct rib_entry *re, void *bula)
{
	if (re->active)
		rde_send_kroute(re_rib(re), re->active, NULL);
}

static void
rde_softreconfig_sync_done(void *arg, u_int8_t aid)
{
	struct rib	*rib = arg;

	/* this RIB dump is done */
	if (rib->fibstate == RECONF_RELOAD)
		log_info("fib sync done for %s", rib->name);
	else
		log_info("re-evaluation done for %s", rib->name);
	rib->fibstate = RECONF_NONE;

	/* check if other dumps are still running */
	if (--softreconfig == 0)
		rde_softreconfig_done();
}

/*
 * ROA specific functions. The roa set is updated independently of the
 * config so this runs outside of the softreconfig handlers.
 */
static void
rde_roa_softreload(struct rib_entry *re, void *bula)
{
	struct filterstate	 state;
	struct rib		*rib;
	struct prefix		*p;
	struct pt_entry		*pt;
	struct rde_peer		*peer;
	struct rde_aspath	*asp;
	enum filter_actions	 action;
	struct bgpd_addr	 prefix;
	u_int8_t		 vstate;
	u_int16_t		 i;

	pt = re->prefix;
	pt_getaddr(pt, &prefix);
	LIST_FOREACH(p, &re->prefix_h, entry.list.rib) {
		asp = prefix_aspath(p);
		peer = prefix_peer(p);

		/* ROA validation state update */
		vstate = rde_roa_validity(&rde_roa,
		    &prefix, pt->prefixlen, aspath_origin(asp->aspath));
		if (vstate == p->validation_state)
			continue;
		p->validation_state = vstate;

		/* skip announced networks, they are never filtered */
		if (asp->flags & F_PREFIX_ANNOUNCED)
			continue;

		for (i = RIB_LOC_START; i < rib_size; i++) {
			rib = rib_byid(i);
			if (rib == NULL)
				continue;

			rde_filterstate_prep(&state, asp, prefix_communities(p),
			    prefix_nexthop(p), prefix_nhflags(p));
			action = rde_filter(rib->in_rules, peer, peer, &prefix,
			    pt->prefixlen, p->validation_state, &state);

			if (action == ACTION_ALLOW) {
				/* update Local-RIB */
				prefix_update(rib, peer, p->path_id, &state,
				    &prefix, pt->prefixlen,
				    p->validation_state);
			} else if (action == ACTION_DENY) {
				/* remove from Local-RIB */
				prefix_withdraw(rib, peer, p->path_id, &prefix,
				    pt->prefixlen);
			}

			rde_filterstate_clean(&state);
		}
	}
}

static void
rde_roa_softreload_done(void *arg, u_int8_t aid)
{
	/* the roa update is done */
	log_info("ROA softreload done");
}

static void
rde_roa_reload(void)
{
	struct rde_prefixset roa_old;

	roa_old = rde_roa;
	rde_roa = roa_new;
	memset(&roa_new, 0, sizeof(roa_new));

	/* check if roa changed */
	if (trie_equal(&rde_roa.th, &roa_old.th)) {
		rde_roa.lastchange = roa_old.lastchange;
		trie_free(&roa_old.th);	/* old roa no longer needed */
		return;
	}

	rde_roa.lastchange = getmonotime();
	trie_free(&roa_old.th);	/* old roa no longer needed */

	log_debug("ROA change: reloading Adj-RIB-In");
	if (rib_dump_new(RIB_ADJ_IN, AID_UNSPEC, RDE_RUNNER_ROUNDS,
	    rib_byid(RIB_ADJ_IN), rde_roa_softreload,
	    rde_roa_softreload_done, NULL) == -1)
		fatal("%s: rib_dump_new", __func__);
}

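/*
 * Editor's note: rde_roa_reload() is a swap-and-compare reload.  The new
 * trie is moved into place unconditionally, but the costly Adj-RIB-In walk
 * is only started when trie_equal() reports an actual change; an identical
 * reload keeps the previous lastchange timestamp.
 */
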
/*
 * generic helper function
 */
u_int32_t
rde_local_as(void)
{
	return (conf->as);
}

int
rde_decisionflags(void)
{
	return (conf->flags & BGPD_FLAG_DECISION_MASK);
}

/* End-of-RIB marker, RFC 4724 */
static void
rde_peer_recv_eor(struct rde_peer *peer, u_int8_t aid)
{
	peer->prefix_rcvd_eor++;
	peer->recv_eor |= 1 << aid;

	/*
	 * First notify SE to avert a possible race with the restart timeout.
	 * If the timeout fires before this imsg is processed by the SE it will
	 * result in the same operation since the timeout issues a FLUSH which
	 * does the same as the RESTARTED action (flushing stale routes).
	 * The logic in the SE is so that only one of FLUSH or RESTARTED will
	 * be sent back to the RDE and so peer_flush is only called once.
	 */
	if (imsg_compose(ibuf_se, IMSG_SESSION_RESTARTED, peer->conf.id,
	    0, -1, &aid, sizeof(aid)) == -1)
		fatal("imsg_compose error while receiving EoR");

	log_peer_info(&peer->conf, "received %s EOR marker",
	    aid2str(aid));
}

static void
rde_peer_send_eor(struct rde_peer *peer, u_int8_t aid)
{
	u_int16_t	afi;
	u_int8_t	safi;

	peer->prefix_sent_eor++;
	peer->sent_eor |= 1 << aid;

	if (aid == AID_INET) {
		u_char null[4];

		bzero(&null, 4);
		if (imsg_compose(ibuf_se, IMSG_UPDATE, peer->conf.id,
		    0, -1, &null, 4) == -1)
			fatal("imsg_compose error while sending EoR");
	} else {
		u_int16_t	i;
		u_char		buf[10];

		if (aid2afi(aid, &afi, &safi) == -1)
			fatalx("peer_send_eor: bad AID");

		i = 0;	/* v4 withdrawn len */
		bcopy(&i, &buf[0], sizeof(i));
		i = htons(6);	/* path attr len */
		bcopy(&i, &buf[2], sizeof(i));
		buf[4] = ATTR_OPTIONAL;
		buf[5] = ATTR_MP_UNREACH_NLRI;
		buf[6] = 3;	/* withdrawn len */
		i = htons(afi);
		bcopy(&i, &buf[7], sizeof(i));
		buf[9] = safi;

		if (imsg_compose(ibuf_se, IMSG_UPDATE, peer->conf.id,
		    0, -1, &buf, 10) == -1)
			fatal("%s %d imsg_compose error in peer_send_eor",
			    __func__, __LINE__);
	}

	log_peer_info(&peer->conf, "sending %s EOR marker",
	    aid2str(aid));
}

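/*
 * Editor's illustration, derived from rde_peer_send_eor() above: for
 * AID_INET6 (AFI 2, SAFI 1) the 10-byte buf[] is the minimal UPDATE tail
 *
 *	00 00		withdrawn routes length = 0
 *	00 06		total path attribute length = 6
 *	80 0f 03	optional attribute MP_UNREACH_NLRI (type 15), length 3
 *	00 02 01	AFI = 2 (IPv6), SAFI = 1 (unicast), no NLRI
 *
 * while the AID_INET case is the classic four zero bytes of RFC 4724.
 */
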
void
rde_peer_send_rrefresh(struct rde_peer *peer, u_int8_t aid, u_int8_t subtype)
{
	struct route_refresh rr;

	/* not strictly needed, the SE checks as well */
	if (peer->capa.enhanced_rr == 0)
		return;

	switch (subtype) {
	case ROUTE_REFRESH_END_RR:
	case ROUTE_REFRESH_BEGIN_RR:
		break;
	default:
		fatalx("%s unexpected subtype %d", __func__, subtype);
	}

	rr.aid = aid;
	rr.subtype = subtype;

	if (imsg_compose(ibuf_se, IMSG_REFRESH, peer->conf.id, 0, -1,
	    &rr, sizeof(rr)) == -1)
		fatal("%s %d imsg_compose error", __func__, __LINE__);

	log_peer_info(&peer->conf, "sending %s %s marker",
	    aid2str(aid), subtype == ROUTE_REFRESH_END_RR ? "EoRR" : "BoRR");
}

/*
 * network announcement stuff
 */
void
network_add(struct network_config *nc, struct filterstate *state)
{
	struct l3vpn		*vpn;
	struct filter_set_head	*vpnset = NULL;
	struct in_addr		 prefix4;
	struct in6_addr		 prefix6;
	u_int8_t		 vstate;
	u_int16_t		 i;

	if (nc->rd != 0) {
		SIMPLEQ_FOREACH(vpn, &conf->l3vpns, entry) {
			if (vpn->rd != nc->rd)
				continue;
			switch (nc->prefix.aid) {
			case AID_INET:
				prefix4 = nc->prefix.v4;
				memset(&nc->prefix, 0, sizeof(nc->prefix));
				nc->prefix.aid = AID_VPN_IPv4;
				nc->prefix.rd = vpn->rd;
				nc->prefix.v4 = prefix4;
				nc->prefix.labellen = 3;
				nc->prefix.labelstack[0] =
				    (vpn->label >> 12) & 0xff;
				nc->prefix.labelstack[1] =
				    (vpn->label >> 4) & 0xff;
				nc->prefix.labelstack[2] =
				    (vpn->label << 4) & 0xf0;
				nc->prefix.labelstack[2] |= BGP_MPLS_BOS;
				vpnset = &vpn->export;
				break;
			case AID_INET6:
				prefix6 = nc->prefix.v6;
				memset(&nc->prefix, 0, sizeof(nc->prefix));
				nc->prefix.aid = AID_VPN_IPv6;
				nc->prefix.rd = vpn->rd;
				nc->prefix.v6 = prefix6;
				nc->prefix.labellen = 3;
				nc->prefix.labelstack[0] =
				    (vpn->label >> 12) & 0xff;
				nc->prefix.labelstack[1] =
				    (vpn->label >> 4) & 0xff;
				nc->prefix.labelstack[2] =
				    (vpn->label << 4) & 0xf0;
				nc->prefix.labelstack[2] |= BGP_MPLS_BOS;
				vpnset = &vpn->export;
				break;
			default:
				log_warnx("unable to VPNize prefix");
				filterset_free(&nc->attrset);
				return;
			}
			break;
		}
		if (vpn == NULL) {
			log_warnx("network_add: "
			    "prefix %s/%u in non-existing l3vpn %s",
			    log_addr(&nc->prefix), nc->prefixlen,
			    log_rd(nc->rd));
			return;
		}
	}

	rde_apply_set(&nc->attrset, peerself, peerself, state, nc->prefix.aid);
	if (vpnset)
		rde_apply_set(vpnset, peerself, peerself, state,
		    nc->prefix.aid);

	vstate = rde_roa_validity(&rde_roa, &nc->prefix,
	    nc->prefixlen, aspath_origin(state->aspath.aspath));
	if (prefix_update(rib_byid(RIB_ADJ_IN), peerself, 0, state, &nc->prefix,
	    nc->prefixlen, vstate) == 1)
		peerself->prefix_cnt++;
	for (i = RIB_LOC_START; i < rib_size; i++) {
		struct rib *rib = rib_byid(i);
		if (rib == NULL)
			continue;
		rde_update_log("announce", i, peerself,
		    state->nexthop ? &state->nexthop->exit_nexthop : NULL,
		    &nc->prefix, nc->prefixlen);
		prefix_update(rib, peerself, 0, state, &nc->prefix,
		    nc->prefixlen, vstate);
	}
	filterset_free(&nc->attrset);
}

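/*
 * Editor's worked example of the label-stack encoding above: a 20-bit
 * MPLS label occupies the top bits of the 3-byte stack entry, followed by
 * EXP (0 here) and the bottom-of-stack bit.  For vpn->label = 42:
 *
 *	labelstack[0] = (42 >> 12) & 0xff = 0x00
 *	labelstack[1] = (42 >>  4) & 0xff = 0x02
 *	labelstack[2] = ((42 << 4) & 0xf0) | BGP_MPLS_BOS = 0xa0 | 0x01 = 0xa1
 *
 * giving the wire bytes 00 02 a1.
 */
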
void
network_delete(struct network_config *nc)
{
	struct l3vpn	*vpn;
	struct in_addr	 prefix4;
	struct in6_addr	 prefix6;
	u_int32_t	 i;

	if (nc->rd) {
		SIMPLEQ_FOREACH(vpn, &conf->l3vpns, entry) {
			if (vpn->rd != nc->rd)
				continue;
			switch (nc->prefix.aid) {
			case AID_INET:
				prefix4 = nc->prefix.v4;
				memset(&nc->prefix, 0, sizeof(nc->prefix));
				nc->prefix.aid = AID_VPN_IPv4;
				nc->prefix.rd = vpn->rd;
				nc->prefix.v4 = prefix4;
				nc->prefix.labellen = 3;
				nc->prefix.labelstack[0] =
				    (vpn->label >> 12) & 0xff;
				nc->prefix.labelstack[1] =
				    (vpn->label >> 4) & 0xff;
				nc->prefix.labelstack[2] =
				    (vpn->label << 4) & 0xf0;
				nc->prefix.labelstack[2] |= BGP_MPLS_BOS;
				break;
			case AID_INET6:
				prefix6 = nc->prefix.v6;
				memset(&nc->prefix, 0, sizeof(nc->prefix));
				nc->prefix.aid = AID_VPN_IPv6;
				nc->prefix.rd = vpn->rd;
				nc->prefix.v6 = prefix6;
				nc->prefix.labellen = 3;
				nc->prefix.labelstack[0] =
				    (vpn->label >> 12) & 0xff;
				nc->prefix.labelstack[1] =
				    (vpn->label >> 4) & 0xff;
				nc->prefix.labelstack[2] =
				    (vpn->label << 4) & 0xf0;
				nc->prefix.labelstack[2] |= BGP_MPLS_BOS;
				break;
			default:
				log_warnx("unable to VPNize prefix");
				return;
			}
		}
	}

	for (i = RIB_LOC_START; i < rib_size; i++) {
		struct rib *rib = rib_byid(i);
		if (rib == NULL)
			continue;
		if (prefix_withdraw(rib, peerself, 0, &nc->prefix,
		    nc->prefixlen))
			rde_update_log("withdraw announce", i, peerself,
			    NULL, &nc->prefix, nc->prefixlen);
	}
	if (prefix_withdraw(rib_byid(RIB_ADJ_IN), peerself, 0, &nc->prefix,
	    nc->prefixlen))
		peerself->prefix_cnt--;
}

static void
network_dump_upcall(struct rib_entry *re, void *ptr)
{
	struct prefix		*p;
	struct rde_aspath	*asp;
	struct kroute_full	 k;
	struct bgpd_addr	 addr;
	struct rde_dump_ctx	*ctx = ptr;

	LIST_FOREACH(p, &re->prefix_h, entry.list.rib) {
		asp = prefix_aspath(p);
		if (!(asp->flags & F_PREFIX_ANNOUNCED))
			continue;
		pt_getaddr(p->pt, &addr);

		bzero(&k, sizeof(k));
		memcpy(&k.prefix, &addr, sizeof(k.prefix));
		if (prefix_nexthop(p) == NULL ||
		    prefix_nexthop(p)->state != NEXTHOP_REACH)
			k.nexthop.aid = k.prefix.aid;
		else
			memcpy(&k.nexthop, &prefix_nexthop(p)->true_nexthop,
			    sizeof(k.nexthop));
		k.prefixlen = p->pt->prefixlen;
		k.flags = F_KERNEL;
		if ((asp->flags & F_ANN_DYNAMIC) == 0)
			k.flags = F_STATIC;
		if (imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_NETWORK, 0,
		    ctx->req.pid, -1, &k, sizeof(k)) == -1)
			log_warnx("network_dump_upcall: "
			    "imsg_compose error");
	}
}

static void
network_flush_upcall(struct rib_entry *re, void *ptr)
{
	struct bgpd_addr	 addr;
	struct prefix		*p;
	u_int32_t		 i;
	u_int8_t		 prefixlen;

	p = prefix_bypeer(re, peerself, 0);
	if (p == NULL)
		return;
	if ((prefix_aspath(p)->flags & F_ANN_DYNAMIC) != F_ANN_DYNAMIC)
		return;

	pt_getaddr(re->prefix, &addr);
	prefixlen = re->prefix->prefixlen;

	for (i = RIB_LOC_START; i < rib_size; i++) {
		struct rib *rib = rib_byid(i);
		if (rib == NULL)
			continue;
		if (prefix_withdraw(rib, peerself, 0, &addr, prefixlen) == 1)
			rde_update_log("flush announce", i, peerself,
			    NULL, &addr, prefixlen);
	}

	if (prefix_withdraw(rib_byid(RIB_ADJ_IN), peerself, 0, &addr,
	    prefixlen) == 1)
		peerself->prefix_cnt--;
}

/* clean up */
void
rde_shutdown(void)
{
	/*
	 * the decision process is turned off if rde_quit = 1 and
	 * rde_shutdown depends on this.
	 */

	/* First all peers go down */
	peer_foreach(peer_down, NULL);

	/* free filters */
	filterlist_free(out_rules);
	filterlist_free(out_rules_tmp);

	/* kill the VPN configs */
	free_l3vpns(&conf->l3vpns);

	/* now check everything */
	rib_shutdown();
	nexthop_shutdown();
	path_shutdown();
	aspath_shutdown();
	attr_shutdown();
	pt_shutdown();
	peer_shutdown();
}

struct rde_prefixset *
rde_find_prefixset(char *name, struct rde_prefixset_head *p)
{
	struct rde_prefixset *ps;

	SIMPLEQ_FOREACH(ps, p, entry) {
		if (!strcmp(ps->name, name))
			return (ps);
	}
	return (NULL);
}

void
rde_mark_prefixsets_dirty(struct rde_prefixset_head *psold,
    struct rde_prefixset_head *psnew)
{
	struct rde_prefixset *new, *old;

	SIMPLEQ_FOREACH(new, psnew, entry) {
		if ((psold == NULL) ||
		    (old = rde_find_prefixset(new->name, psold)) == NULL) {
			new->dirty = 1;
			new->lastchange = getmonotime();
		} else {
			if (trie_equal(&new->th, &old->th) == 0) {
				new->dirty = 1;
				new->lastchange = getmonotime();
			} else
				new->lastchange = old->lastchange;
		}
	}
}

u_int8_t
rde_roa_validity(struct rde_prefixset *ps, struct bgpd_addr *prefix,
    u_int8_t plen, u_int32_t as)
{
	int r;

	r = trie_roa_check(&ps->th, prefix, plen, as);
	return (r & ROA_MASK);
}

int
ovs_match(struct prefix *p, u_int32_t flag)
{
	if (flag & (F_CTL_OVS_VALID|F_CTL_OVS_INVALID|F_CTL_OVS_NOTFOUND)) {
		switch (prefix_vstate(p)) {
		case ROA_VALID:
			if (!(flag & F_CTL_OVS_VALID))
				return 0;
			break;
		case ROA_INVALID:
			if (!(flag & F_CTL_OVS_INVALID))
				return 0;
			break;
		case ROA_NOTFOUND:
			if (!(flag & F_CTL_OVS_NOTFOUND))
				return 0;
			break;
		default:
			break;
		}
	}

	return 1;
}
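
/*
 * Editor's usage sketch (hypothetical caller, not part of rde.c): a
 * control request filtering on origin validation state would skip
 * non-matching prefixes like so:
 *
 *	if (!ovs_match(p, F_CTL_OVS_INVALID))
 *		continue;	// keep only ROA-invalid prefixes
 *
 * With none of the three F_CTL_OVS_* bits set, ovs_match() accepts every
 * prefix.
 */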