File: src/usr.sbin/bgpd/rde.c
Warning: line 1129, column 4: Value stored to 'rv' is never read
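The flagged statement is in the IMSG_RECONF_ROA_ITEM case of rde_dispatch_imsg_parent(), where the result of trie_roa_add() is stored in 'rv' but never examined. A minimal sketch of one way to silence the finding, modeled on the check the RTR dispatcher already performs for the same call (this is an illustrative assumption, not the upstream fix):

    case IMSG_RECONF_ROA_ITEM:
        if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(roa))
            fatalx("IMSG_RECONF_ROA_ITEM bad len");
        memcpy(&roa, imsg.data, sizeof(roa));
        /* check the result instead of storing it in the unused 'rv' */
        if (trie_roa_add(&last_prefixset->th, &roa) != 0)
            log_warnx("trie_roa_add(%s) failed", last_prefixset->name);
        break;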
1 | /* $OpenBSD: rde.c,v 1.613 2023/12/14 13:52:37 claudio Exp $ */ |
2 | |
3 | /* |
4 | * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org> |
5 | * Copyright (c) 2016 Job Snijders <job@instituut.net> |
6 | * Copyright (c) 2016 Peter Hessler <phessler@openbsd.org> |
7 | * Copyright (c) 2018 Sebastian Benoit <benno@openbsd.org> |
8 | * |
9 | * Permission to use, copy, modify, and distribute this software for any |
10 | * purpose with or without fee is hereby granted, provided that the above |
11 | * copyright notice and this permission notice appear in all copies. |
12 | * |
13 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES |
14 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF |
15 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR |
16 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES |
17 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN |
18 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF |
19 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
20 | */ |
21 | |
22 | #include <sys/types.h> |
23 | #include <sys/time.h> |
24 | #include <sys/resource.h> |
25 | |
26 | #include <errno.h> |
27 | #include <pwd.h> |
28 | #include <poll.h> |
29 | #include <signal.h> |
30 | #include <stdio.h> |
31 | #include <stdlib.h> |
32 | #include <string.h> |
33 | #include <syslog.h> |
34 | #include <unistd.h> |
35 | |
36 | #include "bgpd.h" |
37 | #include "session.h" |
38 | #include "rde.h" |
39 | #include "log.h" |
40 | |
41 | #define PFD_PIPE_MAIN 0 |
42 | #define PFD_PIPE_SESSION 1 |
43 | #define PFD_PIPE_SESSION_CTL 2 |
44 | #define PFD_PIPE_ROA 3 |
45 | #define PFD_PIPE_COUNT 4 |
46 | |
47 | void rde_sighdlr(int); |
48 | void rde_dispatch_imsg_session(struct imsgbuf *); |
49 | void rde_dispatch_imsg_parent(struct imsgbuf *); |
50 | void rde_dispatch_imsg_rtr(struct imsgbuf *); |
51 | void rde_dispatch_imsg_peer(struct rde_peer *, void *); |
52 | void rde_update_dispatch(struct rde_peer *, struct imsg *); |
53 | int rde_update_update(struct rde_peer *, uint32_t, |
54 | struct filterstate *, struct bgpd_addr *, uint8_t); |
55 | void rde_update_withdraw(struct rde_peer *, uint32_t, |
56 | struct bgpd_addr *, uint8_t); |
57 | int rde_attr_parse(u_char *, uint16_t, struct rde_peer *, |
58 | struct filterstate *, struct mpattr *); |
59 | int rde_attr_add(struct filterstate *, u_char *, uint16_t); |
60 | uint8_t rde_attr_missing(struct rde_aspath *, int, uint16_t); |
61 | int rde_get_mp_nexthop(u_char *, uint16_t, uint8_t, |
62 | struct rde_peer *, struct filterstate *); |
63 | void rde_as4byte_fixup(struct rde_peer *, struct rde_aspath *); |
64 | uint8_t rde_aspa_validity(struct rde_peer *, struct rde_aspath *, |
65 | uint8_t); |
66 | void rde_reflector(struct rde_peer *, struct rde_aspath *); |
67 | |
68 | void rde_dump_ctx_new(struct ctl_show_rib_request *, pid_t, |
69 | enum imsg_type); |
70 | void rde_dump_ctx_throttle(pid_t, int); |
71 | void rde_dump_ctx_terminate(pid_t); |
72 | void rde_dump_mrt_new(struct mrt *, pid_t, int); |
73 | |
74 | int rde_l3vpn_import(struct rde_community *, struct l3vpn *); |
75 | static void rde_commit_pftable(void); |
76 | void rde_reload_done(void); |
77 | static void rde_softreconfig_in_done(void *, uint8_t); |
78 | static void rde_softreconfig_out_done(void *, uint8_t); |
79 | static void rde_softreconfig_done(void); |
80 | static void rde_softreconfig_out(struct rib_entry *, void *); |
81 | static void rde_softreconfig_in(struct rib_entry *, void *); |
82 | static void rde_softreconfig_sync_reeval(struct rib_entry *, void *); |
83 | static void rde_softreconfig_sync_fib(struct rib_entry *, void *); |
84 | static void rde_softreconfig_sync_done(void *, uint8_t); |
85 | static void rde_rpki_reload(void); |
86 | static int rde_roa_reload(void); |
87 | static int rde_aspa_reload(void); |
88 | int rde_update_queue_pending(void); |
89 | void rde_update_queue_runner(uint8_t); |
90 | struct rde_prefixset *rde_find_prefixset(char *, struct rde_prefixset_head *); |
91 | void rde_mark_prefixsets_dirty(struct rde_prefixset_head *, |
92 | struct rde_prefixset_head *); |
93 | uint8_t rde_roa_validity(struct rde_prefixset *, |
94 | struct bgpd_addr *, uint8_t, uint32_t); |
95 | |
96 | static void rde_peer_recv_eor(struct rde_peer *, uint8_t); |
97 | static void rde_peer_send_eor(struct rde_peer *, uint8_t); |
98 | |
99 | void network_add(struct network_config *, struct filterstate *); |
100 | void network_delete(struct network_config *); |
101 | static void network_dump_upcall(struct rib_entry *, void *); |
102 | static void network_flush_upcall(struct rib_entry *, void *); |
103 | |
104 | void flowspec_add(struct flowspec *, struct filterstate *, |
105 | struct filter_set_head *); |
106 | void flowspec_delete(struct flowspec *); |
107 | static void flowspec_flush_upcall(struct rib_entry *, void *); |
108 | static void flowspec_dump_upcall(struct rib_entry *, void *); |
109 | static void flowspec_dump_done(void *, uint8_t); |
110 | |
111 | void rde_shutdown(void); |
112 | static int ovs_match(struct prefix *, uint32_t); |
113 | static int avs_match(struct prefix *, uint32_t); |
114 | |
115 | static struct imsgbuf *ibuf_se; |
116 | static struct imsgbuf *ibuf_se_ctl; |
117 | static struct imsgbuf *ibuf_rtr; |
118 | static struct imsgbuf *ibuf_main; |
119 | static struct bgpd_config *conf, *nconf; |
120 | static struct rde_prefixset rde_roa, roa_new; |
121 | static struct rde_aspa *rde_aspa, *aspa_new; |
122 | static uint8_t rde_aspa_generation; |
123 | |
124 | volatile sig_atomic_t rde_quit = 0; |
125 | struct filter_head *out_rules, *out_rules_tmp; |
126 | struct rde_memstats rdemem; |
127 | int softreconfig; |
128 | static int rde_eval_all; |
129 | |
130 | extern struct peer_tree peertable; |
131 | extern struct rde_peer *peerself; |
132 | |
133 | struct rde_dump_ctx { |
134 | LIST_ENTRY(rde_dump_ctx) entry; |
135 | struct ctl_show_rib_request req; |
136 | uint32_t peerid; |
137 | uint8_t throttled; |
138 | }; |
139 | |
140 | LIST_HEAD(, rde_dump_ctx) rde_dump_h = LIST_HEAD_INITIALIZER(rde_dump_h); |
141 | |
142 | struct rde_mrt_ctx { |
143 | LIST_ENTRY(rde_mrt_ctx) entry; |
144 | struct mrt mrt; |
145 | }; |
146 | |
147 | LIST_HEAD(, rde_mrt_ctx) rde_mrts = LIST_HEAD_INITIALIZER(rde_mrts); |
148 | u_int rde_mrt_cnt; |
149 | |
150 | void |
151 | rde_sighdlr(int sig) |
152 | { |
153 | switch (sig) { |
154 | case SIGINT: |
155 | case SIGTERM: |
156 | rde_quit = 1; |
157 | break; |
158 | } |
159 | } |
160 | |
161 | void |
162 | rde_main(int debug, int verbose) |
163 | { |
164 | struct passwd *pw; |
165 | struct pollfd *pfd = NULL; |
166 | struct rde_mrt_ctx *mctx, *xmctx; |
167 | void *newp; |
168 | u_int pfd_elms = 0, i, j; |
169 | int timeout; |
170 | uint8_t aid; |
171 | |
172 | log_init(debug, LOG_DAEMON); |
173 | log_setverbose(verbose); |
174 | |
175 | log_procinit(log_procnames[PROC_RDE]); |
176 | |
177 | if ((pw = getpwnam(BGPD_USER)) == NULL) |
178 | fatal("getpwnam"); |
179 | |
180 | if (chroot(pw->pw_dir) == -1) |
181 | fatal("chroot"); |
182 | if (chdir("/") == -1) |
183 | fatal("chdir(\"/\")"); |
184 | |
185 | setproctitle("route decision engine"); |
186 | |
187 | if (setgroups(1, &pw->pw_gid) || |
188 | setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) || |
189 | setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid)) |
190 | fatal("can't drop privileges"); |
191 | |
192 | if (pledge("stdio recvfd", NULL) == -1) |
193 | fatal("pledge"); |
194 | |
195 | signal(SIGTERM, rde_sighdlr); |
196 | signal(SIGINT, rde_sighdlr); |
197 | signal(SIGPIPE, SIG_IGN); |
198 | signal(SIGHUP, SIG_IGN); |
199 | signal(SIGALRM, SIG_IGN); |
200 | signal(SIGUSR1, SIG_IGN); |
201 | |
202 | if ((ibuf_main = malloc(sizeof(struct imsgbuf))) == NULL) |
203 | fatal(NULL); |
204 | imsg_init(ibuf_main, 3); |
205 | |
206 | /* initialize the RIB structures */ |
207 | if ((out_rules = calloc(1, sizeof(struct filter_head))) == NULL) |
208 | fatal(NULL); |
209 | TAILQ_INIT(out_rules); |
210 | |
211 | pt_init(); |
212 | peer_init(out_rules); |
213 | |
214 | /* make sure the default RIBs are setup */ |
215 | rib_new("Adj-RIB-In", 0, F_RIB_NOFIB | F_RIB_NOEVALUATE); |
216 | |
217 | conf = new_config(); |
218 | log_info("route decision engine ready"); |
219 | |
220 | while (rde_quit == 0) { |
221 | if (pfd_elms < PFD_PIPE_COUNT + rde_mrt_cnt) { |
222 | if ((newp = reallocarray(pfd, |
223 | PFD_PIPE_COUNT + rde_mrt_cnt, |
224 | sizeof(struct pollfd))) == NULL) { |
225 | /* panic for now */ |
226 | log_warn("could not resize pfd from %u -> %u" |
227 | " entries", pfd_elms, PFD_PIPE_COUNT + |
228 | rde_mrt_cnt); |
229 | fatalx("exiting"); |
230 | } |
231 | pfd = newp; |
232 | pfd_elms = PFD_PIPE_COUNT + rde_mrt_cnt; |
233 | } |
234 | timeout = -1; |
235 | memset(pfd, 0, sizeof(struct pollfd) * pfd_elms); |
236 | |
237 | set_pollfd(&pfd[PFD_PIPE_MAIN], ibuf_main); |
238 | set_pollfd(&pfd[PFD_PIPE_SESSION], ibuf_se); |
239 | set_pollfd(&pfd[PFD_PIPE_SESSION_CTL], ibuf_se_ctl); |
240 | set_pollfd(&pfd[PFD_PIPE_ROA], ibuf_rtr); |
241 | |
242 | i = PFD_PIPE_COUNT; |
243 | for (mctx = LIST_FIRST(&rde_mrts); mctx != 0; mctx = xmctx) { |
244 | xmctx = LIST_NEXT(mctx, entry); |
245 | |
246 | if (i >= pfd_elms) |
247 | fatalx("poll pfd too small"); |
248 | if (mctx->mrt.wbuf.queued) { |
249 | pfd[i].fd = mctx->mrt.wbuf.fd; |
250 | pfd[i].events = POLLOUT; |
251 | i++; |
252 | } else if (mctx->mrt.state == MRT_STATE_REMOVE) { |
253 | close(mctx->mrt.wbuf.fd); |
254 | LIST_REMOVE(mctx, entry); |
255 | free(mctx); |
256 | rde_mrt_cnt--; |
257 | } |
258 | } |
259 | |
260 | if (peer_imsg_pending() || rde_update_queue_pending() || |
261 | nexthop_pending() || rib_dump_pending()) |
262 | timeout = 0; |
263 | |
264 | if (poll(pfd, i, timeout) == -1) { |
265 | if (errno == EINTR) |
266 | continue; |
267 | fatal("poll error"); |
268 | } |
269 | |
270 | if (handle_pollfd(&pfd[PFD_PIPE_MAIN], ibuf_main) == -1) |
271 | fatalx("Lost connection to parent"); |
272 | else |
273 | rde_dispatch_imsg_parent(ibuf_main); |
274 | |
275 | if (handle_pollfd(&pfd[PFD_PIPE_SESSION], ibuf_se) == -1) { |
276 | log_warnx("RDE: Lost connection to SE"); |
277 | msgbuf_clear(&ibuf_se->w); |
278 | free(ibuf_se); |
279 | ibuf_se = NULL; |
280 | } else |
281 | rde_dispatch_imsg_session(ibuf_se); |
282 | |
283 | if (handle_pollfd(&pfd[PFD_PIPE_SESSION_CTL], ibuf_se_ctl) == |
284 | -1) { |
285 | log_warnx("RDE: Lost connection to SE control"); |
286 | msgbuf_clear(&ibuf_se_ctl->w); |
287 | free(ibuf_se_ctl); |
288 | ibuf_se_ctl = NULL; |
289 | } else |
290 | rde_dispatch_imsg_session(ibuf_se_ctl); |
291 | |
292 | if (handle_pollfd(&pfd[PFD_PIPE_ROA], ibuf_rtr) == -1) { |
293 | log_warnx("RDE: Lost connection to ROA"); |
294 | msgbuf_clear(&ibuf_rtr->w); |
295 | free(ibuf_rtr); |
296 | ibuf_rtr = NULL; |
297 | } else |
298 | rde_dispatch_imsg_rtr(ibuf_rtr); |
299 | |
300 | for (j = PFD_PIPE_COUNT, mctx = LIST_FIRST(&rde_mrts); |
301 | j < i && mctx != 0; j++) { |
302 | if (pfd[j].fd == mctx->mrt.wbuf.fd && |
303 | pfd[j].revents & POLLOUT) |
304 | mrt_write(&mctx->mrt); |
305 | mctx = LIST_NEXT(mctx, entry); |
306 | } |
307 | |
308 | peer_foreach(rde_dispatch_imsg_peer, NULL); |
309 | rib_dump_runner(); |
310 | nexthop_runner(); |
311 | if (ibuf_se && ibuf_se->w.queued < SESS_MSG_HIGH_MARK) { |
312 | for (aid = AID_MIN; aid < AID_MAX; aid++) |
313 | rde_update_queue_runner(aid); |
314 | } |
315 | /* commit pftable once per poll loop */ |
316 | rde_commit_pftable(); |
317 | } |
318 | |
319 | /* do not clean up on shutdown on production, it takes ages. */ |
320 | if (debug) |
321 | rde_shutdown(); |
322 | |
323 | free_config(conf); |
324 | free(pfd); |
325 | |
326 | /* close pipes */ |
327 | if (ibuf_se) { |
328 | msgbuf_clear(&ibuf_se->w); |
329 | close(ibuf_se->fd); |
330 | free(ibuf_se); |
331 | } |
332 | if (ibuf_se_ctl) { |
333 | msgbuf_clear(&ibuf_se_ctl->w); |
334 | close(ibuf_se_ctl->fd); |
335 | free(ibuf_se_ctl); |
336 | } |
337 | if (ibuf_rtr) { |
338 | msgbuf_clear(&ibuf_rtr->w); |
339 | close(ibuf_rtr->fd); |
340 | free(ibuf_rtr); |
341 | } |
342 | msgbuf_clear(&ibuf_main->w); |
343 | close(ibuf_main->fd); |
344 | free(ibuf_main); |
345 | |
346 | while ((mctx = LIST_FIRST(&rde_mrts)) != NULL) { |
347 | msgbuf_clear(&mctx->mrt.wbuf); |
348 | close(mctx->mrt.wbuf.fd); |
349 | LIST_REMOVE(mctx, entry); |
350 | free(mctx); |
351 | } |
352 | |
353 | log_info("route decision engine exiting"); |
354 | exit(0); |
355 | } |
356 | |
357 | struct network_config netconf_s, netconf_p; |
358 | struct filterstate netconf_state; |
359 | struct filter_set_head session_set = TAILQ_HEAD_INITIALIZER(session_set); |
360 | struct filter_set_head parent_set = TAILQ_HEAD_INITIALIZER(parent_set); |
361 | |
362 | void |
363 | rde_dispatch_imsg_session(struct imsgbuf *imsgbuf) |
364 | { |
365 | static struct flowspec *curflow; |
366 | struct imsg imsg; |
367 | struct rde_peer_stats stats; |
368 | struct ctl_show_set cset; |
369 | struct ctl_show_rib csr; |
370 | struct ctl_show_rib_request req; |
371 | struct session_up sup; |
372 | struct rde_peer *peer; |
373 | struct rde_aspath *asp; |
374 | struct filter_set *s; |
375 | struct as_set *aset; |
376 | struct rde_prefixset *pset; |
377 | uint8_t *asdata; |
378 | ssize_t n; |
379 | size_t aslen; |
380 | int verbose; |
381 | uint16_t len; |
382 | uint8_t aid; |
383 | |
384 | while (imsgbuf) { |
385 | if ((n = imsg_get(imsgbuf, &imsg)) == -1) |
386 | fatal("rde_dispatch_imsg_session: imsg_get error"); |
387 | if (n == 0) |
388 | break; |
389 | |
390 | switch (imsg.hdr.type) { |
391 | case IMSG_UPDATE: |
392 | case IMSG_REFRESH: |
393 | if ((peer = peer_get(imsg.hdr.peerid)) == NULL) { |
394 | log_warnx("rde_dispatch: unknown peer id %d", |
395 | imsg.hdr.peerid); |
396 | break; |
397 | } |
398 | peer_imsg_push(peer, &imsg); |
399 | break; |
400 | case IMSG_SESSION_ADD: |
401 | if (imsg.hdr.len - IMSG_HEADER_SIZE != |
402 | sizeof(struct peer_config)) |
403 | fatalx("incorrect size of session request"); |
404 | peer = peer_add(imsg.hdr.peerid, imsg.data, out_rules); |
405 | /* make sure rde_eval_all is on if needed. */ |
406 | if (peer->conf.flags & PEERFLAG_EVALUATE_ALL) |
407 | rde_eval_all = 1; |
408 | break; |
409 | case IMSG_SESSION_UP: |
410 | if ((peer = peer_get(imsg.hdr.peerid)) == NULL) { |
411 | log_warnx("%s: unknown peer id %d", |
412 | "IMSG_SESSION_UP", imsg.hdr.peerid); |
413 | break; |
414 | } |
415 | if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(sup)) |
416 | fatalx("incorrect size of session request"); |
417 | memcpy(&sup, imsg.data, sizeof(sup)); |
418 | peer_up(peer, &sup); |
419 | /* make sure rde_eval_all is on if needed. */ |
420 | if (peer_has_add_path(peer, AID_UNSPEC, CAPA_AP_SEND)) |
421 | rde_eval_all = 1; |
422 | break; |
423 | case IMSG_SESSION_DOWN: |
424 | if ((peer = peer_get(imsg.hdr.peerid)) == NULL) { |
425 | log_warnx("%s: unknown peer id %d", |
426 | "IMSG_SESSION_DOWN", imsg.hdr.peerid); |
427 | break; |
428 | } |
429 | peer_down(peer, NULL); |
430 | break; |
431 | case IMSG_SESSION_STALE: |
432 | case IMSG_SESSION_NOGRACE: |
433 | case IMSG_SESSION_FLUSH: |
434 | case IMSG_SESSION_RESTARTED: |
435 | if ((peer = peer_get(imsg.hdr.peerid)) == NULL) { |
436 | log_warnx("%s: unknown peer id %d", |
437 | "graceful restart", imsg.hdr.peerid); |
438 | break; |
439 | } |
440 | if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(aid)) { |
441 | log_warnx("%s: wrong imsg len", __func__); |
442 | break; |
443 | } |
444 | memcpy(&aid, imsg.data, sizeof(aid)); |
445 | if (aid >= AID_MAX) { |
446 | log_warnx("%s: bad AID", __func__); |
447 | break; |
448 | } |
449 | |
450 | switch (imsg.hdr.type) { |
451 | case IMSG_SESSION_STALE: |
452 | case IMSG_SESSION_NOGRACE: |
453 | peer_stale(peer, aid, |
454 | imsg.hdr.type == IMSG_SESSION_NOGRACE); |
455 | break; |
456 | case IMSG_SESSION_FLUSH: |
457 | peer_flush(peer, aid, peer->staletime[aid]); |
458 | break; |
459 | case IMSG_SESSION_RESTARTED: |
460 | if (peer->staletime[aid]) |
461 | peer_flush(peer, aid, |
462 | peer->staletime[aid]); |
463 | break; |
464 | } |
465 | break; |
466 | case IMSG_NETWORK_ADD: |
467 | if (imsg.hdr.len - IMSG_HEADER_SIZE != |
468 | sizeof(struct network_config)) { |
469 | log_warnx("rde_dispatch: wrong imsg len"); |
470 | break; |
471 | } |
472 | memcpy(&netconf_s, imsg.data, sizeof(netconf_s)); |
473 | TAILQ_INIT(&netconf_s.attrset); |
474 | rde_filterstate_init(&netconf_state); |
475 | asp = &netconf_state.aspath; |
476 | asp->aspath = aspath_get(NULL, 0); |
477 | asp->origin = ORIGIN_IGP; |
478 | asp->flags = F_ATTR_ORIGIN | F_ATTR_ASPATH | |
479 | F_ATTR_LOCALPREF | F_PREFIX_ANNOUNCED | |
480 | F_ANN_DYNAMIC; |
481 | break; |
482 | case IMSG_NETWORK_ASPATH: |
483 | if (imsg.hdr.len - IMSG_HEADER_SIZE < |
484 | sizeof(csr)) { |
485 | log_warnx("rde_dispatch: wrong imsg len"); |
486 | memset(&netconf_s, 0, sizeof(netconf_s)); |
487 | break; |
488 | } |
489 | aslen = imsg.hdr.len - IMSG_HEADER_SIZE - sizeof(csr); |
490 | asdata = imsg.data; |
491 | asdata += sizeof(struct ctl_show_rib); |
492 | memcpy(&csr, imsg.data, sizeof(csr)); |
493 | asp = &netconf_state.aspath; |
494 | asp->lpref = csr.local_pref; |
495 | asp->med = csr.med; |
496 | asp->weight = csr.weight; |
497 | asp->flags = csr.flags; |
498 | asp->origin = csr.origin; |
499 | asp->flags |= F_PREFIX_ANNOUNCED | F_ANN_DYNAMIC; |
500 | aspath_put(asp->aspath); |
501 | asp->aspath = aspath_get(asdata, aslen); |
502 | break; |
503 | case IMSG_NETWORK_ATTR: |
504 | if (imsg.hdr.len <= IMSG_HEADER_SIZE) { |
505 | log_warnx("rde_dispatch: wrong imsg len"); |
506 | break; |
507 | } |
508 | /* parse optional path attributes */ |
509 | len = imsg.hdr.len - IMSG_HEADER_SIZE; |
510 | if (rde_attr_add(&netconf_state, imsg.data, |
511 | len) == -1) { |
512 | log_warnx("rde_dispatch: bad network " |
513 | "attribute"); |
514 | rde_filterstate_clean(&netconf_state); |
515 | memset(&netconf_s, 0, sizeof(netconf_s)); |
516 | break; |
517 | } |
518 | break; |
519 | case IMSG_NETWORK_DONE: |
520 | if (imsg.hdr.len != IMSG_HEADER_SIZE) { |
521 | log_warnx("rde_dispatch: wrong imsg len"); |
522 | break; |
523 | } |
524 | TAILQ_CONCAT(&netconf_s.attrset, &session_set, entry); |
525 | switch (netconf_s.prefix.aid) { |
526 | case AID_INET: |
527 | if (netconf_s.prefixlen > 32) |
528 | goto badnet; |
529 | network_add(&netconf_s, &netconf_state); |
530 | break; |
531 | case AID_INET6: |
532 | if (netconf_s.prefixlen > 128) |
533 | goto badnet; |
534 | network_add(&netconf_s, &netconf_state); |
535 | break; |
536 | case 0: |
537 | /* something failed beforehand */ |
538 | break; |
539 | default: |
540 | badnet: |
541 | log_warnx("request to insert invalid network"); |
542 | break; |
543 | } |
544 | rde_filterstate_clean(&netconf_state); |
545 | break; |
546 | case IMSG_NETWORK_REMOVE: |
547 | if (imsg.hdr.len - IMSG_HEADER_SIZE != |
548 | sizeof(struct network_config)) { |
549 | log_warnx("rde_dispatch: wrong imsg len"); |
550 | break; |
551 | } |
552 | memcpy(&netconf_s, imsg.data, sizeof(netconf_s)); |
553 | TAILQ_INIT(&netconf_s.attrset); |
554 | |
555 | switch (netconf_s.prefix.aid) { |
556 | case AID_INET: |
557 | if (netconf_s.prefixlen > 32) |
558 | goto badnetdel; |
559 | network_delete(&netconf_s); |
560 | break; |
561 | case AID_INET6: |
562 | if (netconf_s.prefixlen > 128) |
563 | goto badnetdel; |
564 | network_delete(&netconf_s); |
565 | break; |
566 | default: |
567 | badnetdel: |
568 | log_warnx("request to remove invalid network"); |
569 | break; |
570 | } |
571 | break; |
572 | case IMSG_NETWORK_FLUSH: |
573 | if (imsg.hdr.len != IMSG_HEADER_SIZE) { |
574 | log_warnx("rde_dispatch: wrong imsg len"); |
575 | break; |
576 | } |
577 | if (rib_dump_new(RIB_ADJ_IN, AID_UNSPEC, |
578 | RDE_RUNNER_ROUNDS, NULL, network_flush_upcall, |
579 | NULL, NULL) == -1) |
580 | log_warn("rde_dispatch: IMSG_NETWORK_FLUSH"); |
581 | break; |
582 | case IMSG_FLOWSPEC_ADD: |
583 | if (imsg.hdr.len - IMSG_HEADER_SIZE <= FLOWSPEC_SIZE) { |
584 | log_warnx("rde_dispatch: wrong imsg len"); |
585 | break; |
586 | } |
587 | if (curflow != NULL) { |
588 | log_warnx("rde_dispatch: " |
589 | "unexpected flowspec add"); |
590 | break; |
591 | } |
592 | curflow = malloc(imsg.hdr.len - IMSG_HEADER_SIZE); |
593 | if (curflow == NULL) |
594 | fatal(NULL); |
595 | memcpy(curflow, imsg.data, |
596 | imsg.hdr.len - IMSG_HEADER_SIZE); |
597 | if (curflow->len + FLOWSPEC_SIZE != |
598 | imsg.hdr.len - IMSG_HEADER_SIZE) { |
599 | free(curflow); |
600 | curflow = NULL; |
601 | log_warnx("rde_dispatch: wrong flowspec len"); |
602 | break; |
603 | } |
604 | rde_filterstate_init(&netconf_state); |
605 | asp = &netconf_state.aspath; |
606 | asp->aspath = aspath_get(NULL, 0); |
607 | asp->origin = ORIGIN_IGP; |
608 | asp->flags = F_ATTR_ORIGIN | F_ATTR_ASPATH | |
609 | F_ATTR_LOCALPREF | F_PREFIX_ANNOUNCED | |
610 | F_ANN_DYNAMIC; |
611 | break; |
612 | case IMSG_FLOWSPEC_DONE: |
613 | if (curflow == NULL) { |
614 | log_warnx("rde_dispatch: " |
615 | "unexpected flowspec done"); |
616 | break; |
617 | } |
618 | |
619 | if (flowspec_valid(curflow->data, curflow->len, |
620 | curflow->aid == AID_FLOWSPECv6) == -1) |
621 | log_warnx("invalid flowspec update received " |
622 | "from bgpctl"); |
623 | else |
624 | flowspec_add(curflow, &netconf_state, |
625 | &session_set); |
626 | |
627 | rde_filterstate_clean(&netconf_state); |
628 | filterset_free(&session_set); |
629 | free(curflow); |
630 | curflow = NULL; |
631 | break; |
632 | case IMSG_FLOWSPEC_REMOVE: |
633 | if (imsg.hdr.len - IMSG_HEADER_SIZE <= FLOWSPEC_SIZE) { |
634 | log_warnx("rde_dispatch: wrong imsg len"); |
635 | break; |
636 | } |
637 | if (curflow != NULL) { |
638 | log_warnx("rde_dispatch: " |
639 | "unexpected flowspec remove"); |
640 | break; |
641 | } |
642 | curflow = malloc(imsg.hdr.len - IMSG_HEADER_SIZE); |
643 | if (curflow == NULL) |
644 | fatal(NULL); |
645 | memcpy(curflow, imsg.data, |
646 | imsg.hdr.len - IMSG_HEADER_SIZE); |
647 | if (curflow->len + FLOWSPEC_SIZE != |
648 | imsg.hdr.len - IMSG_HEADER_SIZE) { |
649 | free(curflow); |
650 | curflow = NULL; |
651 | log_warnx("rde_dispatch: wrong flowspec len"); |
652 | break; |
653 | } |
654 | |
655 | if (flowspec_valid(curflow->data, curflow->len, |
656 | curflow->aid == AID_FLOWSPECv6) == -1) |
657 | log_warnx("invalid flowspec withdraw received " |
658 | "from bgpctl"); |
659 | else |
660 | flowspec_delete(curflow); |
661 | |
662 | free(curflow); |
663 | curflow = NULL; |
664 | break; |
665 | case IMSG_FLOWSPEC_FLUSH: |
666 | if (imsg.hdr.len != IMSG_HEADER_SIZE) { |
667 | log_warnx("rde_dispatch: wrong imsg len"); |
668 | break; |
669 | } |
670 | prefix_flowspec_dump(AID_UNSPEC, NULL, |
671 | flowspec_flush_upcall, NULL); |
672 | break; |
673 | case IMSG_FILTER_SET: |
674 | if (imsg.hdr.len - IMSG_HEADER_SIZE != |
675 | sizeof(struct filter_set)) { |
676 | log_warnx("rde_dispatch: wrong imsg len"); |
677 | break; |
678 | } |
679 | if ((s = malloc(sizeof(struct filter_set))) == NULL) |
680 | fatal(NULL); |
681 | memcpy(s, imsg.data, sizeof(struct filter_set)); |
682 | if (s->type == ACTION_SET_NEXTHOP) { |
683 | s->action.nh_ref = |
684 | nexthop_get(&s->action.nexthop); |
685 | s->type = ACTION_SET_NEXTHOP_REF; |
686 | } |
687 | TAILQ_INSERT_TAIL(&session_set, s, entry); |
688 | break; |
689 | case IMSG_CTL_SHOW_NETWORK: |
690 | case IMSG_CTL_SHOW_RIB: |
691 | case IMSG_CTL_SHOW_RIB_PREFIX: |
692 | if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(req)) { |
693 | log_warnx("rde_dispatch: wrong imsg len"); |
694 | break; |
695 | } |
696 | memcpy(&req, imsg.data, sizeof(req)); |
697 | rde_dump_ctx_new(&req, imsg.hdr.pid, imsg.hdr.type); |
698 | break; |
699 | case IMSG_CTL_SHOW_FLOWSPEC: |
700 | if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(req)) { |
701 | log_warnx("rde_dispatch: wrong imsg len"); |
702 | break; |
703 | } |
704 | memcpy(&req, imsg.data, sizeof(req)); |
705 | prefix_flowspec_dump(req.aid, &imsg.hdr.pid, |
706 | flowspec_dump_upcall, flowspec_dump_done); |
707 | break; |
708 | case IMSG_CTL_SHOW_NEIGHBOR: |
709 | if (imsg.hdr.len - IMSG_HEADER_SIZE != 0) { |
710 | log_warnx("rde_dispatch: wrong imsg len"); |
711 | break; |
712 | } |
713 | peer = peer_get(imsg.hdr.peerid); |
714 | if (peer != NULL) |
715 | memcpy(&stats, &peer->stats, sizeof(stats)); |
716 | else |
717 | memset(&stats, 0, sizeof(stats)); |
718 | imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_NEIGHBOR, |
719 | imsg.hdr.peerid, imsg.hdr.pid, -1, |
720 | &stats, sizeof(stats)); |
721 | break; |
722 | case IMSG_CTL_SHOW_RIB_MEM: |
723 | imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_RIB_MEM, 0, |
724 | imsg.hdr.pid, -1, &rdemem, sizeof(rdemem)); |
725 | break; |
726 | case IMSG_CTL_SHOW_SET: |
727 | /* first roa set */ |
728 | pset = &rde_roa; |
729 | memset(&cset, 0, sizeof(cset)); |
730 | cset.type = ROA_SET; |
731 | strlcpy(cset.name, "RPKI ROA", sizeof(cset.name)); |
732 | cset.lastchange = pset->lastchange; |
733 | cset.v4_cnt = pset->th.v4_cnt; |
734 | cset.v6_cnt = pset->th.v6_cnt; |
735 | imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_SET, 0, |
736 | imsg.hdr.pid, -1, &cset, sizeof(cset)); |
737 | |
738 | /* then aspa set */ |
739 | memset(&cset, 0, sizeof(cset)); |
740 | cset.type = ASPA_SET; |
741 | strlcpy(cset.name, "RPKI ASPA", sizeof(cset.name)); |
742 | aspa_table_stats(rde_aspa, &cset); |
743 | imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_SET, 0, |
744 | imsg.hdr.pid, -1, &cset, sizeof(cset)); |
745 | |
746 | SIMPLEQ_FOREACH(aset, &conf->as_sets, entry) { |
747 | memset(&cset, 0, sizeof(cset)); |
748 | cset.type = ASNUM_SET; |
749 | strlcpy(cset.name, aset->name, |
750 | sizeof(cset.name)); |
751 | cset.lastchange = aset->lastchange; |
752 | cset.as_cnt = set_nmemb(aset->set); |
753 | imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_SET, 0, |
754 | imsg.hdr.pid, -1, &cset, sizeof(cset)); |
755 | } |
756 | SIMPLEQ_FOREACH(pset, &conf->rde_prefixsets, entry) { |
757 | memset(&cset, 0, sizeof(cset)); |
758 | cset.type = PREFIX_SET; |
759 | strlcpy(cset.name, pset->name, |
760 | sizeof(cset.name)); |
761 | cset.lastchange = pset->lastchange; |
762 | cset.v4_cnt = pset->th.v4_cnt; |
763 | cset.v6_cnt = pset->th.v6_cnt; |
764 | imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_SET, 0, |
765 | imsg.hdr.pid, -1, &cset, sizeof(cset)); |
766 | } |
767 | SIMPLEQ_FOREACH(pset, &conf->rde_originsets, entry) { |
768 | memset(&cset, 0, sizeof(cset)); |
769 | cset.type = ORIGIN_SET; |
770 | strlcpy(cset.name, pset->name, |
771 | sizeof(cset.name)); |
772 | cset.lastchange = pset->lastchange; |
773 | cset.v4_cnt = pset->th.v4_cnt; |
774 | cset.v6_cnt = pset->th.v6_cnt; |
775 | imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_SET, 0, |
776 | imsg.hdr.pid, -1, &cset, sizeof(cset)); |
777 | } |
778 | imsg_compose(ibuf_se_ctl, IMSG_CTL_END, 0, imsg.hdr.pid, |
779 | -1, NULL, 0); |
780 | break; |
781 | case IMSG_CTL_LOG_VERBOSE: |
782 | /* already checked by SE */ |
783 | memcpy(&verbose, imsg.data, sizeof(verbose)); |
784 | log_setverbose(verbose); |
785 | break; |
786 | case IMSG_CTL_END: |
787 | imsg_compose(ibuf_se_ctl, IMSG_CTL_END, 0, imsg.hdr.pid, |
788 | -1, NULL, 0); |
789 | break; |
790 | case IMSG_CTL_TERMINATE: |
791 | rde_dump_ctx_terminate(imsg.hdr.pid); |
792 | break; |
793 | case IMSG_XON: |
794 | if (imsg.hdr.peerid) { |
795 | peer = peer_get(imsg.hdr.peerid); |
796 | if (peer) |
797 | peer->throttled = 0; |
798 | } else { |
799 | rde_dump_ctx_throttle(imsg.hdr.pid, 0); |
800 | } |
801 | break; |
802 | case IMSG_XOFF: |
803 | if (imsg.hdr.peerid) { |
804 | peer = peer_get(imsg.hdr.peerid); |
805 | if (peer) |
806 | peer->throttled = 1; |
807 | } else { |
808 | rde_dump_ctx_throttle(imsg.hdr.pid, 1); |
809 | } |
810 | break; |
811 | case IMSG_RECONF_DRAIN: |
812 | imsg_compose(ibuf_se, IMSG_RECONF_DRAIN, 0, 0, |
813 | -1, NULL, 0); |
814 | break; |
815 | default: |
816 | break; |
817 | } |
818 | imsg_free(&imsg); |
819 | } |
820 | } |
821 | |
822 | void |
823 | rde_dispatch_imsg_parent(struct imsgbuf *imsgbuf) |
824 | { |
825 | static struct rde_prefixset *last_prefixset; |
826 | static struct as_set *last_as_set; |
827 | static struct l3vpn *vpn; |
828 | static struct flowspec *curflow; |
829 | struct imsg imsg; |
830 | struct mrt xmrt; |
831 | struct roa roa; |
832 | struct rde_rib rr; |
833 | struct filterstate state; |
834 | struct imsgbuf *i; |
835 | struct filter_head *nr; |
836 | struct filter_rule *r; |
837 | struct filter_set *s; |
838 | struct rib *rib; |
839 | struct rde_prefixset *ps; |
840 | struct rde_aspath *asp; |
841 | struct prefixset_item psi; |
842 | char *name; |
843 | size_t nmemb; |
844 | int n, fd, rv; |
845 | uint16_t rid; |
846 | |
847 | while (imsgbuf) { |
848 | if ((n = imsg_get(imsgbuf, &imsg)) == -1) |
849 | fatal("rde_dispatch_imsg_parent: imsg_get error"); |
850 | if (n == 0) |
851 | break; |
852 | |
853 | switch (imsg.hdr.type) { |
854 | case IMSG_SOCKET_CONN: |
855 | case IMSG_SOCKET_CONN_CTL: |
856 | case IMSG_SOCKET_CONN_RTR: |
857 | if ((fd = imsg_get_fd(&imsg)) == -1) { |
858 | log_warnx("expected to receive imsg fd " |
859 | "but didn't receive any"); |
860 | break; |
861 | } |
862 | if ((i = malloc(sizeof(struct imsgbuf))) == NULL) |
863 | fatal(NULL); |
864 | imsg_init(i, fd); |
865 | switch (imsg.hdr.type) { |
866 | case IMSG_SOCKET_CONN: |
867 | if (ibuf_se) { |
868 | log_warnx("Unexpected imsg connection " |
869 | "to SE received"); |
870 | msgbuf_clear(&ibuf_se->w); |
871 | free(ibuf_se); |
872 | } |
873 | ibuf_se = i; |
874 | break; |
875 | case IMSG_SOCKET_CONN_CTL: |
876 | if (ibuf_se_ctl) { |
877 | log_warnx("Unexpected imsg ctl " |
878 | "connection to SE received"); |
879 | msgbuf_clear(&ibuf_se_ctl->w); |
880 | free(ibuf_se_ctl); |
881 | } |
882 | ibuf_se_ctl = i; |
883 | break; |
884 | case IMSG_SOCKET_CONN_RTR: |
885 | if (ibuf_rtr) { |
886 | log_warnx("Unexpected imsg ctl " |
887 | "connection to ROA received"); |
888 | msgbuf_clear(&ibuf_rtr->w); |
889 | free(ibuf_rtr); |
890 | } |
891 | ibuf_rtr = i; |
892 | break; |
893 | } |
894 | break; |
895 | case IMSG_NETWORK_ADD: |
896 | if (imsg.hdr.len - IMSG_HEADER_SIZE != |
897 | sizeof(struct network_config)) { |
898 | log_warnx("rde_dispatch: wrong imsg len"); |
899 | break; |
900 | } |
901 | memcpy(&netconf_p, imsg.data, sizeof(netconf_p)); |
902 | TAILQ_INIT(&netconf_p.attrset); |
903 | break; |
904 | case IMSG_NETWORK_DONE: |
905 | TAILQ_CONCAT(&netconf_p.attrset, &parent_set, entry); |
906 | |
907 | rde_filterstate_init(&state); |
908 | asp = &state.aspath; |
909 | asp->aspath = aspath_get(NULL, 0); |
910 | asp->origin = ORIGIN_IGP; |
911 | asp->flags = F_ATTR_ORIGIN | F_ATTR_ASPATH | |
912 | F_ATTR_LOCALPREF | F_PREFIX_ANNOUNCED; |
913 | |
914 | network_add(&netconf_p, &state); |
915 | rde_filterstate_clean(&state); |
916 | break; |
917 | case IMSG_NETWORK_REMOVE: |
918 | if (imsg.hdr.len - IMSG_HEADER_SIZE != |
919 | sizeof(struct network_config)) { |
920 | log_warnx("rde_dispatch: wrong imsg len"); |
921 | break; |
922 | } |
923 | memcpy(&netconf_p, imsg.data, sizeof(netconf_p)); |
924 | TAILQ_INIT(&netconf_p.attrset); |
925 | network_delete(&netconf_p); |
926 | break; |
927 | case IMSG_FLOWSPEC_ADD: |
928 | if (imsg.hdr.len - IMSG_HEADER_SIZE <= FLOWSPEC_SIZE) { |
929 | log_warnx("rde_dispatch: wrong imsg len"); |
930 | break; |
931 | } |
932 | if (curflow != NULL) { |
933 | log_warnx("rde_dispatch: " |
934 | "unexpected flowspec add"); |
935 | break; |
936 | } |
937 | curflow = malloc(imsg.hdr.len - IMSG_HEADER_SIZE); |
938 | if (curflow == NULL) |
939 | fatal(NULL); |
940 | memcpy(curflow, imsg.data, |
941 | imsg.hdr.len - IMSG_HEADER_SIZE); |
942 | if (curflow->len + FLOWSPEC_SIZE != |
943 | imsg.hdr.len - IMSG_HEADER_SIZE) { |
944 | free(curflow); |
945 | curflow = NULL; |
946 | log_warnx("rde_dispatch: wrong flowspec len"); |
947 | break; |
948 | } |
949 | break; |
950 | case IMSG_FLOWSPEC_DONE: |
951 | if (curflow == NULL) { |
952 | log_warnx("rde_dispatch: " |
953 | "unexpected flowspec done"); |
954 | break; |
955 | } |
956 | |
957 | rde_filterstate_init(&state); |
958 | asp = &state.aspath; |
959 | asp->aspath = aspath_get(NULL, 0); |
960 | asp->origin = ORIGIN_IGP; |
961 | asp->flags = F_ATTR_ORIGIN | F_ATTR_ASPATH | |
962 | F_ATTR_LOCALPREF | F_PREFIX_ANNOUNCED; |
963 | |
964 | if (flowspec_valid(curflow->data, curflow->len, |
965 | curflow->aid == AID_FLOWSPECv6) == -1) |
966 | log_warnx("invalid flowspec update received " |
967 | "from parent"); |
968 | else |
969 | flowspec_add(curflow, &state, &parent_set); |
970 | |
971 | rde_filterstate_clean(&state); |
972 | filterset_free(&parent_set); |
973 | free(curflow); |
974 | curflow = NULL; |
975 | break; |
976 | case IMSG_FLOWSPEC_REMOVE: |
977 | if (imsg.hdr.len - IMSG_HEADER_SIZE <= FLOWSPEC_SIZE) { |
978 | log_warnx("rde_dispatch: wrong imsg len"); |
979 | break; |
980 | } |
981 | if (curflow != NULL) { |
982 | log_warnx("rde_dispatch: " |
983 | "unexpected flowspec remove"); |
984 | break; |
985 | } |
986 | curflow = malloc(imsg.hdr.len - IMSG_HEADER_SIZE); |
987 | if (curflow == NULL) |
988 | fatal(NULL); |
989 | memcpy(curflow, imsg.data, |
990 | imsg.hdr.len - IMSG_HEADER_SIZE); |
991 | if (curflow->len + FLOWSPEC_SIZE != |
992 | imsg.hdr.len - IMSG_HEADER_SIZE) { |
993 | free(curflow); |
994 | curflow = NULL; |
995 | log_warnx("rde_dispatch: wrong flowspec len"); |
996 | break; |
997 | } |
998 | |
999 | if (flowspec_valid(curflow->data, curflow->len, |
1000 | curflow->aid == AID_FLOWSPECv6) == -1) |
1001 | log_warnx("invalid flowspec withdraw received " |
1002 | "from parent"); |
1003 | else |
1004 | flowspec_delete(curflow); |
1005 | |
1006 | free(curflow); |
1007 | curflow = NULL; |
1008 | break; |
1009 | case IMSG_RECONF_CONF: |
1010 | if (imsg.hdr.len - IMSG_HEADER_SIZE != |
1011 | sizeof(struct bgpd_config)) |
1012 | fatalx("IMSG_RECONF_CONF bad len"); |
1013 | out_rules_tmp = calloc(1, sizeof(struct filter_head)); |
1014 | if (out_rules_tmp == NULL) |
1015 | fatal(NULL); |
1016 | TAILQ_INIT(out_rules_tmp); |
1017 | nconf = new_config(); |
1018 | copy_config(nconf, imsg.data); |
1019 | |
1020 | for (rid = 0; rid < rib_size; rid++) { |
1021 | if ((rib = rib_byid(rid)) == NULL) |
1022 | continue; |
1023 | rib->state = RECONF_DELETE; |
1024 | rib->fibstate = RECONF_NONE; |
1025 | } |
1026 | break; |
1027 | case IMSG_RECONF_RIB: |
1028 | if (imsg.hdr.len - IMSG_HEADER_SIZE != |
1029 | sizeof(struct rde_rib)) |
1030 | fatalx("IMSG_RECONF_RIB bad len"); |
1031 | memcpy(&rr, imsg.data, sizeof(rr)); |
1032 | rib = rib_byid(rib_find(rr.name)); |
1033 | if (rib == NULL) { |
1034 | rib = rib_new(rr.name, rr.rtableid, rr.flags); |
1035 | } else if (rib->flags == rr.flags && |
1036 | rib->rtableid == rr.rtableid) { |
1037 | /* no change to rib apart from filters */ |
1038 | rib->state = RECONF_KEEP; |
1039 | } else { |
1040 | /* reload rib because something changed */ |
1041 | rib->flags_tmp = rr.flags; |
1042 | rib->rtableid_tmp = rr.rtableid; |
1043 | rib->state = RECONF_RELOAD; |
1044 | } |
1045 | break; |
1046 | case IMSG_RECONF_FILTER: |
1047 | if (imsg.hdr.len - IMSG_HEADER_SIZE != |
1048 | sizeof(struct filter_rule)) |
1049 | fatalx("IMSG_RECONF_FILTER bad len"); |
1050 | if ((r = malloc(sizeof(struct filter_rule))) == NULL) |
1051 | fatal(NULL); |
1052 | memcpy(r, imsg.data, sizeof(struct filter_rule)); |
1053 | if (r->match.prefixset.name[0] != '\0') { |
1054 | r->match.prefixset.ps = |
1055 | rde_find_prefixset(r->match.prefixset.name, |
1056 | &nconf->rde_prefixsets); |
1057 | if (r->match.prefixset.ps == NULL) |
1058 | log_warnx("%s: no prefixset for %s", |
1059 | __func__, r->match.prefixset.name); |
1060 | } |
1061 | if (r->match.originset.name[0] != '\0') { |
1062 | r->match.originset.ps = |
1063 | rde_find_prefixset(r->match.originset.name, |
1064 | &nconf->rde_originsets); |
1065 | if (r->match.originset.ps == NULL) |
1066 | log_warnx("%s: no origin-set for %s", |
1067 | __func__, r->match.originset.name); |
1068 | } |
1069 | if (r->match.as.flags & AS_FLAG_AS_SET_NAME) { |
1070 | struct as_set * aset; |
1071 | |
1072 | aset = as_sets_lookup(&nconf->as_sets, |
1073 | r->match.as.name); |
1074 | if (aset == NULL) { |
1075 | log_warnx("%s: no as-set for %s", |
1076 | __func__, r->match.as.name); |
1077 | } else { |
1078 | r->match.as.flags = AS_FLAG_AS_SET; |
1079 | r->match.as.aset = aset; |
1080 | } |
1081 | } |
1082 | TAILQ_INIT(&r->set); |
1083 | TAILQ_CONCAT(&r->set, &parent_set, entry); |
1084 | if ((rib = rib_byid(rib_find(r->rib))) == NULL) { |
1085 | log_warnx("IMSG_RECONF_FILTER: filter rule " |
1086 | "for nonexistent rib %s", r->rib); |
1087 | filterset_free(&r->set); |
1088 | free(r); |
1089 | break; |
1090 | } |
1091 | r->peer.ribid = rib->id; |
1092 | if (r->dir == DIR_IN) { |
1093 | nr = rib->in_rules_tmp; |
1094 | if (nr == NULL) { |
1095 | nr = calloc(1, |
1096 | sizeof(struct filter_head)); |
1097 | if (nr == NULL) |
1098 | fatal(NULL); |
1099 | TAILQ_INIT(nr); |
1100 | rib->in_rules_tmp = nr; |
1101 | } |
1102 | TAILQ_INSERT_TAIL(nr, r, entry); |
1103 | } else { |
1104 | TAILQ_INSERT_TAIL(out_rules_tmp, r, entry); |
1105 | } |
1106 | break; |
1107 | case IMSG_RECONF_PREFIX_SET: |
1108 | case IMSG_RECONF_ORIGIN_SET: |
1109 | if (imsg.hdr.len - IMSG_HEADER_SIZE != |
1110 | sizeof(ps->name)) |
1111 | fatalx("IMSG_RECONF_PREFIX_SET bad len"); |
1112 | ps = calloc(1, sizeof(struct rde_prefixset)); |
1113 | if (ps == NULL) |
1114 | fatal(NULL); |
1115 | memcpy(ps->name, imsg.data, sizeof(ps->name)); |
1116 | if (imsg.hdr.type == IMSG_RECONF_ORIGIN_SET) { |
1117 | SIMPLEQ_INSERT_TAIL(&nconf->rde_originsets, ps, |
1118 | entry); |
1119 | } else { |
1120 | SIMPLEQ_INSERT_TAIL(&nconf->rde_prefixsets, ps, |
1121 | entry); |
1122 | } |
1123 | last_prefixset = ps; |
1124 | break; |
1125 | case IMSG_RECONF_ROA_ITEM: |
1126 | if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(roa)) |
1127 | fatalx("IMSG_RECONF_ROA_ITEM bad len"); |
1128 | memcpy(&roa, imsg.data, sizeof(roa)); |
1129 | rv = trie_roa_add(&last_prefixset->th, &roa); |
Value stored to 'rv' is never read
1130 | break; |
1131 | case IMSG_RECONF_PREFIX_SET_ITEM: |
1132 | if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(psi)) |
1133 | fatalx("IMSG_RECONF_PREFIX_SET_ITEM bad len"); |
1134 | memcpy(&psi, imsg.data, sizeof(psi)); |
1135 | if (last_prefixset == NULL) |
1136 | fatalx("King Bula has no prefixset"); |
1137 | rv = trie_add(&last_prefixset->th, |
1138 | &psi.p.addr, psi.p.len, |
1139 | psi.p.len_min, psi.p.len_max); |
1140 | if (rv == -1) |
1141 | log_warnx("trie_add(%s) %s/%u failed", |
1142 | last_prefixset->name, log_addr(&psi.p.addr), |
1143 | psi.p.len); |
1144 | break; |
1145 | case IMSG_RECONF_AS_SET: |
1146 | if (imsg.hdr.len - IMSG_HEADER_SIZE != |
1147 | sizeof(nmemb) + SET_NAME_LEN) |
1148 | fatalx("IMSG_RECONF_AS_SET bad len"); |
1149 | memcpy(&nmemb, imsg.data, sizeof(nmemb)); |
1150 | name = (char *)imsg.data + sizeof(nmemb); |
1151 | if (as_sets_lookup(&nconf->as_sets, name) != NULL) |
1152 | fatalx("duplicate as-set %s", name); |
1153 | last_as_set = as_sets_new(&nconf->as_sets, name, nmemb, |
1154 | sizeof(uint32_t)); |
1155 | break; |
1156 | case IMSG_RECONF_AS_SET_ITEMS: |
1157 | nmemb = imsg.hdr.len - IMSG_HEADER_SIZE; |
1158 | nmemb /= sizeof(uint32_t); |
1159 | if (set_add(last_as_set->set, imsg.data, nmemb) != 0) |
1160 | fatal(NULL); |
1161 | break; |
1162 | case IMSG_RECONF_AS_SET_DONE: |
1163 | set_prep(last_as_set->set); |
1164 | last_as_set = NULL; |
1165 | break; |
1166 | case IMSG_RECONF_VPN: |
1167 | if (imsg.hdr.len - IMSG_HEADER_SIZE != |
1168 | sizeof(struct l3vpn)) |
1169 | fatalx("IMSG_RECONF_VPN bad len"); |
1170 | if ((vpn = malloc(sizeof(struct l3vpn))) == NULL) |
1171 | fatal(NULL); |
1172 | memcpy(vpn, imsg.data, sizeof(struct l3vpn)); |
1173 | TAILQ_INIT(&vpn->import); |
1174 | TAILQ_INIT(&vpn->export); |
1175 | TAILQ_INIT(&vpn->net_l); |
1176 | SIMPLEQ_INSERT_TAIL(&nconf->l3vpns, vpn, entry); |
1177 | break; |
1178 | case IMSG_RECONF_VPN_EXPORT: |
1179 | if (vpn == NULL) { |
1180 | log_warnx("rde_dispatch_imsg_parent: " |
1181 | "IMSG_RECONF_VPN_EXPORT unexpected"); |
1182 | break; |
1183 | } |
1184 | TAILQ_CONCAT(&vpn->export, &parent_set, entry); |
1185 | break; |
1186 | case IMSG_RECONF_VPN_IMPORT: |
1187 | if (vpn == NULL) { |
1188 | log_warnx("rde_dispatch_imsg_parent: " |
1189 | "IMSG_RECONF_VPN_IMPORT unexpected"); |
1190 | break; |
1191 | } |
1192 | TAILQ_CONCAT(&vpn->import, &parent_set, entry); |
1193 | break; |
1194 | case IMSG_RECONF_VPN_DONE: |
1195 | break; |
1196 | case IMSG_RECONF_DRAIN: |
1197 | imsg_compose(ibuf_main, IMSG_RECONF_DRAIN, 0, 0, |
1198 | -1, NULL, 0); |
1199 | break; |
1200 | case IMSG_RECONF_DONE: |
1201 | if (nconf == NULL) |
1202 | fatalx("got IMSG_RECONF_DONE but no config"); |
1203 | last_prefixset = NULL; |
1204 | |
1205 | rde_reload_done(); |
1206 | break; |
1207 | case IMSG_NEXTHOP_UPDATE: |
1208 | nexthop_update(imsg.data); |
1209 | break; |
1210 | case IMSG_FILTER_SET: |
1211 | if (imsg.hdr.len > IMSG_HEADER_SIZE + |
1212 | sizeof(struct filter_set)) |
1213 | fatalx("IMSG_FILTER_SET bad len"); |
1214 | if ((s = malloc(sizeof(struct filter_set))) == NULL) |
1215 | fatal(NULL); |
1216 | memcpy(s, imsg.data, sizeof(struct filter_set)); |
1217 | if (s->type == ACTION_SET_NEXTHOP) { |
1218 | s->action.nh_ref = |
1219 | nexthop_get(&s->action.nexthop); |
1220 | s->type = ACTION_SET_NEXTHOP_REF; |
1221 | } |
1222 | TAILQ_INSERT_TAIL(&parent_set, s, entry); |
1223 | break; |
1224 | case IMSG_MRT_OPEN: |
1225 | case IMSG_MRT_REOPEN: |
1226 | if (imsg.hdr.len > IMSG_HEADER_SIZE + |
1227 | sizeof(struct mrt)) { |
1228 | log_warnx("wrong imsg len"); |
1229 | break; |
1230 | } |
1231 | memcpy(&xmrt, imsg.data, sizeof(xmrt)); |
1232 | if ((fd = imsg_get_fd(&imsg)) == -1) |
1233 | log_warnx("expected to receive fd for mrt dump " |
1234 | "but didn't receive any"); |
1235 | else if (xmrt.type == MRT_TABLE_DUMP || |
1236 | xmrt.type == MRT_TABLE_DUMP_MP || |
1237 | xmrt.type == MRT_TABLE_DUMP_V2) { |
1238 | rde_dump_mrt_new(&xmrt, imsg.hdr.pid, fd); |
1239 | } else |
1240 | close(fd); |
1241 | break; |
1242 | case IMSG_MRT_CLOSE: |
1243 | /* ignore end message because a dump is atomic */ |
1244 | break; |
1245 | default: |
1246 | fatalx("unhandled IMSG %u", imsg.hdr.type); |
1247 | } |
1248 | imsg_free(&imsg); |
1249 | } |
1250 | } |
1251 | |
1252 | void |
1253 | rde_dispatch_imsg_rtr(struct imsgbuf *imsgbuf) |
1254 | { |
1255 | static struct aspa_set *aspa; |
1256 | struct imsg imsg; |
1257 | struct roa roa; |
1258 | struct aspa_prep ap; |
1259 | int n; |
1260 | |
1261 | while (imsgbuf) { |
1262 | if ((n = imsg_get(imsgbuf, &imsg)) == -1) |
1263 | fatal("rde_dispatch_imsg_parent: imsg_get error"); |
1264 | if (n == 0) |
1265 | break; |
1266 | |
1267 | switch (imsg.hdr.type) { |
1268 | case IMSG_RECONF_ROA_SET: |
1269 | /* start of update */ |
1270 | trie_free(&roa_new.th); /* clear new roa */ |
1271 | break; |
1272 | case IMSG_RECONF_ROA_ITEM: |
1273 | if (imsg.hdr.len - IMSG_HEADER_SIZE != |
1274 | sizeof(roa)) |
1275 | fatalx("IMSG_RECONF_ROA_ITEM bad len"); |
1276 | memcpy(&roa, imsg.data, sizeof(roa)); |
1277 | if (trie_roa_add(&roa_new.th, &roa) != 0) { |
1278 | struct bgpd_addr p = { |
1279 | .aid = roa.aid, |
1280 | .v6 = roa.prefix.inet6 |
1281 | }; |
1282 | log_warnx("trie_roa_add %s/%u failed", |
1283 | log_addr(&p), roa.prefixlen); |
1284 | } |
1285 | break; |
1286 | case IMSG_RECONF_ASPA_PREP: |
1287 | if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(ap)) |
1288 | fatalx("IMSG_RECONF_ASPA_PREP bad len"); |
1289 | if (aspa_new) |
1290 | fatalx("unexpected IMSG_RECONF_ASPA_PREP"); |
1291 | memcpy(&ap, imsg.data, sizeof(ap)); |
1292 | aspa_new = aspa_table_prep(ap.entries, ap.datasize); |
1293 | break; |
1294 | case IMSG_RECONF_ASPA: |
1295 | if (aspa_new == NULL) |
1296 | fatalx("unexpected IMSG_RECONF_ASPA"); |
1297 | if (aspa != NULL) |
1298 | fatalx("IMSG_RECONF_ASPA already sent"); |
1299 | if (imsg.hdr.len - IMSG_HEADER_SIZE != |
1300 | sizeof(uint32_t) * 2) |
1301 | fatalx("IMSG_RECONF_ASPA bad len"); |
1302 | |
1303 | if ((aspa = calloc(1, sizeof(*aspa))) == NULL) |
1304 | fatal("IMSG_RECONF_ASPA"); |
1305 | memcpy(&aspa->as, imsg.data, sizeof(aspa->as)); |
1306 | memcpy(&aspa->num, (char *)imsg.data + sizeof(aspa->as), |
1307 | sizeof(aspa->num)); |
1308 | break; |
1309 | case IMSG_RECONF_ASPA_TAS: |
1310 | if (aspa == NULL) |
1311 | fatalx("unexpected IMSG_RECONF_ASPA_TAS"); |
1312 | if (imsg.hdr.len - IMSG_HEADER_SIZE != |
1313 | aspa->num * sizeof(uint32_t)) |
1314 | fatalx("IMSG_RECONF_ASPA_TAS bad len"); |
1315 | aspa->tas = reallocarray(NULL, aspa->num, |
1316 | sizeof(uint32_t)); |
1317 | if (aspa->tas == NULL) |
1318 | fatal("IMSG_RECONF_ASPA_TAS"); |
1319 | memcpy(aspa->tas, imsg.data, |
1320 | aspa->num * sizeof(uint32_t)); |
1321 | break; |
1322 | case IMSG_RECONF_ASPA_DONE: |
1323 | if (aspa_new == NULL) |
1324 | fatalx("unexpected IMSG_RECONF_ASPA"); |
1325 | aspa_add_set(aspa_new, aspa->as, aspa->tas, |
1326 | aspa->num); |
1327 | free_aspa(aspa); |
1328 | aspa = NULL; |
1329 | break; |
1330 | case IMSG_RECONF_DONE: |
1331 | /* end of update */ |
1332 | if (rde_roa_reload() + rde_aspa_reload() != 0) |
1333 | rde_rpki_reload(); |
1334 | break; |
1335 | } |
1336 | imsg_free(&imsg); |
1337 | } |
1338 | } |
1339 | |
1340 | void |
1341 | rde_dispatch_imsg_peer(struct rde_peer *peer, void *bula) |
1342 | { |
1343 | struct route_refresh rr; |
1344 | struct imsg imsg; |
1345 | |
1346 | if (!peer_imsg_pop(peer, &imsg)) |
1347 | return; |
1348 | |
1349 | switch (imsg.hdr.type) { |
1350 | case IMSG_UPDATE: |
1351 | if (peer->state != PEER_UP) |
1352 | break; |
1353 | rde_update_dispatch(peer, &imsg); |
1354 | break; |
1355 | case IMSG_REFRESH: |
1356 | if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rr)) { |
1357 | log_warnx("route refresh: wrong imsg len"); |
1358 | break; |
1359 | } |
1360 | memcpy(&rr, imsg.data, sizeof(rr)); |
1361 | if (rr.aid >= AID_MAX) { |
1362 | log_peer_warnx(&peer->conf, |
1363 | "route refresh: bad AID %d", rr.aid); |
1364 | break; |
1365 | } |
1366 | if (peer->capa.mp[rr.aid] == 0) { |
1367 | log_peer_warnx(&peer->conf, |
1368 | "route refresh: AID %s not negotiated", |
1369 | aid2str(rr.aid)); |
1370 | break; |
1371 | } |
1372 | switch (rr.subtype) { |
1373 | case ROUTE_REFRESH_REQUEST: |
1374 | peer_dump(peer, rr.aid); |
1375 | break; |
1376 | case ROUTE_REFRESH_BEGIN_RR: |
1377 | /* check if graceful restart EOR was received */ |
1378 | if ((peer->recv_eor & (1 << rr.aid)) == 0) { |
1379 | log_peer_warnx(&peer->conf, |
1380 | "received %s BoRR before EoR", |
1381 | aid2str(rr.aid)); |
1382 | break; |
1383 | } |
1384 | peer_begin_rrefresh(peer, rr.aid); |
1385 | break; |
1386 | case ROUTE_REFRESH_END_RR: |
1387 | if ((peer->recv_eor & (1 << rr.aid)) != 0 && |
1388 | peer->staletime[rr.aid]) |
1389 | peer_flush(peer, rr.aid, |
1390 | peer->staletime[rr.aid]); |
1391 | else |
1392 | log_peer_warnx(&peer->conf, |
1393 | "received unexpected %s EoRR", |
1394 | aid2str(rr.aid)); |
1395 | break; |
1396 | default: |
1397 | log_peer_warnx(&peer->conf, |
1398 | "route refresh: bad subtype %d", rr.subtype); |
1399 | break; |
1400 | } |
1401 | break; |
1402 | default: |
1403 | log_warnx("%s: unhandled imsg type %d", __func__, |
1404 | imsg.hdr.type); |
1405 | break; |
1406 | } |
1407 | |
1408 | imsg_free(&imsg); |
1409 | } |
1410 | |
1411 | /* handle routing updates from the session engine. */ |
1412 | void |
1413 | rde_update_dispatch(struct rde_peer *peer, struct imsg *imsg) |
1414 | { |
1415 | struct filterstate state; |
1416 | struct bgpd_addr prefix; |
1417 | struct mpattr mpa; |
1418 | u_char *p, *mpp = NULL; |
1419 | int pos = 0; |
1420 | uint16_t afi, len, mplen; |
1421 | uint16_t withdrawn_len; |
1422 | uint16_t attrpath_len; |
1423 | uint16_t nlri_len; |
1424 | uint8_t aid, prefixlen, safi, subtype; |
1425 | uint32_t fas, pathid; |
1426 | |
1427 | p = imsg->data; |
1428 | |
1429 | if (imsg->hdr.len < IMSG_HEADER_SIZE + 2) { |
1430 | rde_update_err(peer, ERR_UPDATE, ERR_UPD_ATTRLIST, NULL, 0); |
1431 | return; |
1432 | } |
1433 | |
1434 | memcpy(&len, p, 2); |
1435 | withdrawn_len = ntohs(len);
1436 | p += 2;
1437 | if (imsg->hdr.len < IMSG_HEADER_SIZE + 2 + withdrawn_len + 2) {
1438 | rde_update_err(peer, ERR_UPDATE, ERR_UPD_ATTRLIST, NULL, 0);
1439 | return; |
1440 | } |
1441 | |
1442 | p += withdrawn_len; |
1443 | memcpy(&len, p, 2); |
1444 | attrpath_len = len = ntohs(len);
1445 | p += 2;
1446 | if (imsg->hdr.len <
1447 | IMSG_HEADER_SIZE + 2 + withdrawn_len + 2 + attrpath_len) {
1448 | rde_update_err(peer, ERR_UPDATE, ERR_UPD_ATTRLIST, NULL, 0);
1449 | return; |
1450 | } |
1451 | |
1452 | nlri_len = |
1453 | imsg->hdr.len - IMSG_HEADER_SIZE - 4 - withdrawn_len - attrpath_len;
1454 | |
1455 | if (attrpath_len == 0) { |
1456 | /* 0 = no NLRI information in this message */ |
1457 | if (nlri_len != 0) { |
1458 | /* crap at end of update which should not be there */ |
1459 | rde_update_err(peer, ERR_UPDATE, |
1460 | ERR_UPD_ATTRLIST, NULL((void *)0), 0); |
1461 | return; |
1462 | } |
1463 | if (withdrawn_len == 0) { |
1464 | /* EoR marker */ |
1465 | rde_peer_recv_eor(peer, AID_INET);
1466 | return; |
1467 | } |
1468 | } |
1469 | |
1470 | memset(&mpa, 0, sizeof(mpa)); |
1471 | rde_filterstate_init(&state); |
1472 | if (attrpath_len != 0) { /* 0 = no NLRI information in this message */ |
1473 | /* parse path attributes */ |
1474 | while (len > 0) { |
1475 | if ((pos = rde_attr_parse(p, len, peer, &state, |
1476 | &mpa)) < 0) |
1477 | goto done; |
1478 | p += pos; |
1479 | len -= pos; |
1480 | } |
1481 | |
1482 | /* check for missing but necessary attributes */ |
1483 | if ((subtype = rde_attr_missing(&state.aspath, peer->conf.ebgp, |
1484 | nlri_len))) { |
1485 | rde_update_err(peer, ERR_UPDATE, ERR_UPD_MISSNG_WK_ATTR, |
1486 | &subtype, sizeof(uint8_t)); |
1487 | goto done; |
1488 | } |
1489 | |
1490 | rde_as4byte_fixup(peer, &state.aspath); |
1491 | |
1492 | /* enforce remote AS if requested */ |
1493 | if (state.aspath.flags & F_ATTR_ASPATH &&
1494 | peer->conf.enforce_as == ENFORCE_AS_ON) { |
1495 | fas = aspath_neighbor(state.aspath.aspath); |
1496 | if (peer->conf.remote_as != fas) { |
1497 | log_peer_warnx(&peer->conf, "bad path, " |
1498 | "starting with %s, " |
1499 | "enforce neighbor-as enabled", log_as(fas)); |
1500 | rde_update_err(peer, ERR_UPDATE, ERR_UPD_ASPATH, |
1501 | NULL((void *)0), 0); |
1502 | goto done; |
1503 | } |
1504 | } |
1505 | |
1506 | /* aspath needs to be loop free. This is not a hard error. */ |
1507 | if (state.aspath.flags & F_ATTR_ASPATH &&
1508 | peer->conf.ebgp &&
1509 | peer->conf.enforce_local_as == ENFORCE_AS_ON &&
1510 | !aspath_loopfree(state.aspath.aspath, peer->conf.local_as))
1511 | state.aspath.flags |= F_ATTR_LOOP;
1512 | 
1513 | rde_reflector(peer, &state.aspath);
1514 | 
1515 | /* Cache aspa lookup for all updates from ebgp sessions. */
1516 | if (state.aspath.flags & F_ATTR_ASPATH && peer->conf.ebgp) {
1517 | aspa_validation(rde_aspa, state.aspath.aspath, |
1518 | &state.aspath.aspa_state); |
1519 | state.aspath.aspa_generation = rde_aspa_generation; |
1520 | } |
1521 | } |
1522 | |
1523 | p = imsg->data; |
1524 | len = withdrawn_len; |
1525 | p += 2; |
1526 | |
1527 | /* withdraw prefix */ |
1528 | if (len > 0) {
1529 | if (peer->capa.mp[AID_INET] == 0) {
1530 | log_peer_warnx(&peer->conf,
1531 | "bad withdraw, %s disabled", aid2str(AID_INET));
1532 | rde_update_err(peer, ERR_UPDATE, ERR_UPD_OPTATTR,
1533 | NULL, 0);
1534 | goto done;
1535 | }
1536 | }
1537 | while (len > 0) {
1538 | if (peer_has_add_path(peer, AID_INET, CAPA_AP_RECV)) {
1539 | if (len <= sizeof(pathid)) {
1540 | log_peer_warnx(&peer->conf,
1541 | "bad withdraw prefix");
1542 | rde_update_err(peer, ERR_UPDATE,
1543 | ERR_UPD_NETWORK, NULL, 0);
1544 | goto done;
1545 | }
1546 | memcpy(&pathid, p, sizeof(pathid));
1547 | pathid = ntohl(pathid);
1548 | p += sizeof(pathid); |
1549 | len -= sizeof(pathid); |
1550 | } else |
1551 | pathid = 0; |
1552 | |
1553 | if ((pos = nlri_get_prefix(p, len, &prefix, |
1554 | &prefixlen)) == -1) { |
1555 | /* |
1556 | * the RFC does not mention what we should do in |
1557 | * this case. Let's do the same as in the NLRI case. |
1558 | */ |
1559 | log_peer_warnx(&peer->conf, "bad withdraw prefix"); |
1560 | rde_update_err(peer, ERR_UPDATE, ERR_UPD_NETWORK, |
1561 | NULL((void *)0), 0); |
1562 | goto done; |
1563 | } |
1564 | p += pos; |
1565 | len -= pos; |
1566 | |
1567 | rde_update_withdraw(peer, pathid, &prefix, prefixlen); |
1568 | } |
1569 | |
1570 | /* withdraw MP_UNREACH_NLRI if available */ |
1571 | if (mpa.unreach_len != 0) { |
1572 | mpp = mpa.unreach; |
1573 | mplen = mpa.unreach_len; |
1574 | memcpy(&afi, mpp, 2); |
1575 | mpp += 2; |
1576 | mplen -= 2; |
1577 | afi = ntohs(afi);
1578 | safi = *mpp++; |
1579 | mplen--; |
1580 | |
1581 | if (afi2aid(afi, safi, &aid) == -1) { |
1582 | log_peer_warnx(&peer->conf, |
1583 | "bad AFI/SAFI pair in withdraw"); |
1584 | rde_update_err(peer, ERR_UPDATE, ERR_UPD_OPTATTR, |
1585 | NULL((void *)0), 0); |
1586 | goto done; |
1587 | } |
1588 | |
1589 | if (peer->capa.mp[aid] == 0) { |
1590 | log_peer_warnx(&peer->conf, |
1591 | "bad withdraw, %s disabled", aid2str(aid)); |
1592 | rde_update_err(peer, ERR_UPDATE, ERR_UPD_OPTATTR, |
1593 | NULL((void *)0), 0); |
1594 | goto done; |
1595 | } |
1596 | |
1597 | if ((state.aspath.flags & ~F_ATTR_MP_UNREACH) == 0 &&
1598 | mplen == 0) { |
1599 | /* EoR marker */ |
1600 | rde_peer_recv_eor(peer, aid); |
1601 | } |
1602 | |
1603 | while (mplen > 0) { |
1604 | if (peer_has_add_path(peer, aid, CAPA_AP_RECV)) {
1605 | if (mplen <= sizeof(pathid)) { |
1606 | log_peer_warnx(&peer->conf, |
1607 | "bad %s withdraw prefix", |
1608 | aid2str(aid)); |
1609 | rde_update_err(peer, ERR_UPDATE, |
1610 | ERR_UPD_OPTATTR, |
1611 | mpa.unreach, mpa.unreach_len); |
1612 | goto done; |
1613 | } |
1614 | memcpy(&pathid, mpp, sizeof(pathid)); |
1615 | pathid = ntohl(pathid);
1616 | mpp += sizeof(pathid); |
1617 | mplen -= sizeof(pathid); |
1618 | } else |
1619 | pathid = 0; |
1620 | |
1621 | switch (aid) { |
1622 | case AID_INET6:
1623 | if ((pos = nlri_get_prefix6(mpp, mplen, |
1624 | &prefix, &prefixlen)) == -1) { |
1625 | log_peer_warnx(&peer->conf, |
1626 | "bad IPv6 withdraw prefix"); |
1627 | rde_update_err(peer, ERR_UPDATE, |
1628 | ERR_UPD_OPTATTR, |
1629 | mpa.unreach, mpa.unreach_len); |
1630 | goto done; |
1631 | } |
1632 | break; |
1633 | case AID_VPN_IPv4:
1634 | if ((pos = nlri_get_vpn4(mpp, mplen, |
1635 | &prefix, &prefixlen, 1)) == -1) { |
1636 | log_peer_warnx(&peer->conf, |
1637 | "bad VPNv4 withdraw prefix"); |
1638 | rde_update_err(peer, ERR_UPDATE, |
1639 | ERR_UPD_OPTATTR, |
1640 | mpa.unreach, mpa.unreach_len); |
1641 | goto done; |
1642 | } |
1643 | break; |
1644 | case AID_VPN_IPv6:
1645 | if ((pos = nlri_get_vpn6(mpp, mplen, |
1646 | &prefix, &prefixlen, 1)) == -1) { |
1647 | log_peer_warnx(&peer->conf, |
1648 | "bad VPNv6 withdraw prefix"); |
1649 | rde_update_err(peer, ERR_UPDATE, |
1650 | ERR_UPD_OPTATTR, mpa.unreach, |
1651 | mpa.unreach_len); |
1652 | goto done; |
1653 | } |
1654 | break; |
1655 | case AID_FLOWSPECv4:
1656 | case AID_FLOWSPECv6:
1657 | /* ignore flowspec for now */ |
1658 | default: |
1659 | /* ignore unsupported multiprotocol AF */ |
1660 | mpp += mplen; |
1661 | mplen = 0; |
1662 | continue; |
1663 | } |
1664 | |
1665 | mpp += pos; |
1666 | mplen -= pos; |
1667 | |
1668 | rde_update_withdraw(peer, pathid, &prefix, prefixlen); |
1669 | } |
1670 | |
1671 | if ((state.aspath.flags & ~F_ATTR_MP_UNREACH) == 0)
1672 | goto done; |
1673 | } |
1674 | |
1675 | /* shift to NLRI information */ |
1676 | p += 2 + attrpath_len; |
1677 | |
1678 | /* parse nlri prefix */ |
1679 | if (nlri_len > 0) { |
1680 | if (peer->capa.mp[AID_INET] == 0) {
1681 | log_peer_warnx(&peer->conf,
1682 | "bad update, %s disabled", aid2str(AID_INET));
1683 | rde_update_err(peer, ERR_UPDATE, ERR_UPD_OPTATTR, |
1684 | NULL((void *)0), 0); |
1685 | goto done; |
1686 | } |
1687 | |
1688 | /* inject open policy OTC attribute if needed */ |
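/*
 * Background sketch (RFC 9234, Section 5): the configured role is the
 * local one, so a route learned from a provider (local role customer),
 * a lateral peer, or a route server (local role rs-client) that carries
 * no OTC attribute gets one stamped with the neighbor's ASN, which makes
 * a later leak back towards providers or peers detectable.
 */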
1689 | if ((state.aspath.flags & F_ATTR_OTC) == 0) {
1690 | uint32_t tmp; |
1691 | switch (peer->role) { |
1692 | case ROLE_CUSTOMER: |
1693 | case ROLE_RS_CLIENT: |
1694 | case ROLE_PEER: |
1695 | tmp = htonl(peer->conf.remote_as);
1696 | if (attr_optadd(&state.aspath,
1697 | ATTR_OPTIONAL|ATTR_TRANSITIVE, ATTR_OTC,
1698 | &tmp, sizeof(tmp)) == -1) { |
1699 | rde_update_err(peer, ERR_UPDATE, |
1700 | ERR_UPD_ATTRLIST, NULL((void *)0), 0); |
1701 | goto done; |
1702 | } |
1703 | state.aspath.flags |= F_ATTR_OTC;
1704 | break; |
1705 | default: |
1706 | break; |
1707 | } |
1708 | } |
1709 | } |
1710 | while (nlri_len > 0) { |
1711 | if (peer_has_add_path(peer, AID_INET, CAPA_AP_RECV)) {
1712 | if (nlri_len <= sizeof(pathid)) { |
1713 | log_peer_warnx(&peer->conf, |
1714 | "bad nlri prefix"); |
1715 | rde_update_err(peer, ERR_UPDATE, |
1716 | ERR_UPD_NETWORK, NULL((void *)0), 0); |
1717 | goto done; |
1718 | } |
1719 | memcpy(&pathid, p, sizeof(pathid)); |
1720 | pathid = ntohl(pathid);
1721 | p += sizeof(pathid); |
1722 | nlri_len -= sizeof(pathid); |
1723 | } else |
1724 | pathid = 0; |
1725 | |
1726 | if ((pos = nlri_get_prefix(p, nlri_len, &prefix, |
1727 | &prefixlen)) == -1) { |
1728 | log_peer_warnx(&peer->conf, "bad nlri prefix"); |
1729 | rde_update_err(peer, ERR_UPDATE, ERR_UPD_NETWORK, |
1730 | NULL((void *)0), 0); |
1731 | goto done; |
1732 | } |
1733 | p += pos; |
1734 | nlri_len -= pos; |
1735 | |
1736 | if (rde_update_update(peer, pathid, &state, |
1737 | &prefix, prefixlen) == -1) |
1738 | goto done; |
1739 | |
1740 | } |
1741 | |
1742 | /* add MP_REACH_NLRI if available */ |
1743 | if (mpa.reach_len != 0) { |
1744 | mpp = mpa.reach; |
1745 | mplen = mpa.reach_len; |
1746 | memcpy(&afi, mpp, 2); |
1747 | mpp += 2; |
1748 | mplen -= 2; |
1749 | afi = ntohs(afi);
1750 | safi = *mpp++; |
1751 | mplen--; |
1752 | |
1753 | if (afi2aid(afi, safi, &aid) == -1) { |
1754 | log_peer_warnx(&peer->conf, |
1755 | "bad AFI/SAFI pair in update"); |
1756 | rde_update_err(peer, ERR_UPDATE, ERR_UPD_OPTATTR, |
1757 | NULL((void *)0), 0); |
1758 | goto done; |
1759 | } |
1760 | |
1761 | if (peer->capa.mp[aid] == 0) { |
1762 | log_peer_warnx(&peer->conf, |
1763 | "bad update, %s disabled", aid2str(aid)); |
1764 | rde_update_err(peer, ERR_UPDATE, ERR_UPD_OPTATTR, |
1765 | NULL((void *)0), 0); |
1766 | goto done; |
1767 | } |
1768 | |
1769 | if (aid == AID_INET6) {
1770 | /* inject open policy OTC attribute if needed */
1771 | if ((state.aspath.flags & F_ATTR_OTC) == 0) {
1772 | uint32_t tmp; |
1773 | switch (peer->role) { |
1774 | case ROLE_CUSTOMER: |
1775 | case ROLE_RS_CLIENT: |
1776 | case ROLE_PEER: |
1777 | tmp = htonl(peer->conf.remote_as);
1778 | if (attr_optadd(&state.aspath,
1779 | ATTR_OPTIONAL|ATTR_TRANSITIVE,
1780 | ATTR_OTC, &tmp, |
1781 | sizeof(tmp)) == -1) { |
1782 | rde_update_err(peer, ERR_UPDATE, |
1783 | ERR_UPD_ATTRLIST, NULL((void *)0), 0); |
1784 | goto done; |
1785 | } |
1786 | state.aspath.flags |= F_ATTR_OTC;
1787 | break; |
1788 | default: |
1789 | break; |
1790 | } |
1791 | } |
1792 | } else { |
1793 | /* Only IPv4 and IPv6 unicast do OTC handling */ |
1794 | state.aspath.flags &= ~F_ATTR_OTC_LEAK;
1795 | } |
1796 | |
1797 | /* unlock the previously locked nexthop, it is no longer used */ |
1798 | nexthop_unref(state.nexthop); |
1799 | state.nexthop = NULL((void *)0); |
1800 | if ((pos = rde_get_mp_nexthop(mpp, mplen, aid, peer, |
1801 | &state)) == -1) { |
1802 | log_peer_warnx(&peer->conf, "bad nlri nexthop"); |
1803 | rde_update_err(peer, ERR_UPDATE, ERR_UPD_OPTATTR, |
1804 | mpa.reach, mpa.reach_len); |
1805 | goto done; |
1806 | } |
1807 | mpp += pos; |
1808 | mplen -= pos; |
1809 | |
1810 | while (mplen > 0) { |
1811 | if (peer_has_add_path(peer, aid, CAPA_AP_RECV)) {
1812 | if (mplen <= sizeof(pathid)) { |
1813 | log_peer_warnx(&peer->conf, |
1814 | "bad %s nlri prefix", aid2str(aid)); |
1815 | rde_update_err(peer, ERR_UPDATE, |
1816 | ERR_UPD_OPTATTR, |
1817 | mpa.reach, mpa.reach_len); |
1818 | goto done; |
1819 | } |
1820 | memcpy(&pathid, mpp, sizeof(pathid)); |
1821 | pathid = ntohl(pathid);
1822 | mpp += sizeof(pathid); |
1823 | mplen -= sizeof(pathid); |
1824 | } else |
1825 | pathid = 0; |
1826 | |
1827 | switch (aid) { |
1828 | case AID_INET6:
1829 | if ((pos = nlri_get_prefix6(mpp, mplen, |
1830 | &prefix, &prefixlen)) == -1) { |
1831 | log_peer_warnx(&peer->conf, |
1832 | "bad IPv6 nlri prefix"); |
1833 | rde_update_err(peer, ERR_UPDATE, |
1834 | ERR_UPD_OPTATTR, |
1835 | mpa.reach, mpa.reach_len); |
1836 | goto done; |
1837 | } |
1838 | break; |
1839 | case AID_VPN_IPv4:
1840 | if ((pos = nlri_get_vpn4(mpp, mplen, |
1841 | &prefix, &prefixlen, 0)) == -1) { |
1842 | log_peer_warnx(&peer->conf, |
1843 | "bad VPNv4 nlri prefix"); |
1844 | rde_update_err(peer, ERR_UPDATE, |
1845 | ERR_UPD_OPTATTR, |
1846 | mpa.reach, mpa.reach_len); |
1847 | goto done; |
1848 | } |
1849 | break; |
1850 | case AID_VPN_IPv6:
1851 | if ((pos = nlri_get_vpn6(mpp, mplen, |
1852 | &prefix, &prefixlen, 0)) == -1) { |
1853 | log_peer_warnx(&peer->conf, |
1854 | "bad VPNv6 nlri prefix"); |
1855 | rde_update_err(peer, ERR_UPDATE, |
1856 | ERR_UPD_OPTATTR, |
1857 | mpa.reach, mpa.reach_len); |
1858 | goto done; |
1859 | } |
1860 | break; |
1861 | case AID_FLOWSPECv4:
1862 | case AID_FLOWSPECv6:
1863 | /* ignore flowspec for now */ |
1864 | default: |
1865 | /* ignore unsupported multiprotocol AF */ |
1866 | mpp += mplen; |
1867 | mplen = 0; |
1868 | continue; |
1869 | } |
1870 | |
1871 | mpp += pos; |
1872 | mplen -= pos; |
1873 | |
1874 | if (rde_update_update(peer, pathid, &state, |
1875 | &prefix, prefixlen) == -1) |
1876 | goto done; |
1877 | } |
1878 | } |
1879 | |
1880 | done: |
1881 | rde_filterstate_clean(&state); |
1882 | } |
1883 | |
1884 | /* |
1885 | * Check if path_id is already in use. |
1886 | */ |
1887 | static int |
1888 | pathid_conflict(struct rib_entry *re, uint32_t pathid) |
1889 | { |
1890 | struct prefix *p; |
1891 | |
1892 | if (re == NULL((void *)0)) |
1893 | return 0; |
1894 | |
1895 | TAILQ_FOREACH(p, &re->prefix_h, entry.list.rib)
1896 | if (p->path_id_tx == pathid) |
1897 | return 1; |
1898 | return 0; |
1899 | } |
1900 | |
1901 | /* |
1902 | * Assign a send side path_id to all paths. |
1903 | */ |
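/*
 * Two independent path-id spaces are in play here: path_id is the
 * receive-side identifier taken from the peer's ADD-PATH encoding
 * (RFC 7911), while path_id_tx is a locally generated identifier used
 * when the prefix is readvertised. pathid_assign() keeps the two
 * decoupled so a peer cannot influence the ids used on output.
 */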
1904 | static uint32_t |
1905 | pathid_assign(struct rde_peer *peer, uint32_t path_id, |
1906 | struct bgpd_addr *prefix, uint8_t prefixlen) |
1907 | { |
1908 | struct rib_entry *re; |
1909 | uint32_t path_id_tx; |
1910 | |
1911 | /* If peer has no add-path use the per peer path_id */ |
1912 | if (!peer_has_add_path(peer, prefix->aid, CAPA_AP_RECV0x01)) |
1913 | return peer->path_id_tx; |
1914 | |
1915 | /* peer uses add-path, therefore new path_ids need to be assigned */ |
1916 | re = rib_get_addr(rib_byid(RIB_ADJ_IN0), prefix, prefixlen); |
1917 | if (re != NULL((void *)0)) { |
1918 | struct prefix *p; |
1919 | |
1920 | p = prefix_bypeer(re, peer, path_id); |
1921 | if (p != NULL((void *)0)) |
1922 | return p->path_id_tx; |
1923 | } |
1924 | |
1925 | /* |
1926 | * Assign new local path_id, must be an odd number. |
1927 | * Even numbers are used by the per peer path_id_tx. |
1928 | */ |
1929 | do { |
1930 | path_id_tx = arc4random() | 1; |
1931 | } while (pathid_conflict(re, path_id_tx)); |
1932 | |
1933 | return path_id_tx; |
1934 | } |
1935 | |
1936 | int |
1937 | rde_update_update(struct rde_peer *peer, uint32_t path_id, |
1938 | struct filterstate *in, struct bgpd_addr *prefix, uint8_t prefixlen) |
1939 | { |
1940 | struct filterstate state; |
1941 | enum filter_actions action; |
1942 | uint32_t path_id_tx; |
1943 | uint16_t i; |
1944 | uint8_t roa_state, aspa_state; |
1945 | const char *wmsg = "filtered, withdraw"; |
1946 | |
1947 | peer->stats.prefix_rcvd_update++; |
1948 | |
1949 | roa_state = rde_roa_validity(&rde_roa, prefix, prefixlen, |
1950 | aspath_origin(in->aspath.aspath)); |
1951 | aspa_state = rde_aspa_validity(peer, &in->aspath, prefix->aid); |
1952 | rde_filterstate_set_vstate(in, roa_state, aspa_state); |
1953 | |
1954 | path_id_tx = pathid_assign(peer, path_id, prefix, prefixlen); |
1955 | /* add original path to the Adj-RIB-In */ |
1956 | if (prefix_update(rib_byid(RIB_ADJ_IN0), peer, path_id, path_id_tx, |
1957 | in, prefix, prefixlen) == 1) |
1958 | peer->stats.prefix_cnt++; |
1959 | |
1960 | /* max prefix checker */ |
1961 | if (peer->conf.max_prefix && |
1962 | peer->stats.prefix_cnt > peer->conf.max_prefix) { |
1963 | log_peer_warnx(&peer->conf, "prefix limit reached (>%u/%u)", |
1964 | peer->stats.prefix_cnt, peer->conf.max_prefix); |
1965 | rde_update_err(peer, ERR_CEASE, ERR_CEASE_MAX_PREFIX, NULL((void *)0), 0); |
1966 | return (-1); |
1967 | } |
1968 | |
1969 | if (in->aspath.flags & F_ATTR_PARSE_ERR0x10000) |
1970 | wmsg = "path invalid, withdraw"; |
1971 | |
1972 | for (i = RIB_LOC_START1; i < rib_size; i++) { |
1973 | struct rib *rib = rib_byid(i); |
1974 | if (rib == NULL((void *)0)) |
1975 | continue; |
1976 | rde_filterstate_copy(&state, in); |
1977 | /* input filter */ |
1978 | action = rde_filter(rib->in_rules, peer, peer, prefix, |
1979 | prefixlen, &state); |
1980 | |
1981 | if (action == ACTION_ALLOW) { |
1982 | rde_update_log("update", i, peer, |
1983 | &state.nexthop->exit_nexthop, prefix, |
1984 | prefixlen); |
1985 | prefix_update(rib, peer, path_id, path_id_tx, &state, |
1986 | prefix, prefixlen); |
1987 | } else if (prefix_withdraw(rib, peer, path_id, prefix, |
1988 | prefixlen)) { |
1989 | rde_update_log(wmsg, i, peer, |
1990 | NULL((void *)0), prefix, prefixlen); |
1991 | } |
1992 | |
1993 | rde_filterstate_clean(&state); |
1994 | } |
1995 | return (0); |
1996 | } |
1997 | |
1998 | void |
1999 | rde_update_withdraw(struct rde_peer *peer, uint32_t path_id, |
2000 | struct bgpd_addr *prefix, uint8_t prefixlen) |
2001 | { |
2002 | uint16_t i; |
2003 | |
2004 | for (i = RIB_LOC_START1; i < rib_size; i++) { |
2005 | struct rib *rib = rib_byid(i); |
2006 | if (rib == NULL((void *)0)) |
2007 | continue; |
2008 | if (prefix_withdraw(rib, peer, path_id, prefix, prefixlen)) |
2009 | rde_update_log("withdraw", i, peer, NULL((void *)0), prefix, |
2010 | prefixlen); |
2011 | } |
2012 | |
2013 | /* remove original path from the Adj-RIB-In */
2014 | if (prefix_withdraw(rib_byid(RIB_ADJ_IN0), peer, path_id, |
2015 | prefix, prefixlen)) |
2016 | peer->stats.prefix_cnt--; |
2017 | |
2018 | peer->stats.prefix_rcvd_withdraw++; |
2019 | } |
2020 | |
2021 | /* |
2022 | * BGP UPDATE parser functions |
2023 | */ |
2024 | |
2025 | /* attribute parser specific macros */ |
2026 | #define UPD_READ(t, p, plen, n) \ |
2027 | do { \ |
2028 | memcpy(t, p, n); \ |
2029 | p += n; \ |
2030 | plen += n; \ |
2031 | } while (0) |
2032 | |
2033 | #define CHECK_FLAGS(s, t, m) \ |
2034 | (((s) & ~(ATTR_DEFMASK | (m))) == (t))
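/*
 * A rough illustration: UPD_READ() copies n octets and advances both
 * the input pointer and the parsed-length counter, while CHECK_FLAGS()
 * ignores the bits covered by ATTR_DEFMASK (the low nibble plus the
 * extended-length bit) and any explicitly allowed mask m.  For example
 * CHECK_FLAGS(flags, ATTR_WELL_KNOWN, 0) accepts 0x40 and 0x50 but
 * rejects 0xc0, i.e. an attribute that also carries the optional bit.
 */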
2035 | |
2036 | int |
2037 | rde_attr_parse(u_char *p, uint16_t len, struct rde_peer *peer, |
2038 | struct filterstate *state, struct mpattr *mpa) |
2039 | { |
2040 | struct bgpd_addr nexthop; |
2041 | struct rde_aspath *a = &state->aspath; |
2042 | u_char *op = p, *npath; |
2043 | uint32_t tmp32, zero = 0; |
2044 | int error; |
2045 | uint16_t attr_len, nlen; |
2046 | uint16_t plen = 0; |
2047 | uint8_t flags, type, tmp8; |
2048 | |
2049 | if (len < 3) { |
2050 | bad_len: |
2051 | rde_update_err(peer, ERR_UPDATE, ERR_UPD_ATTRLEN, op, len); |
2052 | return (-1); |
2053 | } |
2054 | |
2055 | UPD_READ(&flags, p, plen, 1); |
2056 | UPD_READ(&type, p, plen, 1); |
2057 | |
2058 | if (flags & ATTR_EXTLEN0x10) { |
2059 | if (len - plen < 2) |
2060 | goto bad_len; |
2061 | UPD_READ(&attr_len, p, plen, 2); |
2062 | attr_len = ntohs(attr_len);
2063 | } else { |
2064 | UPD_READ(&tmp8, p, plen, 1); |
2065 | attr_len = tmp8; |
2066 | } |
2067 | |
2068 | if (len - plen < attr_len) |
2069 | goto bad_len; |
2070 | |
2071 | /* adjust len to the actual attribute size including header */ |
2072 | len = plen + attr_len; |
2073 | |
2074 | switch (type) { |
2075 | case ATTR_UNDEF: |
2076 | /* ignore and drop path attributes with a type code of 0 */ |
2077 | plen += attr_len; |
2078 | break; |
2079 | case ATTR_ORIGIN: |
2080 | if (attr_len != 1) |
2081 | goto bad_len; |
2082 | |
2083 | if (!CHECK_FLAGS(flags, ATTR_WELL_KNOWN0x40, 0)) { |
2084 | bad_flags: |
2085 | rde_update_err(peer, ERR_UPDATE, ERR_UPD_ATTRFLAGS, |
2086 | op, len); |
2087 | return (-1); |
2088 | } |
2089 | |
2090 | UPD_READ(&a->origin, p, plen, 1); |
2091 | if (a->origin > ORIGIN_INCOMPLETE) {
2092 | rde_update_err(peer, ERR_UPDATE, ERR_UPD_ORIGIN, |
2093 | op, len); |
2094 | return (-1); |
2095 | } |
2096 | if (a->flags & F_ATTR_ORIGIN0x00001) |
2097 | goto bad_list; |
2098 | a->flags |= F_ATTR_ORIGIN0x00001; |
2099 | break; |
2100 | case ATTR_ASPATH: |
2101 | if (!CHECK_FLAGS(flags, ATTR_WELL_KNOWN0x40, 0)) |
2102 | goto bad_flags; |
2103 | error = aspath_verify(p, attr_len, peer_has_as4byte(peer), |
2104 | peer_accept_no_as_set(peer)); |
2105 | if (error == AS_ERR_SOFT) {
2106 | /* |
2107 | * soft errors like unexpected segment types are |
2108 | * not considered fatal and the path is just |
2109 | * marked invalid. |
2110 | */ |
2111 | a->flags |= F_ATTR_PARSE_ERR0x10000; |
2112 | } else if (error != 0) { |
2113 | rde_update_err(peer, ERR_UPDATE, ERR_UPD_ASPATH, |
2114 | NULL((void *)0), 0); |
2115 | return (-1); |
2116 | } |
2117 | if (a->flags & F_ATTR_ASPATH0x00002) |
2118 | goto bad_list; |
2119 | if (peer_has_as4byte(peer)) { |
2120 | npath = p; |
2121 | nlen = attr_len; |
2122 | } else { |
2123 | npath = aspath_inflate(p, attr_len, &nlen); |
2124 | if (npath == NULL((void *)0)) |
2125 | fatal("aspath_inflate"); |
2126 | } |
2127 | if (error == AS_ERR_SOFT) {
2128 | char *str; |
2129 | |
2130 | aspath_asprint(&str, npath, nlen); |
2131 | log_peer_warnx(&peer->conf, "bad ASPATH %s, " |
2132 | "path invalidated and prefix withdrawn", |
2133 | str ? str : "(bad aspath)"); |
2134 | free(str); |
2135 | } |
2136 | a->flags |= F_ATTR_ASPATH0x00002; |
2137 | a->aspath = aspath_get(npath, nlen); |
2138 | if (npath != p) |
2139 | free(npath); |
2140 | plen += attr_len; |
2141 | break; |
2142 | case ATTR_NEXTHOP: |
2143 | if (attr_len != 4) |
2144 | goto bad_len; |
2145 | if (!CHECK_FLAGS(flags, ATTR_WELL_KNOWN0x40, 0)) |
2146 | goto bad_flags; |
2147 | if (a->flags & F_ATTR_NEXTHOP0x00004) |
2148 | goto bad_list; |
2149 | a->flags |= F_ATTR_NEXTHOP0x00004; |
2150 | |
2151 | memset(&nexthop, 0, sizeof(nexthop)); |
2152 | nexthop.aid = AID_INET;
2153 | UPD_READ(&nexthop.v4.s_addr, p, plen, 4);
2154 | /* |
2155 | * Check if the nexthop is a valid IP address. We consider |
2156 | * multicast addresses as invalid. |
2157 | */ |
2158 | tmp32 = ntohl(nexthop.v4.s_addr);
2159 | if (IN_MULTICAST(tmp32)) {
2160 | rde_update_err(peer, ERR_UPDATE, ERR_UPD_NEXTHOP, |
2161 | op, len); |
2162 | return (-1); |
2163 | } |
2164 | nexthop_unref(state->nexthop); /* just to be sure */ |
2165 | state->nexthop = nexthop_get(&nexthop); |
2166 | break; |
2167 | case ATTR_MED: |
2168 | if (attr_len != 4) |
2169 | goto bad_len; |
2170 | if (!CHECK_FLAGS(flags, ATTR_OPTIONAL0x80, 0)) |
2171 | goto bad_flags; |
2172 | if (a->flags & F_ATTR_MED0x00010) |
2173 | goto bad_list; |
2174 | a->flags |= F_ATTR_MED0x00010; |
2175 | |
2176 | UPD_READ(&tmp32, p, plen, 4); |
2177 | a->med = ntohl(tmp32);
2178 | break; |
2179 | case ATTR_LOCALPREF: |
2180 | if (attr_len != 4) |
2181 | goto bad_len; |
2182 | if (!CHECK_FLAGS(flags, ATTR_WELL_KNOWN0x40, 0)) |
2183 | goto bad_flags; |
2184 | if (peer->conf.ebgp) { |
2185 | /* ignore local-pref attr on non ibgp peers */ |
2186 | plen += attr_len; |
2187 | break; |
2188 | } |
2189 | if (a->flags & F_ATTR_LOCALPREF0x00008) |
2190 | goto bad_list; |
2191 | a->flags |= F_ATTR_LOCALPREF0x00008; |
2192 | |
2193 | UPD_READ(&tmp32, p, plen, 4); |
2194 | a->lpref = ntohl(tmp32);
2195 | break; |
2196 | case ATTR_ATOMIC_AGGREGATE: |
2197 | if (attr_len != 0) |
2198 | goto bad_len; |
2199 | if (!CHECK_FLAGS(flags, ATTR_WELL_KNOWN0x40, 0)) |
2200 | goto bad_flags; |
2201 | goto optattr; |
2202 | case ATTR_AGGREGATOR: |
2203 | if ((!peer_has_as4byte(peer) && attr_len != 6) || |
2204 | (peer_has_as4byte(peer) && attr_len != 8)) { |
2205 | /* |
2206 | * ignore attribute in case of error as per |
2207 | * RFC 7606 |
2208 | */ |
2209 | log_peer_warnx(&peer->conf, "bad AGGREGATOR, " |
2210 | "attribute discarded"); |
2211 | plen += attr_len; |
2212 | break; |
2213 | } |
2214 | if (!CHECK_FLAGS(flags, ATTR_OPTIONAL0x80|ATTR_TRANSITIVE0x40, |
2215 | ATTR_PARTIAL0x20)) |
2216 | goto bad_flags; |
2217 | if (!peer_has_as4byte(peer)) { |
2218 | /* need to inflate aggregator AS to 4-byte */ |
2219 | u_char t[8]; |
2220 | t[0] = t[1] = 0; |
2221 | UPD_READ(&t[2], p, plen, 2); |
2222 | UPD_READ(&t[4], p, plen, 4); |
2223 | if (memcmp(t, &zero, sizeof(uint32_t)) == 0) { |
2224 | /* As per RFC7606 use "attribute discard". */ |
2225 | log_peer_warnx(&peer->conf, "bad AGGREGATOR, " |
2226 | "AS 0 not allowed, attribute discarded"); |
2227 | break; |
2228 | } |
2229 | if (attr_optadd(a, flags, type, t, |
2230 | sizeof(t)) == -1) |
2231 | goto bad_list; |
2232 | break; |
2233 | } |
2234 | /* peers with 4-byte AS support take the regular path below */
2235 | if (memcmp(p, &zero, sizeof(uint32_t)) == 0) { |
2236 | /* As per RFC7606 use "attribute discard" here. */ |
2237 | char *pfmt = log_fmt_peer(&peer->conf); |
2238 | log_debug("%s: bad AGGREGATOR, " |
2239 | "AS 0 not allowed, attribute discarded", pfmt); |
2240 | free(pfmt); |
2241 | plen += attr_len; |
2242 | break; |
2243 | } |
2244 | goto optattr; |
2245 | case ATTR_COMMUNITIES: |
2246 | if (!CHECK_FLAGS(flags, ATTR_OPTIONAL0x80|ATTR_TRANSITIVE0x40, |
2247 | ATTR_PARTIAL0x20)) |
2248 | goto bad_flags; |
2249 | if (community_add(&state->communities, flags, p, |
2250 | attr_len) == -1) { |
2251 | /* |
2252 | * mark update as bad and withdraw all routes as per |
2253 | * RFC 7606 |
2254 | */ |
2255 | a->flags |= F_ATTR_PARSE_ERR0x10000; |
2256 | log_peer_warnx(&peer->conf, "bad COMMUNITIES, " |
2257 | "path invalidated and prefix withdrawn"); |
2258 | } |
2259 | plen += attr_len; |
2260 | break; |
2261 | case ATTR_LARGE_COMMUNITIES: |
2262 | if (!CHECK_FLAGS(flags, ATTR_OPTIONAL0x80|ATTR_TRANSITIVE0x40, |
2263 | ATTR_PARTIAL0x20)) |
2264 | goto bad_flags; |
2265 | if (community_large_add(&state->communities, flags, p, |
2266 | attr_len) == -1) { |
2267 | /* |
2268 | * mark update as bad and withdraw all routes as per |
2269 | * RFC 7606 |
2270 | */ |
2271 | a->flags |= F_ATTR_PARSE_ERR0x10000; |
2272 | log_peer_warnx(&peer->conf, "bad LARGE COMMUNITIES, " |
2273 | "path invalidated and prefix withdrawn"); |
2274 | } |
2275 | plen += attr_len; |
2276 | break; |
2277 | case ATTR_EXT_COMMUNITIES: |
2278 | if (!CHECK_FLAGS(flags, ATTR_OPTIONAL0x80|ATTR_TRANSITIVE0x40, |
2279 | ATTR_PARTIAL0x20)) |
2280 | goto bad_flags; |
2281 | if (community_ext_add(&state->communities, flags, |
2282 | peer->conf.ebgp, p, attr_len) == -1) { |
2283 | /* |
2284 | * mark update as bad and withdraw all routes as per |
2285 | * RFC 7606 |
2286 | */ |
2287 | a->flags |= F_ATTR_PARSE_ERR0x10000; |
2288 | log_peer_warnx(&peer->conf, "bad EXT_COMMUNITIES, " |
2289 | "path invalidated and prefix withdrawn"); |
2290 | } |
2291 | plen += attr_len; |
2292 | break; |
2293 | case ATTR_ORIGINATOR_ID: |
2294 | if (attr_len != 4) |
2295 | goto bad_len; |
2296 | if (!CHECK_FLAGS(flags, ATTR_OPTIONAL0x80, 0)) |
2297 | goto bad_flags; |
2298 | goto optattr; |
2299 | case ATTR_CLUSTER_LIST: |
2300 | if (attr_len % 4 != 0) |
2301 | goto bad_len; |
2302 | if (!CHECK_FLAGS(flags, ATTR_OPTIONAL0x80, 0)) |
2303 | goto bad_flags; |
2304 | goto optattr; |
2305 | case ATTR_MP_REACH_NLRI: |
2306 | if (attr_len < 5) |
2307 | goto bad_len; |
2308 | if (!CHECK_FLAGS(flags, ATTR_OPTIONAL0x80, 0)) |
2309 | goto bad_flags; |
2310 | /* the validity is checked in rde_update_dispatch() */ |
2311 | if (a->flags & F_ATTR_MP_REACH0x00040) |
2312 | goto bad_list; |
2313 | a->flags |= F_ATTR_MP_REACH0x00040; |
2314 | |
2315 | mpa->reach = p; |
2316 | mpa->reach_len = attr_len; |
2317 | plen += attr_len; |
2318 | break; |
2319 | case ATTR_MP_UNREACH_NLRI: |
2320 | if (attr_len < 3) |
2321 | goto bad_len; |
2322 | if (!CHECK_FLAGS(flags, ATTR_OPTIONAL0x80, 0)) |
2323 | goto bad_flags; |
2324 | /* the validity is checked in rde_update_dispatch() */ |
2325 | if (a->flags & F_ATTR_MP_UNREACH0x00080) |
2326 | goto bad_list; |
2327 | a->flags |= F_ATTR_MP_UNREACH0x00080; |
2328 | |
2329 | mpa->unreach = p; |
2330 | mpa->unreach_len = attr_len; |
2331 | plen += attr_len; |
2332 | break; |
2333 | case ATTR_AS4_AGGREGATOR: |
2334 | if (attr_len != 8) { |
2335 | /* see ATTR_AGGREGATOR ... */ |
2336 | log_peer_warnx(&peer->conf, "bad AS4_AGGREGATOR, " |
2337 | "attribute discarded"); |
2338 | plen += attr_len; |
2339 | break; |
2340 | } |
2341 | if (!CHECK_FLAGS(flags, ATTR_OPTIONAL0x80|ATTR_TRANSITIVE0x40, |
2342 | ATTR_PARTIAL0x20)) |
2343 | goto bad_flags; |
2344 | if (memcmp(p, &zero, sizeof(uint32_t)) == 0) { |
2345 | /* As per RFC6793 use "attribute discard" here. */ |
2346 | log_peer_warnx(&peer->conf, "bad AS4_AGGREGATOR, " |
2347 | "AS 0 not allowed, attribute discarded"); |
2348 | plen += attr_len; |
2349 | break; |
2350 | } |
2351 | a->flags |= F_ATTR_AS4BYTE_NEW0x00100; |
2352 | goto optattr; |
2353 | case ATTR_AS4_PATH: |
2354 | if (!CHECK_FLAGS(flags, ATTR_OPTIONAL0x80|ATTR_TRANSITIVE0x40, |
2355 | ATTR_PARTIAL0x20)) |
2356 | goto bad_flags; |
2357 | if ((error = aspath_verify(p, attr_len, 1, |
2358 | peer_accept_no_as_set(peer))) != 0) { |
2359 | /* As per RFC6793 use "attribute discard" here. */ |
2360 | log_peer_warnx(&peer->conf, "bad AS4_PATH, " |
2361 | "attribute discarded"); |
2362 | plen += attr_len; |
2363 | break; |
2364 | } |
2365 | a->flags |= F_ATTR_AS4BYTE_NEW0x00100; |
2366 | goto optattr; |
2367 | case ATTR_OTC: |
2368 | if (attr_len != 4) { |
2369 | /* treat-as-withdraw */ |
2370 | a->flags |= F_ATTR_PARSE_ERR0x10000; |
2371 | log_peer_warnx(&peer->conf, "bad OTC, " |
2372 | "path invalidated and prefix withdrawn"); |
2373 | plen += attr_len; |
2374 | break; |
2375 | } |
2376 | if (!CHECK_FLAGS(flags, ATTR_OPTIONAL0x80|ATTR_TRANSITIVE0x40, |
2377 | ATTR_PARTIAL0x20)) |
2378 | goto bad_flags; |
2379 | switch (peer->role) { |
2380 | case ROLE_PROVIDER: |
2381 | case ROLE_RS: |
2382 | a->flags |= F_ATTR_OTC_LEAK0x02000; |
2383 | break; |
2384 | case ROLE_PEER: |
2385 | memcpy(&tmp32, p, sizeof(tmp32)); |
2386 | tmp32 = ntohl(tmp32);
2387 | if (tmp32 != peer->conf.remote_as) |
2388 | a->flags |= F_ATTR_OTC_LEAK0x02000; |
2389 | break; |
2390 | default: |
2391 | break; |
2392 | } |
2393 | a->flags |= F_ATTR_OTC0x01000; |
2394 | goto optattr; |
2395 | default: |
2396 | if ((flags & ATTR_OPTIONAL0x80) == 0) { |
2397 | rde_update_err(peer, ERR_UPDATE, ERR_UPD_UNKNWN_WK_ATTR, |
2398 | op, len); |
2399 | return (-1); |
2400 | } |
2401 | optattr: |
2402 | if (attr_optadd(a, flags, type, p, attr_len) == -1) { |
2403 | bad_list: |
2404 | rde_update_err(peer, ERR_UPDATE, ERR_UPD_ATTRLIST, |
2405 | NULL((void *)0), 0); |
2406 | return (-1); |
2407 | } |
2408 | |
2409 | plen += attr_len; |
2410 | break; |
2411 | } |
2412 | |
2413 | return (plen); |
2414 | } |
2415 | |
2416 | int |
2417 | rde_attr_add(struct filterstate *state, u_char *p, uint16_t len) |
2418 | { |
2419 | uint16_t attr_len; |
2420 | uint16_t plen = 0; |
2421 | uint8_t flags; |
2422 | uint8_t type; |
2423 | uint8_t tmp8; |
2424 | |
2425 | if (len < 3) |
2426 | return (-1); |
2427 | |
2428 | UPD_READ(&flags, p, plen, 1); |
2429 | UPD_READ(&type, p, plen, 1); |
2430 | |
2431 | if (flags & ATTR_EXTLEN0x10) { |
2432 | if (len - plen < 2) |
2433 | return (-1); |
2434 | UPD_READ(&attr_len, p, plen, 2); |
2435 | attr_len = ntohs(attr_len);
2436 | } else { |
2437 | UPD_READ(&tmp8, p, plen, 1); |
2438 | attr_len = tmp8; |
2439 | } |
2440 | |
2441 | if (len - plen < attr_len) |
2442 | return (-1); |
2443 | |
2444 | switch (type) { |
2445 | case ATTR_COMMUNITIES: |
2446 | return community_add(&state->communities, flags, p, attr_len); |
2447 | case ATTR_LARGE_COMMUNITIES: |
2448 | return community_large_add(&state->communities, flags, p, |
2449 | attr_len); |
2450 | case ATTR_EXT_COMMUNITIES: |
2451 | return community_ext_add(&state->communities, flags, 0, |
2452 | p, attr_len); |
2453 | } |
2454 | |
2455 | if (attr_optadd(&state->aspath, flags, type, p, attr_len) == -1) |
2456 | return (-1); |
2457 | return (0); |
2458 | } |
2459 | |
2460 | #undef UPD_READ |
2461 | #undef CHECK_FLAGS |
2462 | |
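/*
 * Background for the check below (RFC 4271, Section 5 and RFC 4760):
 * ORIGIN, AS_PATH and NEXT_HOP are well-known mandatory attributes,
 * though the nexthop may instead arrive inside MP_REACH_NLRI; LOCAL_PREF
 * is mandatory on ibgp sessions only; and an UPDATE carrying nothing but
 * an MP_UNREACH_NLRI attribute is complete on its own.
 */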
2463 | uint8_t |
2464 | rde_attr_missing(struct rde_aspath *a, int ebgp, uint16_t nlrilen) |
2465 | { |
2466 | /* ATTR_MP_UNREACH_NLRI may be sent alone */ |
2467 | if (nlrilen == 0 && a->flags & F_ATTR_MP_UNREACH0x00080 && |
2468 | (a->flags & F_ATTR_MP_REACH0x00040) == 0) |
2469 | return (0); |
2470 | |
2471 | if ((a->flags & F_ATTR_ORIGIN0x00001) == 0) |
2472 | return (ATTR_ORIGIN); |
2473 | if ((a->flags & F_ATTR_ASPATH0x00002) == 0) |
2474 | return (ATTR_ASPATH); |
2475 | if ((a->flags & F_ATTR_MP_REACH0x00040) == 0 && |
2476 | (a->flags & F_ATTR_NEXTHOP0x00004) == 0) |
2477 | return (ATTR_NEXTHOP); |
2478 | if (!ebgp) |
2479 | if ((a->flags & F_ATTR_LOCALPREF0x00008) == 0) |
2480 | return (ATTR_LOCALPREF); |
2481 | return (0); |
2482 | } |
2483 | |
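/*
 * Sketch of the MP_REACH_NLRI attribute value (RFC 4760, Section 3):
 *
 *   AFI (2) | SAFI (1) | Nexthop Length (1) | Nexthop | Reserved (1) | NLRI
 *
 * rde_update_dispatch() has already consumed the AFI and SAFI octets;
 * the helper below parses the nexthop length, the nexthop itself and
 * the reserved (former SNPA) octet and returns the number of octets used.
 */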
2484 | int |
2485 | rde_get_mp_nexthop(u_char *data, uint16_t len, uint8_t aid, |
2486 | struct rde_peer *peer, struct filterstate *state) |
2487 | { |
2488 | struct bgpd_addr nexthop; |
2489 | uint8_t totlen, nhlen; |
2490 | |
2491 | if (len == 0) |
2492 | return (-1); |
2493 | |
2494 | nhlen = *data++; |
2495 | totlen = 1; |
2496 | len--; |
2497 | |
2498 | if (nhlen + 1 > len) |
2499 | return (-1); |
2500 | |
2501 | memset(&nexthop, 0, sizeof(nexthop)); |
2502 | switch (aid) { |
2503 | case AID_INET6:
2504 | /* |
2505 | * RFC2545 describes that there may be a link-local |
2506 | * address carried in nexthop. Yikes! |
2507 | * Such a link-local nexthop is only usable when the session
2508 | * itself runs over a link-local address and the interface
2509 | * scope is therefore known; otherwise it is rejected, since
2510 | * no other traffic could use that nexthop either.
2511 | */ |
2512 | if (nhlen != 16 && nhlen != 32) { |
2513 | log_peer_warnx(&peer->conf, "bad %s nexthop, " |
2514 | "bad size %d", aid2str(aid), nhlen); |
2515 | return (-1); |
2516 | } |
2517 | memcpy(&nexthop.v6.s6_addr, data, 16);
2518 | nexthop.aid = AID_INET6;
2519 | if (IN6_IS_ADDR_LINKLOCAL(&nexthop.v6)) {
2520 | if (peer->local_if_scope != 0) { |
2521 | nexthop.scope_id = peer->local_if_scope; |
2522 | } else { |
2523 | log_peer_warnx(&peer->conf, |
2524 | "unexpected link-local nexthop: %s", |
2525 | log_addr(&nexthop)); |
2526 | return (-1); |
2527 | } |
2528 | } |
2529 | break; |
2530 | case AID_VPN_IPv4:
2531 | /* |
2532 | * Neither RFC4364 nor RFC3107 specify the format of the |
2533 | * nexthop in an explicit way. The quality of RFC went down |
2534 | * the toilet the larger the number got. |
2535 | * RFC4364 is very confusing about VPN-IPv4 address and the |
2536 | * VPN-IPv4 prefix that carries also a MPLS label. |
2537 | * So the nexthop is a 12-byte address with a 64bit RD and |
2538 | * an IPv4 address following. In the nexthop case the RD can |
2539 | * be ignored. |
2540 | * Since the nexthop has to be in the main IPv4 table just |
2541 | * create an AID_INET nexthop. So we don't need to handle |
2542 | * AID_VPN_IPv4 in nexthop and kroute. |
2543 | */ |
2544 | if (nhlen != 12) { |
2545 | log_peer_warnx(&peer->conf, "bad %s nexthop, " |
2546 | "bad size %d", aid2str(aid), nhlen); |
2547 | return (-1); |
2548 | } |
2549 | nexthop.aid = AID_INET;
2550 | memcpy(&nexthop.v4, data + sizeof(uint64_t),
2551 | sizeof(nexthop.v4));
2552 | break; |
2553 | case AID_VPN_IPv6:
2554 | if (nhlen != 24) { |
2555 | log_peer_warnx(&peer->conf, "bad %s nexthop, " |
2556 | "bad size %d", aid2str(aid), nhlen); |
2557 | return (-1); |
2558 | } |
2559 | memcpy(&nexthop.v6, data + sizeof(uint64_t),
2560 | sizeof(nexthop.v6));
2561 | nexthop.aid = AID_INET6;
2562 | if (IN6_IS_ADDR_LINKLOCAL(&nexthop.v6)) {
2563 | if (peer->local_if_scope != 0) { |
2564 | nexthop.scope_id = peer->local_if_scope; |
2565 | } else { |
2566 | log_peer_warnx(&peer->conf, |
2567 | "unexpected link-local nexthop: %s", |
2568 | log_addr(&nexthop)); |
2569 | return (-1); |
2570 | } |
2571 | } |
2572 | break; |
2573 | case AID_FLOWSPECv4:
2574 | case AID_FLOWSPECv6:
2575 | /* nexthop must be 0 and ignored for flowspec */ |
2576 | if (nhlen != 0) { |
2577 | log_peer_warnx(&peer->conf, "bad %s nexthop, " |
2578 | "bad size %d", aid2str(aid), nhlen); |
2579 | return (-1); |
2580 | } |
2581 | /* also ignore reserved (old SNPA) field as per RFC4760 */ |
2582 | return (totlen + 1); |
2583 | default: |
2584 | log_peer_warnx(&peer->conf, "bad multiprotocol nexthop, " |
2585 | "bad AID"); |
2586 | return (-1); |
2587 | } |
2588 | |
2589 | state->nexthop = nexthop_get(&nexthop); |
2590 | |
2591 | /* ignore reserved (old SNPA) field as per RFC4760 */ |
2592 | totlen += nhlen + 1; |
2593 | |
2594 | return (totlen); |
2595 | } |
2596 | |
2597 | void |
2598 | rde_update_err(struct rde_peer *peer, uint8_t error, uint8_t suberr, |
2599 | void *data, uint16_t size) |
2600 | { |
2601 | struct ibuf *wbuf; |
2602 | |
2603 | if ((wbuf = imsg_create(ibuf_se, IMSG_UPDATE_ERR, peer->conf.id, 0, |
2604 | size + sizeof(error) + sizeof(suberr))) == NULL((void *)0)) |
2605 | fatal("%s %d imsg_create error", __func__, __LINE__);
2606 | if (imsg_add(wbuf, &error, sizeof(error)) == -1 || |
2607 | imsg_add(wbuf, &suberr, sizeof(suberr)) == -1 || |
2608 | imsg_add(wbuf, data, size) == -1) |
2609 | fatal("%s %d imsg_add error", __func__, __LINE__);
2610 | imsg_close(ibuf_se, wbuf); |
2611 | peer->state = PEER_ERR; |
2612 | } |
2613 | |
2614 | void |
2615 | rde_update_log(const char *message, uint16_t rid, |
2616 | const struct rde_peer *peer, const struct bgpd_addr *next, |
2617 | const struct bgpd_addr *prefix, uint8_t prefixlen) |
2618 | { |
2619 | char *l = NULL((void *)0); |
2620 | char *n = NULL((void *)0); |
2621 | char *p = NULL((void *)0); |
2622 | |
2623 | if (!((conf->log & BGPD_LOG_UPDATES) ||
2624 | (peer->flags & PEERFLAG_LOG_UPDATES)))
2625 | return; |
2626 | |
2627 | if (next != NULL((void *)0)) |
2628 | if (asprintf(&n, " via %s", log_addr(next)) == -1) |
2629 | n = NULL((void *)0); |
2630 | if (asprintf(&p, "%s/%u", log_addr(prefix), prefixlen) == -1) |
2631 | p = NULL((void *)0); |
2632 | l = log_fmt_peer(&peer->conf); |
2633 | log_info("Rib %s: %s AS%s: %s %s%s", rib_byid(rid)->name, |
2634 | l, log_as(peer->conf.remote_as), message, |
2635 | p ? p : "out of memory", n ? n : ""); |
2636 | |
2637 | free(l); |
2638 | free(n); |
2639 | free(p); |
2640 | } |
2641 | |
2642 | /* |
2643 | * 4-Byte ASN helper function. |
2644 | * Two scenarios need to be considered: |
2645 | * - NEW session with NEW attributes present -> just remove the attributes |
2646 | * - OLD session with NEW attributes present -> try to merge them |
2647 | */ |
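/*
 * Worked example (RFC 6793, example ASN chosen arbitrarily): a
 * 2-byte-only neighbor that aggregated at the 4-byte AS 196629 sends
 * AGGREGATOR = AS_TRANS (23456) together with AS4_AGGREGATOR = 196629,
 * and 4-byte ASNs in the AS_PATH likewise appear as AS_TRANS with the
 * real values carried in AS4_PATH.  The fixup below swaps in the
 * AS4_AGGREGATOR value and splices AS4_PATH into the path via
 * aspath_merge().
 */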
2648 | void |
2649 | rde_as4byte_fixup(struct rde_peer *peer, struct rde_aspath *a) |
2650 | { |
2651 | struct attr *nasp, *naggr, *oaggr; |
2652 | uint32_t as; |
2653 | |
2654 | /* |
2655 | * if either ATTR_AS4_AGGREGATOR or ATTR_AS4_PATH is present |
2656 | * try to fixup the attributes. |
2657 | * Do not fixup if F_ATTR_PARSE_ERR is set. |
2658 | */ |
2659 | if (!(a->flags & F_ATTR_AS4BYTE_NEW0x00100) || a->flags & F_ATTR_PARSE_ERR0x10000) |
2660 | return; |
2661 | |
2662 | /* first get the attributes */ |
2663 | nasp = attr_optget(a, ATTR_AS4_PATH); |
2664 | naggr = attr_optget(a, ATTR_AS4_AGGREGATOR); |
2665 | |
2666 | if (peer_has_as4byte(peer)) { |
2667 | /* NEW session using 4-byte ASNs */ |
2668 | if (nasp) { |
2669 | log_peer_warnx(&peer->conf, "uses 4-byte ASN " |
2670 | "but sent AS4_PATH attribute."); |
2671 | attr_free(a, nasp); |
2672 | } |
2673 | if (naggr) { |
2674 | log_peer_warnx(&peer->conf, "uses 4-byte ASN " |
2675 | "but sent AS4_AGGREGATOR attribute."); |
2676 | attr_free(a, naggr); |
2677 | } |
2678 | return; |
2679 | } |
2680 | /* OLD session using 2-byte ASNs */ |
2681 | /* try to merge the new attributes into the old ones */ |
2682 | if ((oaggr = attr_optget(a, ATTR_AGGREGATOR))) { |
2683 | memcpy(&as, oaggr->data, sizeof(as)); |
2684 | if (ntohl(as) != AS_TRANS) {
2685 | /* per RFC ignore AS4_PATH and AS4_AGGREGATOR */ |
2686 | if (nasp) |
2687 | attr_free(a, nasp); |
2688 | if (naggr) |
2689 | attr_free(a, naggr); |
2690 | return; |
2691 | } |
2692 | if (naggr) { |
2693 | /* switch over to new AGGREGATOR */ |
2694 | attr_free(a, oaggr); |
2695 | if (attr_optadd(a, ATTR_OPTIONAL0x80 | ATTR_TRANSITIVE0x40, |
2696 | ATTR_AGGREGATOR, naggr->data, naggr->len)) |
2697 | fatalx("attr_optadd failed but impossible"); |
2698 | } |
2699 | } |
2700 | /* there is no need for AS4_AGGREGATOR any more */ |
2701 | if (naggr) |
2702 | attr_free(a, naggr); |
2703 | |
2704 | /* merge AS4_PATH with ASPATH */ |
2705 | if (nasp) |
2706 | aspath_merge(a, nasp); |
2707 | } |
2708 | |
2709 | |
2710 | uint8_t |
2711 | rde_aspa_validity(struct rde_peer *peer, struct rde_aspath *asp, uint8_t aid) |
2712 | { |
2713 | if (!peer->conf.ebgp) /* ASPA is only performed on ebgp sessions */ |
2714 | return ASPA_NEVER_KNOWN;
2715 | if (aid != AID_INET && aid != AID_INET6) /* skip uncovered aids */
2716 | return ASPA_NEVER_KNOWN;
2717 | |
2718 | #ifdef MAYBE |
2719 | /* |
2720 | * By default enforce neighbor-as is set for all ebgp sessions. |
2721 | * So if a admin disables this check should we really "reenable" |
2722 | * it here in such a dubious way? |
2723 | * This just fails the ASPA validation for these paths so maybe |
2724 | * this can be helpful. But it is not transparent to the admin. |
2725 | */ |
2726 | |
2727 | /* skip neighbor-as check for transparent RS sessions */ |
2728 | if (peer->role != ROLE_RS_CLIENT && |
2729 | peer->conf.enforce_as != ENFORCE_AS_ON) { |
2730 | uint32_t fas; |
2731 | |
2732 | fas = aspath_neighbor(asp->aspath); |
2733 | if (peer->conf.remote_as != fas) |
2734 | return ASPA_INVALID0x01; |
2735 | } |
2736 | #endif |
2737 | |
2738 | /* if no role is set, the outcome is unknown */ |
2739 | if (peer->role == ROLE_NONE) |
2740 | return ASPA_UNKNOWN0x00; |
2741 | |
2742 | if (peer->role == ROLE_CUSTOMER) |
2743 | return asp->aspa_state.downup; |
2744 | else |
2745 | return asp->aspa_state.onlyup; |
2746 | } |
2747 | |
2748 | /* |
2749 | * route reflector helper function |
2750 | */ |
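/*
 * In RFC 4456 terms: a route is dropped (marked with F_ATTR_LOOP) when
 * its ORIGINATOR_ID equals the local router id or when the local
 * cluster id already appears in its CLUSTER_LIST.  When acting as a
 * reflector, a missing ORIGINATOR_ID is added and the local cluster id
 * is prepended to the CLUSTER_LIST before reflection.
 */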
2751 | void |
2752 | rde_reflector(struct rde_peer *peer, struct rde_aspath *asp) |
2753 | { |
2754 | struct attr *a; |
2755 | uint8_t *p; |
2756 | uint16_t len; |
2757 | uint32_t id; |
2758 | |
2759 | /* do not consider updates with parse errors */ |
2760 | if (asp->flags & F_ATTR_PARSE_ERR0x10000) |
2761 | return; |
2762 | |
2763 | /* check for originator id if eq router_id drop */ |
2764 | if ((a = attr_optget(asp, ATTR_ORIGINATOR_ID)) != NULL((void *)0)) { |
2765 | if (memcmp(&conf->bgpid, a->data, sizeof(conf->bgpid)) == 0) { |
2766 | /* this is coming from myself */ |
2767 | asp->flags |= F_ATTR_LOOP0x00200; |
2768 | return; |
2769 | } |
2770 | } else if (conf->flags & BGPD_FLAG_REFLECTOR0x0004) { |
2771 | if (peer->conf.ebgp) |
2772 | id = conf->bgpid; |
2773 | else |
2774 | id = htonl(peer->remote_bgpid);
2775 | if (attr_optadd(asp, ATTR_OPTIONAL0x80, ATTR_ORIGINATOR_ID, |
2776 | &id, sizeof(uint32_t)) == -1) |
2777 | fatalx("attr_optadd failed but impossible"); |
2778 | } |
2779 | |
2780 | /* check for own id in the cluster list */ |
2781 | if (conf->flags & BGPD_FLAG_REFLECTOR0x0004) { |
2782 | if ((a = attr_optget(asp, ATTR_CLUSTER_LIST)) != NULL((void *)0)) { |
2783 | for (len = 0; len < a->len; |
2784 | len += sizeof(conf->clusterid)) |
2785 | /* check if coming from my cluster */ |
2786 | if (memcmp(&conf->clusterid, a->data + len, |
2787 | sizeof(conf->clusterid)) == 0) { |
2788 | asp->flags |= F_ATTR_LOOP0x00200; |
2789 | return; |
2790 | } |
2791 | |
2792 | /* prepend own clusterid by replacing attribute */ |
2793 | len = a->len + sizeof(conf->clusterid); |
2794 | if (len < a->len) |
2795 | fatalx("rde_reflector: cluster-list overflow"); |
2796 | if ((p = malloc(len)) == NULL((void *)0)) |
2797 | fatal("rde_reflector"); |
2798 | memcpy(p, &conf->clusterid, sizeof(conf->clusterid)); |
2799 | memcpy(p + sizeof(conf->clusterid), a->data, a->len); |
2800 | attr_free(asp, a); |
2801 | if (attr_optadd(asp, ATTR_OPTIONAL0x80, ATTR_CLUSTER_LIST, |
2802 | p, len) == -1) |
2803 | fatalx("attr_optadd failed but impossible"); |
2804 | free(p); |
2805 | } else if (attr_optadd(asp, ATTR_OPTIONAL0x80, ATTR_CLUSTER_LIST, |
2806 | &conf->clusterid, sizeof(conf->clusterid)) == -1) |
2807 | fatalx("attr_optadd failed but impossible"); |
2808 | } |
2809 | } |
2810 | |
2811 | /* |
2812 | * control specific functions |
2813 | */ |
2814 | static void |
2815 | rde_dump_rib_as(struct prefix *p, struct rde_aspath *asp, pid_t pid, int flags, |
2816 | int adjout) |
2817 | { |
2818 | struct ctl_show_rib rib; |
2819 | struct ibuf *wbuf; |
2820 | struct attr *a; |
2821 | struct nexthop *nexthop; |
2822 | struct rib_entry *re; |
2823 | struct prefix *xp; |
2824 | struct rde_peer *peer; |
2825 | time_t staletime; |
2826 | size_t aslen; |
2827 | uint8_t l; |
2828 | |
2829 | nexthop = prefix_nexthop(p); |
2830 | peer = prefix_peer(p); |
2831 | memset(&rib, 0, sizeof(rib)); |
2832 | rib.age = getmonotime() - p->lastchange; |
2833 | rib.local_pref = asp->lpref; |
2834 | rib.med = asp->med; |
2835 | rib.weight = asp->weight; |
2836 | strlcpy(rib.descr, peer->conf.descr, sizeof(rib.descr)); |
2837 | memcpy(&rib.remote_addr, &peer->remote_addr, |
2838 | sizeof(rib.remote_addr)); |
2839 | rib.remote_id = peer->remote_bgpid; |
2840 | if (nexthop != NULL((void *)0)) { |
2841 | rib.exit_nexthop = nexthop->exit_nexthop; |
2842 | rib.true_nexthop = nexthop->true_nexthop; |
2843 | } else { |
2844 | /* announced network can have a NULL nexthop */ |
2845 | rib.exit_nexthop.aid = p->pt->aid; |
2846 | rib.true_nexthop.aid = p->pt->aid; |
2847 | } |
2848 | pt_getaddr(p->pt, &rib.prefix); |
2849 | rib.prefixlen = p->pt->prefixlen; |
2850 | rib.origin = asp->origin; |
2851 | rib.roa_validation_state = prefix_roa_vstate(p); |
2852 | rib.aspa_validation_state = prefix_aspa_vstate(p); |
2853 | rib.dmetric = p->dmetric; |
2854 | rib.flags = 0; |
2855 | if (!adjout) { |
2856 | re = prefix_re(p); |
2857 | TAILQ_FOREACH(xp, &re->prefix_h, entry.list.rib) {
2858 | switch (xp->dmetric) { |
2859 | case PREFIX_DMETRIC_BEST:
2860 | if (xp == p)
2861 | rib.flags |= F_PREF_BEST;
2862 | break;
2863 | case PREFIX_DMETRIC_ECMP:
2864 | if (xp == p)
2865 | rib.flags |= F_PREF_ECMP;
2866 | break;
2867 | case PREFIX_DMETRIC_AS_WIDE:
2868 | if (xp == p)
2869 | rib.flags |= F_PREF_AS_WIDE;
2870 | break; |
2871 | default: |
2872 | xp = NULL((void *)0); /* stop loop */ |
2873 | break; |
2874 | } |
2875 | if (xp == NULL((void *)0) || xp == p) |
2876 | break; |
2877 | } |
2878 | } |
2879 | if (!peer->conf.ebgp) |
2880 | rib.flags |= F_PREF_INTERNAL0x004; |
2881 | if (asp->flags & F_PREFIX_ANNOUNCED0x00400) |
2882 | rib.flags |= F_PREF_ANNOUNCE0x008; |
2883 | if (prefix_eligible(p)) |
2884 | rib.flags |= F_PREF_ELIGIBLE0x001; |
2885 | /* otc loop includes parse err so skip the latter if the first is set */ |
2886 | if (asp->flags & F_ATTR_OTC_LEAK0x02000) |
2887 | rib.flags |= F_PREF_OTC_LEAK0x080; |
2888 | else if (asp->flags & F_ATTR_PARSE_ERR0x10000) |
2889 | rib.flags |= F_PREF_INVALID0x020; |
2890 | staletime = peer->staletime[p->pt->aid]; |
2891 | if (staletime && p->lastchange <= staletime) |
2892 | rib.flags |= F_PREF_STALE0x010; |
2893 | if (!adjout) { |
2894 | if (peer_has_add_path(peer, p->pt->aid, CAPA_AP_RECV0x01)) { |
2895 | rib.path_id = p->path_id; |
2896 | rib.flags |= F_PREF_PATH_ID0x040; |
2897 | } |
2898 | } else { |
2899 | if (peer_has_add_path(peer, p->pt->aid, CAPA_AP_SEND0x02)) { |
2900 | rib.path_id = p->path_id_tx; |
2901 | rib.flags |= F_PREF_PATH_ID0x040; |
2902 | } |
2903 | } |
2904 | aslen = aspath_length(asp->aspath); |
2905 | |
2906 | if ((wbuf = imsg_create(ibuf_se_ctl, IMSG_CTL_SHOW_RIB, 0, pid, |
2907 | sizeof(rib) + aslen)) == NULL((void *)0)) |
2908 | return; |
2909 | if (imsg_add(wbuf, &rib, sizeof(rib)) == -1 || |
2910 | imsg_add(wbuf, aspath_dump(asp->aspath), aslen) == -1) |
2911 | return; |
2912 | imsg_close(ibuf_se_ctl, wbuf); |
2913 | |
2914 | if (flags & F_CTL_DETAIL0x1000) { |
2915 | struct rde_community *comm = prefix_communities(p); |
2916 | size_t len = comm->nentries * sizeof(struct community); |
2917 | if (comm->nentries > 0) { |
2918 | if (imsg_compose(ibuf_se_ctl, |
2919 | IMSG_CTL_SHOW_RIB_COMMUNITIES, 0, pid, -1, |
2920 | comm->communities, len) == -1) |
2921 | return; |
2922 | } |
2923 | for (l = 0; l < asp->others_len; l++) { |
2924 | if ((a = asp->others[l]) == NULL((void *)0)) |
2925 | break; |
2926 | if ((wbuf = imsg_create(ibuf_se_ctl, |
2927 | IMSG_CTL_SHOW_RIB_ATTR, 0, pid, 0)) == NULL((void *)0)) |
2928 | return; |
2929 | if (attr_writebuf(wbuf, a->flags, a->type, a->data, |
2930 | a->len) == -1) { |
2931 | ibuf_free(wbuf); |
2932 | return; |
2933 | } |
2934 | imsg_close(ibuf_se_ctl, wbuf); |
2935 | } |
2936 | } |
2937 | } |
2938 | |
2939 | int |
2940 | rde_match_peer(struct rde_peer *p, struct ctl_neighbor *n) |
2941 | { |
2942 | char *s; |
2943 | |
2944 | if (n && n->addr.aid) { |
2945 | if (memcmp(&p->conf.remote_addr, &n->addr, |
2946 | sizeof(p->conf.remote_addr))) |
2947 | return 0; |
2948 | } else if (n && n->descr[0]) { |
2949 | s = n->is_group ? p->conf.group : p->conf.descr; |
2950 | /* cannot trust n->descr to be properly terminated */ |
2951 | if (strncmp(s, n->descr, sizeof(n->descr))) |
2952 | return 0; |
2953 | } |
2954 | return 1; |
2955 | } |
2956 | |
2957 | static void |
2958 | rde_dump_filter(struct prefix *p, struct ctl_show_rib_request *req, int adjout) |
2959 | { |
2960 | struct rde_aspath *asp; |
2961 | |
2962 | if (!rde_match_peer(prefix_peer(p), &req->neighbor)) |
2963 | return; |
2964 | |
2965 | asp = prefix_aspath(p); |
2966 | if ((req->flags & F_CTL_BEST0x8000) && p->dmetric != PREFIX_DMETRIC_BEST5) |
2967 | return; |
2968 | if ((req->flags & F_CTL_INVALID0x40000) && |
2969 | (asp->flags & F_ATTR_PARSE_ERR0x10000) == 0) |
2970 | return; |
2971 | if ((req->flags & F_CTL_INELIGIBLE0x10000) && prefix_eligible(p)) |
2972 | return; |
2973 | if ((req->flags & F_CTL_LEAKED0x20000) && |
2974 | (asp->flags & F_ATTR_OTC_LEAK0x02000) == 0) |
2975 | return; |
2976 | if ((req->flags & F_CTL_HAS_PATHID0x800000)) { |
2977 | /* Match against the transmit path id if adjout is used. */ |
2978 | if (adjout) { |
2979 | if (req->path_id != p->path_id_tx) |
2980 | return; |
2981 | } else { |
2982 | if (req->path_id != p->path_id) |
2983 | return; |
2984 | } |
2985 | } |
2986 | if (req->as.type != AS_UNDEF && |
2987 | !aspath_match(asp->aspath, &req->as, 0)) |
2988 | return; |
2989 | if (req->community.flags != 0) { |
2990 | if (!community_match(prefix_communities(p), &req->community, |
2991 | NULL((void *)0))) |
2992 | return; |
2993 | } |
2994 | if (!ovs_match(p, req->flags)) |
2995 | return; |
2996 | if (!avs_match(p, req->flags)) |
2997 | return; |
2998 | rde_dump_rib_as(p, asp, req->pid, req->flags, adjout); |
2999 | } |
3000 | |
3001 | static void |
3002 | rde_dump_upcall(struct rib_entry *re, void *ptr) |
3003 | { |
3004 | struct rde_dump_ctx *ctx = ptr; |
3005 | struct prefix *p; |
3006 | |
3007 | if (re == NULL((void *)0)) |
3008 | return; |
3009 | TAILQ_FOREACH(p, &re->prefix_h, entry.list.rib)
3010 | rde_dump_filter(p, &ctx->req, 0); |
3011 | } |
3012 | |
3013 | static void |
3014 | rde_dump_adjout_upcall(struct prefix *p, void *ptr) |
3015 | { |
3016 | struct rde_dump_ctx *ctx = ptr; |
3017 | |
3018 | if ((p->flags & PREFIX_FLAG_ADJOUT) == 0)
3019 | fatalx("%s: prefix without PREFIX_FLAG_ADJOUT hit", __func__);
3020 | if (p->flags & (PREFIX_FLAG_WITHDRAW | PREFIX_FLAG_DEAD))
3021 | return; |
3022 | rde_dump_filter(p, &ctx->req, 1); |
3023 | } |
3024 | |
3025 | static int |
3026 | rde_dump_throttled(void *arg) |
3027 | { |
3028 | struct rde_dump_ctx *ctx = arg; |
3029 | |
3030 | return (ctx->throttled != 0); |
3031 | } |
3032 | |
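      | /*
      |  * Completion upcall of a control request dump.  For Adj-RIB-Out
      |  * requests the dump is restarted for the next matching peer; once no
      |  * peer is left (or for plain RIB dumps) IMSG_CTL_END is sent and the
      |  * context is released.
      |  */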
3033 | static void |
3034 | rde_dump_done(void *arg, uint8_t aid) |
3035 | { |
3036 | struct rde_dump_ctx *ctx = arg; |
3037 | struct rde_peer *peer; |
3038 | u_int error; |
3039 | |
3040 | if (ctx->req.flags & F_CTL_ADJ_OUT) {
3041 | peer = peer_match(&ctx->req.neighbor, ctx->peerid);
3042 | if (peer == NULL)
3043 | goto done;
3044 | ctx->peerid = peer->conf.id;
3045 | switch (ctx->req.type) {
3046 | case IMSG_CTL_SHOW_RIB:
3047 | if (prefix_dump_new(peer, ctx->req.aid,
3048 | CTL_MSG_HIGH_MARK, ctx, rde_dump_adjout_upcall,
3049 | rde_dump_done, rde_dump_throttled) == -1)
3050 | goto nomem;
3051 | break;
3052 | case IMSG_CTL_SHOW_RIB_PREFIX:
3053 | if (prefix_dump_subtree(peer, &ctx->req.prefix,
3054 | ctx->req.prefixlen, CTL_MSG_HIGH_MARK, ctx,
3055 | rde_dump_adjout_upcall, rde_dump_done, |
3056 | rde_dump_throttled) == -1) |
3057 | goto nomem; |
3058 | break; |
3059 | default: |
3060 | fatalx("%s: unsupported imsg type", __func__); |
3061 | } |
3062 | return; |
3063 | } |
3064 | done: |
3065 | imsg_compose(ibuf_se_ctl, IMSG_CTL_END, 0, ctx->req.pid, -1, NULL, 0);
3066 | LIST_REMOVE(ctx, entry);
3067 | free(ctx); |
3068 | return; |
3069 | |
3070 | nomem: |
3071 | log_warn(__func__); |
3072 | error = CTL_RES_NOMEM; |
3073 | imsg_compose(ibuf_se_ctl, IMSG_CTL_RESULT, 0, ctx->req.pid, -1, &error, |
3074 | sizeof(error)); |
3075 | return; |
3076 | } |
3077 | |
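      | /*
      |  * Set up the dump context for a "show rib" / "show network" request.
      |  * Adj-RIB-Out requests walk the per-peer Adj-RIB-Out (possibly for a
      |  * single prefix), everything else starts an asynchronous dump of the
      |  * requested RIB.
      |  */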
3078 | void |
3079 | rde_dump_ctx_new(struct ctl_show_rib_request *req, pid_t pid, |
3080 | enum imsg_type type) |
3081 | { |
3082 | struct rde_dump_ctx *ctx; |
3083 | struct rib_entry *re; |
3084 | struct prefix *p; |
3085 | u_int error; |
3086 | uint8_t hostplen, plen; |
3087 | uint16_t rid; |
3088 | |
3089 | if ((ctx = calloc(1, sizeof(*ctx))) == NULL) {
3090 | nomem: |
3091 | log_warn(__func__); |
3092 | error = CTL_RES_NOMEM; |
3093 | imsg_compose(ibuf_se_ctl, IMSG_CTL_RESULT, 0, pid, -1, &error, |
3094 | sizeof(error)); |
3095 | free(ctx); |
3096 | return; |
3097 | } |
3098 | |
3099 | memcpy(&ctx->req, req, sizeof(struct ctl_show_rib_request)); |
3100 | ctx->req.pid = pid; |
3101 | ctx->req.type = type; |
3102 | |
3103 | if (req->flags & (F_CTL_ADJ_IN | F_CTL_INVALID)) {
3104 | rid = RIB_ADJ_IN;
3105 | } else if (req->flags & F_CTL_ADJ_OUT) {
3106 | struct rde_peer *peer;
3107 | 
3108 | peer = peer_match(&req->neighbor, 0);
3109 | if (peer == NULL) {
3110 | error = CTL_RES_NOSUCHPEER; |
3111 | imsg_compose(ibuf_se_ctl, IMSG_CTL_RESULT, 0, pid, -1, |
3112 | &error, sizeof(error)); |
3113 | free(ctx); |
3114 | return; |
3115 | } |
3116 | ctx->peerid = peer->conf.id; |
3117 | switch (ctx->req.type) { |
3118 | case IMSG_CTL_SHOW_RIB: |
3119 | if (prefix_dump_new(peer, ctx->req.aid, |
3120 | CTL_MSG_HIGH_MARK, ctx, rde_dump_adjout_upcall,
3121 | rde_dump_done, rde_dump_throttled) == -1)
3122 | goto nomem;
3123 | break;
3124 | case IMSG_CTL_SHOW_RIB_PREFIX:
3125 | if (req->flags & F_LONGER) {
3126 | if (prefix_dump_subtree(peer, &req->prefix,
3127 | req->prefixlen, CTL_MSG_HIGH_MARK, ctx,
3128 | rde_dump_adjout_upcall,
3129 | rde_dump_done, rde_dump_throttled) == -1)
3130 | goto nomem;
3131 | break;
3132 | }
3133 | switch (req->prefix.aid) {
3134 | case AID_INET:
3135 | case AID_VPN_IPv4:
3136 | hostplen = 32;
3137 | break;
3138 | case AID_INET6:
3139 | case AID_VPN_IPv6:
3140 | hostplen = 128; |
3141 | break; |
3142 | default: |
3143 | fatalx("%s: unknown af", __func__); |
3144 | } |
3145 | |
3146 | do { |
3147 | if (req->flags & F_SHORTER) {
3148 | for (plen = 0; plen <= req->prefixlen;
3149 | plen++) {
3150 | p = prefix_adjout_lookup(peer,
3151 | &req->prefix, plen);
3152 | /* dump all matching paths */
3153 | while (p != NULL) {
3154 | rde_dump_adjout_upcall(
3155 | p, ctx);
3156 | p = prefix_adjout_next(
3157 | peer, p);
3158 | }
3159 | }
3160 | p = NULL;
3161 | } else if (req->prefixlen == hostplen) { |
3162 | p = prefix_adjout_match(peer, |
3163 | &req->prefix); |
3164 | } else { |
3165 | p = prefix_adjout_lookup(peer, |
3166 | &req->prefix, req->prefixlen); |
3167 | } |
3168 | /* dump all matching paths */ |
3169 | while (p != NULL) {
3170 | rde_dump_adjout_upcall(p, ctx);
3171 | p = prefix_adjout_next(peer, p);
3172 | }
3173 | } while ((peer = peer_match(&req->neighbor,
3174 | peer->conf.id)));
3175 | 
3176 | imsg_compose(ibuf_se_ctl, IMSG_CTL_END, 0, ctx->req.pid,
3177 | -1, NULL, 0);
3178 | free(ctx); |
3179 | return; |
3180 | default: |
3181 | fatalx("%s: unsupported imsg type", __func__); |
3182 | } |
3183 | |
3184 | LIST_INSERT_HEAD(&rde_dump_h, ctx, entry);
3185 | return;
3186 | } else if ((rid = rib_find(req->rib)) == RIB_NOTFOUND) {
3187 | log_warnx("%s: no such rib %s", __func__, req->rib); |
3188 | error = CTL_RES_NOSUCHRIB; |
3189 | imsg_compose(ibuf_se_ctl, IMSG_CTL_RESULT, 0, pid, -1, &error, |
3190 | sizeof(error)); |
3191 | free(ctx); |
3192 | return; |
3193 | } |
3194 | |
3195 | switch (ctx->req.type) { |
3196 | case IMSG_CTL_SHOW_NETWORK: |
3197 | if (rib_dump_new(rid, ctx->req.aid, CTL_MSG_HIGH_MARK, ctx,
3198 | network_dump_upcall, rde_dump_done,
3199 | rde_dump_throttled) == -1)
3200 | goto nomem;
3201 | break;
3202 | case IMSG_CTL_SHOW_RIB:
3203 | if (rib_dump_new(rid, ctx->req.aid, CTL_MSG_HIGH_MARK, ctx,
3204 | rde_dump_upcall, rde_dump_done, rde_dump_throttled) == -1)
3205 | goto nomem;
3206 | break;
3207 | case IMSG_CTL_SHOW_RIB_PREFIX:
3208 | if (req->flags & F_LONGER) {
3209 | if (rib_dump_subtree(rid, &req->prefix, req->prefixlen,
3210 | CTL_MSG_HIGH_MARK, ctx, rde_dump_upcall,
3211 | rde_dump_done, rde_dump_throttled) == -1)
3212 | goto nomem;
3213 | break;
3214 | }
3215 | switch (req->prefix.aid) {
3216 | case AID_INET:
3217 | case AID_VPN_IPv4:
3218 | hostplen = 32;
3219 | break;
3220 | case AID_INET6:
3221 | case AID_VPN_IPv6:
3222 | hostplen = 128; |
3223 | break; |
3224 | default: |
3225 | fatalx("%s: unknown af", __func__); |
3226 | } |
3227 | |
3228 | if (req->flags & F_SHORTER) {
3229 | for (plen = 0; plen <= req->prefixlen; plen++) { |
3230 | re = rib_get_addr(rib_byid(rid), &req->prefix, |
3231 | plen); |
3232 | rde_dump_upcall(re, ctx); |
3233 | } |
3234 | } else if (req->prefixlen == hostplen) { |
3235 | re = rib_match(rib_byid(rid), &req->prefix); |
3236 | rde_dump_upcall(re, ctx); |
3237 | } else { |
3238 | re = rib_get_addr(rib_byid(rid), &req->prefix, |
3239 | req->prefixlen); |
3240 | rde_dump_upcall(re, ctx); |
3241 | } |
3242 | imsg_compose(ibuf_se_ctl, IMSG_CTL_END, 0, ctx->req.pid, |
3243 | -1, NULL, 0);
3244 | free(ctx); |
3245 | return; |
3246 | default: |
3247 | fatalx("%s: unsupported imsg type", __func__); |
3248 | } |
3249 | LIST_INSERT_HEAD(&rde_dump_h, ctx, entry);
3250 | } |
3251 | |
3252 | void |
3253 | rde_dump_ctx_throttle(pid_t pid, int throttle) |
3254 | { |
3255 | struct rde_dump_ctx *ctx; |
3256 | |
3257 | LIST_FOREACH(ctx, &rde_dump_h, entry) {
3258 | if (ctx->req.pid == pid) { |
3259 | ctx->throttled = throttle; |
3260 | return; |
3261 | } |
3262 | } |
3263 | } |
3264 | |
3265 | void |
3266 | rde_dump_ctx_terminate(pid_t pid) |
3267 | { |
3268 | struct rde_dump_ctx *ctx; |
3269 | |
3270 | LIST_FOREACH(ctx, &rde_dump_h, entry) {
3271 | if (ctx->req.pid == pid) { |
3272 | rib_dump_terminate(ctx); |
3273 | return; |
3274 | } |
3275 | } |
3276 | } |
3277 | |
3278 | static int |
3279 | rde_mrt_throttled(void *arg) |
3280 | { |
3281 | struct mrt *mrt = arg; |
3282 | |
3283 | return (mrt->wbuf.queued > SESS_MSG_LOW_MARK);
3284 | } |
3285 | |
3286 | static void |
3287 | rde_mrt_done(void *ptr, uint8_t aid) |
3288 | { |
3289 | mrt_done(ptr); |
3290 | } |
3291 | |
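      | /*
      |  * Start an MRT table dump: copy the request, resolve the RIB, emit
      |  * the TABLE_DUMP_V2 header if needed and queue an asynchronous RIB
      |  * walk that feeds mrt_dump_upcall().
      |  */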
3292 | void |
3293 | rde_dump_mrt_new(struct mrt *mrt, pid_t pid, int fd) |
3294 | { |
3295 | struct rde_mrt_ctx *ctx; |
3296 | uint16_t rid; |
3297 | |
3298 | if ((ctx = calloc(1, sizeof(*ctx))) == NULL) {
3299 | log_warn("rde_dump_mrt_new");
3300 | return;
3301 | }
3302 | memcpy(&ctx->mrt, mrt, sizeof(struct mrt));
3303 | TAILQ_INIT(&ctx->mrt.wbuf.bufs);
3304 | ctx->mrt.wbuf.fd = fd;
3305 | ctx->mrt.state = MRT_STATE_RUNNING;
3306 | rid = rib_find(ctx->mrt.rib);
3307 | if (rid == RIB_NOTFOUND) {
3308 | log_warnx("non existing RIB %s for mrt dump", ctx->mrt.rib); |
3309 | free(ctx); |
3310 | return; |
3311 | } |
3312 | |
3313 | if (ctx->mrt.type == MRT_TABLE_DUMP_V2) |
3314 | mrt_dump_v2_hdr(&ctx->mrt, conf); |
3315 | |
3316 | if (rib_dump_new(rid, AID_UNSPEC, CTL_MSG_HIGH_MARK, &ctx->mrt,
3317 | mrt_dump_upcall, rde_mrt_done, rde_mrt_throttled) == -1)
3318 | fatal("%s: rib_dump_new", __func__);
3319 | 
3320 | LIST_INSERT_HEAD(&rde_mrts, ctx, entry);
3321 | rde_mrt_cnt++; |
3322 | } |
3323 | |
3324 | /* |
3325 | * kroute specific functions |
3326 | */ |
3327 | int |
3328 | rde_l3vpn_import(struct rde_community *comm, struct l3vpn *rd) |
3329 | { |
3330 | struct filter_set *s; |
3331 | |
3332 | TAILQ_FOREACH(s, &rd->import, entry) {
3333 | if (community_match(comm, &s->action.community, 0)) |
3334 | return (1); |
3335 | } |
3336 | return (0); |
3337 | } |
3338 | |
3339 | void |
3340 | rde_send_kroute_flush(struct rib *rib) |
3341 | { |
3342 | if (imsg_compose(ibuf_main, IMSG_KROUTE_FLUSH, rib->rtableid, 0, -1, |
3343 | NULL, 0) == -1)
3344 | fatal("%s %d imsg_compose error", __func__, __LINE__);
3345 | } |
3346 | |
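      | /*
      |  * Tell the parent process to update the FIB after the best path
      |  * changed from old to new.  Self-announced prefixes are never
      |  * installed; for the Loc-RIB, VPN prefixes are replicated into every
      |  * L3VPN rtable whose import filter matches.
      |  */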
3347 | void |
3348 | rde_send_kroute(struct rib *rib, struct prefix *new, struct prefix *old) |
3349 | { |
3350 | struct kroute_full kf; |
3351 | struct prefix *p; |
3352 | struct l3vpn *vpn; |
3353 | enum imsg_type type; |
3354 | |
3355 | /* |
3356 | * Make sure that self announce prefixes are not committed to the |
3357 | * FIB. If both prefixes are unreachable no update is needed. |
3358 | */ |
3359 | if ((old == NULL || prefix_aspath(old)->flags & F_PREFIX_ANNOUNCED) &&
3360 | (new == NULL || prefix_aspath(new)->flags & F_PREFIX_ANNOUNCED))
3361 | return;
3362 | 
3363 | if (new == NULL || prefix_aspath(new)->flags & F_PREFIX_ANNOUNCED) {
3364 | type = IMSG_KROUTE_DELETE; |
3365 | p = old; |
3366 | } else { |
3367 | type = IMSG_KROUTE_CHANGE; |
3368 | p = new; |
3369 | } |
3370 | |
3371 | memset(&kf, 0, sizeof(kf)); |
3372 | pt_getaddr(p->pt, &kf.prefix); |
3373 | kf.prefixlen = p->pt->prefixlen; |
3374 | if (type == IMSG_KROUTE_CHANGE) { |
3375 | if (prefix_nhflags(p) == NEXTHOP_REJECT)
3376 | kf.flags |= F_REJECT;
3377 | if (prefix_nhflags(p) == NEXTHOP_BLACKHOLE)
3378 | kf.flags |= F_BLACKHOLE;
3379 | kf.nexthop = prefix_nexthop(p)->exit_nexthop;
3380 | strlcpy(kf.label, rtlabel_id2name(prefix_aspath(p)->rtlabelid),
3381 | sizeof(kf.label));
3382 | }
3383 | 
3384 | switch (kf.prefix.aid) {
3385 | case AID_VPN_IPv4:
3386 | case AID_VPN_IPv6:
3387 | if (!(rib->flags & F_RIB_LOCAL))
3388 | /* not Loc-RIB, no update for VPNs */
3389 | break;
3390 | 
3391 | SIMPLEQ_FOREACH(vpn, &conf->l3vpns, entry) {
3392 | if (!rde_l3vpn_import(prefix_communities(p), vpn)) |
3393 | continue; |
3394 | /* XXX not ideal but this will change */ |
3395 | kf.ifindex = if_nametoindex(vpn->ifmpe); |
3396 | if (imsg_compose(ibuf_main, type, vpn->rtableid, 0, -1, |
3397 | &kf, sizeof(kf)) == -1) |
3398 | fatal("%s %d imsg_compose error", __func__, |
3399 | __LINE__);
3400 | }
3401 | break;
3402 | default:
3403 | if (imsg_compose(ibuf_main, type, rib->rtableid, 0, -1,
3404 | &kf, sizeof(kf)) == -1)
3405 | fatal("%s %d imsg_compose error", __func__, __LINE__);
3406 | break; |
3407 | } |
3408 | } |
3409 | |
3410 | /* |
3411 | * update specific functions |
3412 | */ |
3413 | int |
3414 | rde_evaluate_all(void) |
3415 | { |
3416 | return rde_eval_all; |
3417 | } |
3418 | |
3419 | /* flush Adj-RIB-Out by withdrawing all prefixes */ |
3420 | static void |
3421 | rde_up_flush_upcall(struct prefix *p, void *ptr) |
3422 | { |
3423 | prefix_adjout_withdraw(p); |
3424 | } |
3425 | |
3426 | u_char queue_buf[4096]; |
3427 | |
3428 | int |
3429 | rde_update_queue_pending(void) |
3430 | { |
3431 | struct rde_peer *peer; |
3432 | uint8_t aid; |
3433 | |
3434 | if (ibuf_se && ibuf_se->w.queued >= SESS_MSG_HIGH_MARK)
3435 | return 0;
3436 | 
3437 | RB_FOREACH(peer, peer_tree, &peertable) {
3438 | if (peer->conf.id == 0)
3439 | continue;
3440 | if (peer->state != PEER_UP)
3441 | continue;
3442 | if (peer->throttled)
3443 | continue;
3444 | for (aid = 0; aid < AID_MAX; aid++) {
3445 | if (!RB_EMPTY(&peer->updates[aid]) ||
3446 | !RB_EMPTY(&peer->withdraws[aid]))
3447 | return 1; |
3448 | } |
3449 | } |
3450 | return 0; |
3451 | } |
3452 | |
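      | /*
      |  * Generate UPDATE messages for one AID: withdraws first, then
      |  * updates, each pass bounded by RDE_RUNNER_ROUNDS so a single call
      |  * cannot monopolize the RDE.  Once a peer's queue drains, End-of-RIB
      |  * and end-of-route-refresh markers are sent where negotiated.
      |  */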
3453 | void |
3454 | rde_update_queue_runner(uint8_t aid) |
3455 | { |
3456 | struct rde_peer *peer; |
3457 | struct ibuf *buf; |
3458 | int sent, max = RDE_RUNNER_ROUNDS;
3459 | 
3460 | /* first withdraws ... */
3461 | do {
3462 | sent = 0;
3463 | RB_FOREACH(peer, peer_tree, &peertable) {
3464 | if (peer->conf.id == 0)
3465 | continue;
3466 | if (peer->state != PEER_UP)
3467 | continue;
3468 | if (peer->throttled)
3469 | continue;
3470 | if (RB_EMPTY(&peer->withdraws[aid]))
3471 | continue;
3472 | 
3473 | if ((buf = ibuf_dynamic(4, 4096 - MSGSIZE_HEADER)) ==
3474 | NULL)
3475 | fatal("%s", __func__); |
3476 | if (up_dump_withdraws(buf, peer, aid) == -1) { |
3477 | ibuf_free(buf); |
3478 | continue; |
3479 | } |
3480 | if (imsg_compose_ibuf(ibuf_se, IMSG_UPDATE, |
3481 | peer->conf.id, 0, buf) == -1) |
3482 | fatal("%s: imsg_create error", __func__); |
3483 | sent++; |
3484 | } |
3485 | max -= sent; |
3486 | } while (sent != 0 && max > 0); |
3487 | |
3488 | /* ... then updates */ |
3489 | max = RDE_RUNNER_ROUNDS;
3490 | do {
3491 | sent = 0;
3492 | RB_FOREACH(peer, peer_tree, &peertable) {
3493 | if (peer->conf.id == 0)
3494 | continue;
3495 | if (peer->state != PEER_UP)
3496 | continue;
3497 | if (peer->throttled)
3498 | continue;
3499 | if (RB_EMPTY(&peer->updates[aid]))
3500 | continue;
3501 | 
3502 | if (up_is_eor(peer, aid)) {
3503 | int sent_eor = peer->sent_eor & (1 << aid);
3504 | if (peer->capa.grestart.restart && !sent_eor)
3505 | rde_peer_send_eor(peer, aid);
3506 | if (peer->capa.enhanced_rr && sent_eor)
3507 | rde_peer_send_rrefresh(peer, aid,
3508 | ROUTE_REFRESH_END_RR);
3509 | continue;
3510 | }
3511 | 
3512 | if ((buf = ibuf_dynamic(4, 4096 - MSGSIZE_HEADER)) ==
3513 | NULL)
3514 | fatal("%s", __func__); |
3515 | if (up_dump_update(buf, peer, aid) == -1) { |
3516 | ibuf_free(buf); |
3517 | continue; |
3518 | } |
3519 | if (imsg_compose_ibuf(ibuf_se, IMSG_UPDATE, |
3520 | peer->conf.id, 0, buf) == -1) |
3521 | fatal("%s: imsg_compose_ibuf error", __func__); |
3522 | sent++; |
3523 | } |
3524 | max -= sent; |
3525 | } while (sent != 0 && max > 0); |
3526 | } |
3527 | |
3528 | /* |
3529 | * pf table specific functions |
3530 | */ |
3531 | struct rde_pftable_node { |
3532 | RB_ENTRY(rde_pftable_node) entry;
3533 | struct pt_entry *prefix;
3534 | int refcnt;
3535 | uint16_t id;
3536 | };
3537 | RB_HEAD(rde_pftable_tree, rde_pftable_node);
3538 | |
3539 | static inline int |
3540 | rde_pftable_cmp(struct rde_pftable_node *a, struct rde_pftable_node *b) |
3541 | { |
3542 | if (a->prefix > b->prefix) |
3543 | return 1; |
3544 | if (a->prefix < b->prefix) |
3545 | return -1; |
3546 | return (a->id - b->id); |
3547 | } |
3548 | |
3549 | RB_GENERATE_STATIC(rde_pftable_tree, rde_pftable_node, entry, rde_pftable_cmp);
3550 | 
3551 | struct rde_pftable_tree pftable_tree = RB_INITIALIZER(&pftable_tree);
3552 | int need_commit; |
3553 | |
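      | /*
      |  * pf table entries are tracked as reference-counted (prefix, table
      |  * id) nodes; only the first addition and the last removal of a node
      |  * are reported to the parent process, and rde_commit_pftable()
      |  * batches the final commit.
      |  */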
3554 | static void |
3555 | rde_pftable_send(uint16_t id, struct pt_entry *pt, int del) |
3556 | { |
3557 | struct pftable_msg pfm; |
3558 | |
3559 | if (id == 0) |
3560 | return; |
3561 | |
3562 | /* do not run while cleaning up */ |
3563 | if (rde_quit) |
3564 | return; |
3565 | |
3566 | memset(&pfm, 0, sizeof(pfm)); |
3567 | strlcpy(pfm.pftable, pftable_id2name(id), sizeof(pfm.pftable)); |
3568 | pt_getaddr(pt, &pfm.addr); |
3569 | pfm.len = pt->prefixlen; |
3570 | |
3571 | if (imsg_compose(ibuf_main, |
3572 | del ? IMSG_PFTABLE_REMOVE : IMSG_PFTABLE_ADD, |
3573 | 0, 0, -1, &pfm, sizeof(pfm)) == -1) |
3574 | fatal("%s %d imsg_compose error", __func__, __LINE__);
3575 | |
3576 | need_commit = 1; |
3577 | } |
3578 | |
3579 | void |
3580 | rde_pftable_add(uint16_t id, struct prefix *p) |
3581 | { |
3582 | struct rde_pftable_node *pfn, node; |
3583 | |
3584 | memset(&node, 0, sizeof(node)); |
3585 | node.prefix = p->pt; |
3586 | node.id = id; |
3587 | |
3588 | pfn = RB_FIND(rde_pftable_tree, &pftable_tree, &node);
3589 | if (pfn == NULL) {
3590 | if ((pfn = calloc(1, sizeof(*pfn))) == NULL)
3591 | fatal("%s", __func__);
3592 | pfn->prefix = pt_ref(p->pt);
3593 | pfn->id = id;
3594 | 
3595 | if (RB_INSERT(rde_pftable_tree, &pftable_tree, pfn) != NULL)
3596 | fatalx("%s: tree corrupt", __func__); |
3597 | |
3598 | rde_pftable_send(id, p->pt, 0); |
3599 | } |
3600 | pfn->refcnt++; |
3601 | } |
3602 | |
3603 | void |
3604 | rde_pftable_del(uint16_t id, struct prefix *p) |
3605 | { |
3606 | struct rde_pftable_node *pfn, node; |
3607 | |
3608 | memset(&node, 0, sizeof(node)); |
3609 | node.prefix = p->pt; |
3610 | node.id = id; |
3611 | |
3612 | pfn = RB_FIND(rde_pftable_tree, &pftable_tree, &node);
3613 | if (pfn == NULL)
3614 | return;
3615 | 
3616 | if (--pfn->refcnt <= 0) {
3617 | rde_pftable_send(id, p->pt, 1);
3618 | 
3619 | if (RB_REMOVE(rde_pftable_tree, &pftable_tree, pfn) == NULL)
3620 | fatalx("%s: tree corrupt", __func__); |
3621 | |
3622 | pt_unref(pfn->prefix); |
3623 | free(pfn); |
3624 | } |
3625 | } |
3626 | |
3627 | void |
3628 | rde_commit_pftable(void) |
3629 | { |
3630 | /* do not run while cleaning up */ |
3631 | if (rde_quit) |
3632 | return; |
3633 | |
3634 | if (!need_commit) |
3635 | return; |
3636 | |
3637 | if (imsg_compose(ibuf_main, IMSG_PFTABLE_COMMIT, 0, 0, -1, NULL, 0) ==
3638 | -1)
3639 | fatal("%s %d imsg_compose error", __func__, __LINE__);
3640 | |
3641 | need_commit = 0; |
3642 | } |
3643 | |
3644 | /* |
3645 | * nexthop specific functions |
3646 | */ |
3647 | void |
3648 | rde_send_nexthop(struct bgpd_addr *next, int insert) |
3649 | { |
3650 | int type; |
3651 | |
3652 | if (insert) |
3653 | type = IMSG_NEXTHOP_ADD; |
3654 | else |
3655 | type = IMSG_NEXTHOP_REMOVE; |
3656 | |
3657 | if (imsg_compose(ibuf_main, type, 0, 0, -1, next, |
3658 | sizeof(struct bgpd_addr)) == -1) |
3659 | fatal("%s %d imsg_compose error", __func__, __LINE__);
3660 | } |
3661 | |
3662 | /* |
3663 | * soft reconfig specific functions |
3664 | */ |
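      | /*
      |  * Second stage of a config reload: merge nconf into the running
      |  * config, flip the filter rules, mark peers and RIBs that need soft
      |  * reconfiguration and kick off the Adj-RIB-In walk; the remaining
      |  * stages run from rde_softreconfig_in_done().
      |  */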
3665 | void |
3666 | rde_reload_done(void) |
3667 | { |
3668 | struct rde_peer *peer; |
3669 | struct filter_head *fh; |
3670 | struct rde_prefixset_head prefixsets_old; |
3671 | struct rde_prefixset_head originsets_old; |
3672 | struct as_set_head as_sets_old; |
3673 | uint16_t rid; |
3674 | int reload = 0; |
3675 | |
3676 | softreconfig = 0; |
3677 | |
3678 | SIMPLEQ_INIT(&prefixsets_old);
3679 | SIMPLEQ_INIT(&originsets_old);
3680 | SIMPLEQ_INIT(&as_sets_old);
3681 | SIMPLEQ_CONCAT(&prefixsets_old, &conf->rde_prefixsets);
3682 | SIMPLEQ_CONCAT(&originsets_old, &conf->rde_originsets);
3683 | SIMPLEQ_CONCAT(&as_sets_old, &conf->as_sets);
3684 | 
3685 | /* merge the main config */
3686 | copy_config(conf, nconf);
3687 | 
3688 | /* need to copy the sets and roa table and clear them in nconf */
3689 | SIMPLEQ_CONCAT(&conf->rde_prefixsets, &nconf->rde_prefixsets);
3690 | SIMPLEQ_CONCAT(&conf->rde_originsets, &nconf->rde_originsets);
3691 | SIMPLEQ_CONCAT(&conf->as_sets, &nconf->as_sets);
3692 | 
3693 | /* apply new set of l3vpn, sync will be done later */
3694 | free_l3vpns(&conf->l3vpns);
3695 | SIMPLEQ_CONCAT(&conf->l3vpns, &nconf->l3vpns);
3696 | /* XXX WHERE IS THE SYNC ??? */
3697 | 
3698 | free_config(nconf);
3699 | nconf = NULL;
3700 | 
3701 | /* sync peerself with conf */
3702 | peerself->remote_bgpid = ntohl(conf->bgpid);
3703 | peerself->conf.local_as = conf->as;
3704 | peerself->conf.remote_as = conf->as;
3705 | peerself->conf.remote_addr.aid = AID_INET;
3706 | peerself->conf.remote_addr.v4.s_addr = conf->bgpid;
3707 | peerself->conf.remote_masklen = 32; |
3708 | peerself->short_as = conf->short_as; |
3709 | |
3710 | rde_mark_prefixsets_dirty(&prefixsets_old, &conf->rde_prefixsets); |
3711 | rde_mark_prefixsets_dirty(&originsets_old, &conf->rde_originsets); |
3712 | as_sets_mark_dirty(&as_sets_old, &conf->as_sets); |
3713 | |
3714 | |
3715 | /* make sure that rde_eval_all is correctly set after a config change */ |
3716 | rde_eval_all = 0; |
3717 | |
3718 | /* Make the new outbound filter rules the active one. */ |
3719 | filterlist_free(out_rules); |
3720 | out_rules = out_rules_tmp; |
3721 | out_rules_tmp = NULL;
3722 | 
3723 | /* check if filter changed */
3724 | RB_FOREACH(peer, peer_tree, &peertable) {
3725 | if (peer->conf.id == 0) /* ignore peerself */ |
3726 | continue; |
3727 | peer->reconf_out = 0; |
3728 | peer->reconf_rib = 0; |
3729 | if (peer->export_type != peer->conf.export_type) { |
3730 | log_peer_info(&peer->conf, "export type change, " |
3731 | "reloading"); |
3732 | peer->reconf_rib = 1; |
3733 | } |
3734 | if ((peer->flags & PEERFLAG_EVALUATE_ALL) !=
3735 | (peer->conf.flags & PEERFLAG_EVALUATE_ALL)) {
3736 | log_peer_info(&peer->conf, "rde evaluate change, "
3737 | "reloading");
3738 | peer->reconf_rib = 1;
3739 | }
3740 | if ((peer->flags & PEERFLAG_TRANS_AS) !=
3741 | (peer->conf.flags & PEERFLAG_TRANS_AS)) {
3742 | log_peer_info(&peer->conf, "transparent-as change, " |
3743 | "reloading"); |
3744 | peer->reconf_rib = 1; |
3745 | } |
3746 | if (peer->loc_rib_id != rib_find(peer->conf.rib)) { |
3747 | log_peer_info(&peer->conf, "rib change, reloading"); |
3748 | peer->loc_rib_id = rib_find(peer->conf.rib); |
3749 | if (peer->loc_rib_id == RIB_NOTFOUND)
3750 | fatalx("King Bula's peer met an unknown RIB"); |
3751 | peer->reconf_rib = 1; |
3752 | } |
3753 | /* |
3754 | * Update add-path settings but only if the session is |
3755 | * running with add-path and the config uses add-path |
3756 | * as well. |
3757 | */ |
3758 | if (peer_has_add_path(peer, AID_UNSPEC, CAPA_AP_SEND)) {
3759 | if (peer->conf.eval.mode != ADDPATH_EVAL_NONE && |
3760 | memcmp(&peer->eval, &peer->conf.eval, |
3761 | sizeof(peer->eval)) != 0) { |
3762 | log_peer_info(&peer->conf, |
3763 | "addpath eval change, reloading"); |
3764 | peer->reconf_out = 1; |
3765 | peer->eval = peer->conf.eval; |
3766 | } |
3767 | /* add-path send needs rde_eval_all */ |
3768 | rde_eval_all = 1; |
3769 | } |
3770 | if (peer->role != peer->conf.role) { |
3771 | if (reload == 0) |
3772 | log_debug("peer role change: " |
3773 | "reloading Adj-RIB-In"); |
3774 | peer->role = peer->conf.role; |
3775 | reload++; |
3776 | } |
3777 | peer->export_type = peer->conf.export_type; |
3778 | peer->flags = peer->conf.flags; |
3779 | if (peer->flags & PEERFLAG_EVALUATE_ALL)
3780 | rde_eval_all = 1;
3781 | 
3782 | if (peer->reconf_rib) {
3783 | if (prefix_dump_new(peer, AID_UNSPEC,
3784 | RDE_RUNNER_ROUNDS, NULL, rde_up_flush_upcall,
3785 | rde_softreconfig_in_done, NULL) == -1)
3786 | fatal("%s: prefix_dump_new", __func__); |
3787 | log_peer_info(&peer->conf, "flushing Adj-RIB-Out"); |
3788 | softreconfig++; /* account for the running flush */ |
3789 | continue; |
3790 | } |
3791 | |
3792 | /* reapply outbound filters for this peer */ |
3793 | fh = peer_apply_out_filter(peer, out_rules); |
3794 | |
3795 | if (!rde_filter_equal(peer->out_rules, fh)) { |
3796 | char *p = log_fmt_peer(&peer->conf); |
3797 | log_debug("out filter change: reloading peer %s", p); |
3798 | free(p); |
3799 | peer->reconf_out = 1; |
3800 | } |
3801 | filterlist_free(fh); |
3802 | } |
3803 | |
3804 | /* bring ribs in sync */ |
3805 | for (rid = 0; rid < rib_size; rid++) { |
3806 | struct rib *rib = rib_byid(rid); |
3807 | if (rib == NULL)
3808 | continue; |
3809 | rde_filter_calc_skip_steps(rib->in_rules_tmp); |
3810 | |
3811 | /* flip rules, make new active */ |
3812 | fh = rib->in_rules; |
3813 | rib->in_rules = rib->in_rules_tmp; |
3814 | rib->in_rules_tmp = fh; |
3815 | |
3816 | switch (rib->state) { |
3817 | case RECONF_DELETE: |
3818 | rib_free(rib); |
3819 | break; |
3820 | case RECONF_RELOAD: |
3821 | if (rib_update(rib)) { |
3822 | RB_FOREACH(peer, peer_tree, &peertable) {
3823 | /* ignore peerself */ |
3824 | if (peer->conf.id == 0) |
3825 | continue; |
3826 | /* skip peers using a different rib */ |
3827 | if (peer->loc_rib_id != rib->id) |
3828 | continue; |
3829 | /* peer rib is already being flushed */ |
3830 | if (peer->reconf_rib) |
3831 | continue; |
3832 | |
3833 | if (prefix_dump_new(peer, AID_UNSPEC,
3834 | RDE_RUNNER_ROUNDS, NULL,
3835 | rde_up_flush_upcall,
3836 | rde_softreconfig_in_done,
3837 | NULL) == -1)
3838 | fatal("%s: prefix_dump_new", |
3839 | __func__); |
3840 | |
3841 | log_peer_info(&peer->conf, |
3842 | "flushing Adj-RIB-Out"); |
3843 | /* account for the running flush */ |
3844 | softreconfig++; |
3845 | } |
3846 | } |
3847 | |
3848 | rib->state = RECONF_KEEP; |
3849 | /* FALLTHROUGH */ |
3850 | case RECONF_KEEP: |
3851 | if (rde_filter_equal(rib->in_rules, rib->in_rules_tmp)) |
3852 | /* rib is in sync */ |
3853 | break; |
3854 | log_debug("in filter change: reloading RIB %s", |
3855 | rib->name); |
3856 | rib->state = RECONF_RELOAD; |
3857 | reload++; |
3858 | break; |
3859 | case RECONF_REINIT: |
3860 | /* new rib */ |
3861 | rib->state = RECONF_RELOAD; |
3862 | reload++; |
3863 | break; |
3864 | case RECONF_NONE: |
3865 | break; |
3866 | } |
3867 | filterlist_free(rib->in_rules_tmp); |
3868 | rib->in_rules_tmp = NULL;
3869 | } |
3870 | |
3871 | /* old filters removed, free all sets */ |
3872 | free_rde_prefixsets(&prefixsets_old); |
3873 | free_rde_prefixsets(&originsets_old); |
3874 | as_sets_free(&as_sets_old); |
3875 | |
3876 | log_info("RDE reconfigured"); |
3877 | |
3878 | softreconfig++; |
3879 | if (reload > 0) { |
3880 | if (rib_dump_new(RIB_ADJ_IN, AID_UNSPEC, RDE_RUNNER_ROUNDS,
3881 | NULL, rde_softreconfig_in, rde_softreconfig_in_done,
3882 | NULL) == -1)
3883 | fatal("%s: rib_dump_new", __func__);
3884 | log_info("running softreconfig in");
3885 | } else {
3886 | rde_softreconfig_in_done((void *)1, AID_UNSPEC);
3887 | } |
3888 | } |
3889 | |
3890 | static void |
3891 | rde_softreconfig_in_done(void *arg, uint8_t dummy) |
3892 | { |
3893 | struct rde_peer *peer; |
3894 | uint16_t i; |
3895 | |
3896 | softreconfig--; |
3897 | /* one guy done but other dumps are still running */ |
3898 | if (softreconfig > 0) |
3899 | return; |
3900 | |
3901 | if (arg == NULL)
3902 | log_info("softreconfig in done"); |
3903 | |
3904 | /* now do the Adj-RIB-Out sync and a possible FIB sync */ |
3905 | softreconfig = 0; |
3906 | for (i = 0; i < rib_size; i++) { |
3907 | struct rib *rib = rib_byid(i); |
3908 | if (rib == NULL)
3909 | continue;
3910 | rib->state = RECONF_NONE;
3911 | if (rib->fibstate == RECONF_RELOAD) {
3912 | if (rib_dump_new(i, AID_UNSPEC, RDE_RUNNER_ROUNDS,
3913 | rib, rde_softreconfig_sync_fib,
3914 | rde_softreconfig_sync_done, NULL) == -1)
3915 | fatal("%s: rib_dump_new", __func__);
3916 | softreconfig++;
3917 | log_info("starting fib sync for rib %s",
3918 | rib->name);
3919 | } else if (rib->fibstate == RECONF_REINIT) {
3920 | if (rib_dump_new(i, AID_UNSPEC, RDE_RUNNER_ROUNDS,
3921 | rib, rde_softreconfig_sync_reeval,
3922 | rde_softreconfig_sync_done, NULL) == -1)
3923 | fatal("%s: rib_dump_new", __func__); |
3924 | softreconfig++; |
3925 | log_info("starting re-evaluation of rib %s", |
3926 | rib->name); |
3927 | } |
3928 | } |
3929 | |
3930 | RB_FOREACH(peer, peer_tree, &peertable) {
3931 | uint8_t aid; |
3932 | |
3933 | if (peer->reconf_out) { |
3934 | if (peer->export_type == EXPORT_NONE) { |
3935 | /* nothing to do here */ |
3936 | peer->reconf_out = 0; |
3937 | } else if (peer->export_type == EXPORT_DEFAULT_ROUTE) { |
3938 | /* just resend the default route */ |
3939 | for (aid = 0; aid < AID_MAX; aid++) {
3940 | if (peer->capa.mp[aid]) |
3941 | up_generate_default(peer, aid); |
3942 | } |
3943 | peer->reconf_out = 0; |
3944 | } else |
3945 | rib_byid(peer->loc_rib_id)->state = |
3946 | RECONF_RELOAD; |
3947 | } else if (peer->reconf_rib) { |
3948 | /* dump the full table to neighbors that changed rib */ |
3949 | for (aid = 0; aid < AID_MAX; aid++) {
3950 | if (peer->capa.mp[aid]) |
3951 | peer_dump(peer, aid); |
3952 | } |
3953 | } |
3954 | } |
3955 | |
3956 | for (i = 0; i < rib_size; i++) { |
3957 | struct rib *rib = rib_byid(i); |
3958 | if (rib == NULL)
3959 | continue;
3960 | if (rib->state == RECONF_RELOAD) {
3961 | if (rib_dump_new(i, AID_UNSPEC, RDE_RUNNER_ROUNDS,
3962 | rib, rde_softreconfig_out,
3963 | rde_softreconfig_out_done, NULL) == -1)
3964 | fatal("%s: rib_dump_new", __func__); |
3965 | softreconfig++; |
3966 | log_info("starting softreconfig out for rib %s", |
3967 | rib->name); |
3968 | } |
3969 | } |
3970 | |
3971 | /* if nothing to do move to last stage */ |
3972 | if (softreconfig == 0) |
3973 | rde_softreconfig_done(); |
3974 | } |
3975 | |
3976 | static void |
3977 | rde_softreconfig_out_done(void *arg, uint8_t aid) |
3978 | { |
3979 | struct rib *rib = arg; |
3980 | |
3981 | /* this RIB dump is done */ |
3982 | log_info("softreconfig out done for %s", rib->name); |
3983 | |
3984 | /* check if other dumps are still running */ |
3985 | if (--softreconfig == 0) |
3986 | rde_softreconfig_done(); |
3987 | } |
3988 | |
3989 | static void |
3990 | rde_softreconfig_done(void) |
3991 | { |
3992 | uint16_t i; |
3993 | |
3994 | for (i = 0; i < rib_size; i++) { |
3995 | struct rib *rib = rib_byid(i); |
3996 | if (rib == NULL)
3997 | continue; |
3998 | rib->state = RECONF_NONE; |
3999 | } |
4000 | |
4001 | log_info("RDE soft reconfiguration done"); |
4002 | imsg_compose(ibuf_main, IMSG_RECONF_DONE, 0, 0, |
4003 | -1, NULL, 0);
4004 | } |
4005 | |
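      | /*
      |  * Re-run the input filters over one Adj-RIB-In entry and update or
      |  * withdraw the prefix in every RIB marked RECONF_RELOAD.  Announced
      |  * networks are skipped since they are never filtered.
      |  */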
4006 | static void |
4007 | rde_softreconfig_in(struct rib_entry *re, void *bula) |
4008 | { |
4009 | struct filterstate state; |
4010 | struct rib *rib; |
4011 | struct prefix *p; |
4012 | struct pt_entry *pt; |
4013 | struct rde_peer *peer; |
4014 | struct rde_aspath *asp; |
4015 | enum filter_actions action; |
4016 | struct bgpd_addr prefix; |
4017 | uint16_t i; |
4018 | uint8_t aspa_vstate; |
4019 | |
4020 | pt = re->prefix; |
4021 | pt_getaddr(pt, &prefix); |
4022 | TAILQ_FOREACH(p, &re->prefix_h, entry.list.rib) {
4023 | asp = prefix_aspath(p); |
4024 | peer = prefix_peer(p); |
4025 | |
4026 | /* possible role change update ASPA validation state */ |
4027 | if (prefix_aspa_vstate(p) == ASPA_NEVER_KNOWN)
4028 | aspa_vstate = ASPA_NEVER_KNOWN;
4029 | else
4030 | aspa_vstate = rde_aspa_validity(peer, asp, pt->aid);
4031 | prefix_set_vstate(p, prefix_roa_vstate(p), aspa_vstate);
4032 | 
4033 | /* skip announced networks, they are never filtered */
4034 | if (asp->flags & F_PREFIX_ANNOUNCED)
4035 | continue;
4036 | 
4037 | for (i = RIB_LOC_START; i < rib_size; i++) {
4038 | rib = rib_byid(i);
4039 | if (rib == NULL)
4040 | continue; |
4041 | |
4042 | if (rib->state != RECONF_RELOAD) |
4043 | continue; |
4044 | |
4045 | rde_filterstate_prep(&state, p); |
4046 | action = rde_filter(rib->in_rules, peer, peer, &prefix, |
4047 | pt->prefixlen, &state); |
4048 | |
4049 | if (action == ACTION_ALLOW) { |
4050 | /* update Local-RIB */ |
4051 | prefix_update(rib, peer, p->path_id, |
4052 | p->path_id_tx, &state, |
4053 | &prefix, pt->prefixlen); |
4054 | } else if (action == ACTION_DENY) { |
4055 | /* remove from Local-RIB */ |
4056 | prefix_withdraw(rib, peer, p->path_id, &prefix, |
4057 | pt->prefixlen); |
4058 | } |
4059 | |
4060 | rde_filterstate_clean(&state); |
4061 | } |
4062 | } |
4063 | } |
4064 | |
4065 | static void |
4066 | rde_softreconfig_out(struct rib_entry *re, void *arg) |
4067 | { |
4068 | if (prefix_best(re) == NULL)
4069 | /* no valid path for prefix */
4070 | return;
4071 | 
4072 | rde_generate_updates(re, NULL, NULL, EVAL_RECONF);
4073 | } |
4074 | |
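      | /*
      |  * Re-evaluate all prefixes of a RIB entry after the evaluation mode
      |  * changed: with evaluation disabled the nexthops are unlinked and the
      |  * decision metric invalidated, otherwise every prefix is relinked and
      |  * run through the decision process again.
      |  */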
4075 | static void |
4076 | rde_softreconfig_sync_reeval(struct rib_entry *re, void *arg) |
4077 | { |
4078 | struct prefix_queue prefixes = TAILQ_HEAD_INITIALIZER(prefixes);
4079 | struct prefix *p, *next;
4080 | struct rib *rib = arg;
4081 | 
4082 | if (rib->flags & F_RIB_NOEVALUATE) {
4083 | /*
4084 | * evaluation process is turned off
4085 | * all dependent adj-rib-out were already flushed
4086 | * unlink nexthop if it was linked
4087 | */
4088 | TAILQ_FOREACH(p, &re->prefix_h, entry.list.rib) {
4089 | if (p->flags & PREFIX_NEXTHOP_LINKED)
4090 | nexthop_unlink(p);
4091 | p->dmetric = PREFIX_DMETRIC_INVALID;
4092 | } |
4093 | return; |
4094 | } |
4095 | |
4096 | /* evaluation process is turned on, so evaluate all prefixes again */ |
4097 | TAILQ_CONCAT(&prefixes, &re->prefix_h, entry.list.rib);
4098 | 
4099 | /*
4100 | * TODO: this code works but is not optimal. prefix_evaluate()
4101 | * does a lot of extra work in the worst case. Would be better
4102 | * to resort the list once and then call rde_generate_updates()
4103 | * and rde_send_kroute() once.
4104 | */
4105 | TAILQ_FOREACH_SAFE(p, &prefixes, entry.list.rib, next) {
4106 | /* need to re-link the nexthop if not already linked */
4107 | TAILQ_REMOVE(&prefixes, p, entry.list.rib);
4108 | if ((p->flags & PREFIX_NEXTHOP_LINKED) == 0)
4109 | nexthop_link(p);
4110 | prefix_evaluate(re, p, NULL);
4111 | } |
4112 | } |
4113 | |
4114 | static void |
4115 | rde_softreconfig_sync_fib(struct rib_entry *re, void *bula) |
4116 | { |
4117 | struct prefix *p; |
4118 | |
4119 | if ((p = prefix_best(re)) != NULL)
4120 | rde_send_kroute(re_rib(re), p, NULL);
4121 | } |
4122 | |
4123 | static void |
4124 | rde_softreconfig_sync_done(void *arg, uint8_t aid) |
4125 | { |
4126 | struct rib *rib = arg; |
4127 | |
4128 | /* this RIB dump is done */ |
4129 | if (rib->fibstate == RECONF_RELOAD) |
4130 | log_info("fib sync done for %s", rib->name); |
4131 | else |
4132 | log_info("re-evaluation done for %s", rib->name); |
4133 | rib->fibstate = RECONF_NONE; |
4134 | |
4135 | /* check if other dumps are still running */ |
4136 | if (--softreconfig == 0) |
4137 | rde_softreconfig_done(); |
4138 | } |
4139 | |
4140 | /* |
4141 | * ROA specific functions. The roa set is updated independent of the config |
4142 | * so this runs outside of the softreconfig handlers. |
4143 | */ |
4144 | static void |
4145 | rde_rpki_softreload(struct rib_entry *re, void *bula) |
4146 | { |
4147 | struct filterstate state; |
4148 | struct rib *rib; |
4149 | struct prefix *p; |
4150 | struct pt_entry *pt; |
4151 | struct rde_peer *peer; |
4152 | struct rde_aspath *asp; |
4153 | enum filter_actions action; |
4154 | struct bgpd_addr prefix; |
4155 | uint8_t roa_vstate, aspa_vstate; |
4156 | uint16_t i; |
4157 | |
4158 | pt = re->prefix; |
4159 | pt_getaddr(pt, &prefix); |
4160 | TAILQ_FOREACH(p, &re->prefix_h, entry.list.rib) { |
4161 | asp = prefix_aspath(p); |
4162 | peer = prefix_peer(p); |
4163 | |
4164 | /* ROA validation state update */ |
4165 | roa_vstate = rde_roa_validity(&rde_roa, |
4166 | &prefix, pt->prefixlen, aspath_origin(asp->aspath)); |
4167 | |
4168 | /* ASPA validation state update (if needed) */ |
4169 | if (prefix_aspa_vstate(p) == ASPA_NEVER_KNOWN) { |
4170 | aspa_vstate = ASPA_NEVER_KNOWN; |
4171 | } else { |
4172 | if (asp->aspa_generation != rde_aspa_generation) { |
4173 | asp->aspa_generation = rde_aspa_generation; |
4174 | aspa_validation(rde_aspa, asp->aspath, |
4175 | &asp->aspa_state); |
4176 | } |
4177 | aspa_vstate = rde_aspa_validity(peer, asp, pt->aid); |
4178 | } |
4179 | |
4180 | if (roa_vstate == prefix_roa_vstate(p) && |
4181 | aspa_vstate == prefix_aspa_vstate(p)) |
4182 | continue; |
4183 | |
4184 | prefix_set_vstate(p, roa_vstate, aspa_vstate); |
4185 | /* skip announced networks, they are never filtered */ |
4186 | if (asp->flags & F_PREFIX_ANNOUNCED) |
4187 | continue; |
4188 | |
4189 | for (i = RIB_LOC_START; i < rib_size; i++) { |
4190 | rib = rib_byid(i); |
4191 | if (rib == NULL) |
4192 | continue; |
4193 | |
4194 | rde_filterstate_prep(&state, p); |
4195 | action = rde_filter(rib->in_rules, peer, peer, &prefix, |
4196 | pt->prefixlen, &state); |
4197 | |
4198 | if (action == ACTION_ALLOW) { |
4199 | /* update Local-RIB */ |
4200 | prefix_update(rib, peer, p->path_id, |
4201 | p->path_id_tx, &state, |
4202 | &prefix, pt->prefixlen); |
4203 | } else if (action == ACTION_DENY) { |
4204 | /* remove from Local-RIB */ |
4205 | prefix_withdraw(rib, peer, p->path_id, &prefix, |
4206 | pt->prefixlen); |
4207 | } |
4208 | |
4209 | rde_filterstate_clean(&state); |
4210 | } |
4211 | } |
4212 | } |
4213 | |
4214 | static int rpki_update_pending; |
4215 | |
4216 | static void |
4217 | rde_rpki_softreload_done(void *arg, uint8_t aid) |
4218 | { |
4219 | /* the roa update is done */ |
4220 | log_info("RPKI softreload done"); |
4221 | rpki_update_pending = 0; |
4222 | } |
4223 | |
4224 | static void |
4225 | rde_rpki_reload(void) |
4226 | { |
4227 | if (rpki_update_pending) { |
4228 | log_info("RPKI softreload skipped, old still running"); |
4229 | return; |
4230 | } |
4231 | |
4232 | rpki_update_pending = 1; |
4233 | if (rib_dump_new(RIB_ADJ_IN, AID_UNSPEC, RDE_RUNNER_ROUNDS, |
4234 | rib_byid(RIB_ADJ_IN), rde_rpki_softreload, |
4235 | rde_rpki_softreload_done, NULL) == -1) |
4236 | fatal("%s: rib_dump_new", __func__); |
4237 | } |
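/*
 * Editor's note (illustrative, not part of rde.c): rde_rpki_reload() above
 * combines two things, an asynchronous table walk started with
 * rib_dump_new() (per-entry callback plus completion callback) and a pending
 * flag so that only one such walk is in flight at a time.  The sketch below
 * shows the same guard-flag pattern with the walk reduced to a plain loop;
 * all ex_* names are invented for this sketch.
 */
#include <stdio.h>

static int ex_reload_pending;

static void
ex_entry_upcall(int entry, void *arg)
{
	printf("revalidating entry %d\n", entry);
}

static void
ex_dump_done(void *arg)
{
	printf("reload done\n");
	ex_reload_pending = 0;	/* allow the next reload */
}

static void
ex_start_reload(void)
{
	int i;

	if (ex_reload_pending) {
		printf("reload skipped, old still running\n");
		return;
	}
	ex_reload_pending = 1;

	/* stands in for the walk scheduled via rib_dump_new() */
	for (i = 0; i < 3; i++)
		ex_entry_upcall(i, NULL);
}

int
main(void)
{
	ex_start_reload();	/* starts the (simulated) walk */
	ex_start_reload();	/* skipped: previous walk not finished */
	ex_dump_done(NULL);	/* completion callback clears the flag */
	ex_start_reload();	/* allowed again */
	return 0;
}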
4238 | |
4239 | static int |
4240 | rde_roa_reload(void) |
4241 | { |
4242 | struct rde_prefixset roa_old; |
4243 | |
4244 | if (rpki_update_pending) { |
4245 | trie_free(&roa_new.th); /* can't use new roa table */ |
4246 | return 1; /* force call to rde_rpki_reload */ |
4247 | } |
4248 | |
4249 | roa_old = rde_roa; |
4250 | rde_roa = roa_new; |
4251 | memset(&roa_new, 0, sizeof(roa_new)); |
4252 | |
4253 | /* check if roa changed */ |
4254 | if (trie_equal(&rde_roa.th, &roa_old.th)) { |
4255 | rde_roa.lastchange = roa_old.lastchange; |
4256 | trie_free(&roa_old.th); /* old roa no longer needed */ |
4257 | return 0; |
4258 | } |
4259 | |
4260 | rde_roa.lastchange = getmonotime(); |
4261 | trie_free(&roa_old.th); /* old roa no longer needed */ |
4262 | |
4263 | log_debug("ROA change: reloading Adj-RIB-In"); |
4264 | return 1; |
4265 | } |
4266 | |
4267 | static int |
4268 | rde_aspa_reload(void) |
4269 | { |
4270 | struct rde_aspa *aspa_old; |
4271 | |
4272 | if (rpki_update_pending) { |
4273 | aspa_table_free(aspa_new); /* can't use new aspa table */ |
4274 | aspa_new = NULL; |
4275 | return 1; /* rde_rpki_reload warns */ |
4276 | } |
4277 | |
4278 | aspa_old = rde_aspa; |
4279 | rde_aspa = aspa_new; |
4280 | aspa_new = NULL; |
4281 | |
4282 | /* check if aspa changed */ |
4283 | if (aspa_table_equal(rde_aspa, aspa_old)) { |
4284 | aspa_table_unchanged(rde_aspa, aspa_old); |
4285 | aspa_table_free(aspa_old); /* old aspa no longer needed */ |
4286 | return 0; |
4287 | } |
4288 | |
4289 | aspa_table_free(aspa_old); /* old aspa no longer needed */ |
4290 | log_debug("ASPA change: reloading Adj-RIB-In"); |
4291 | rde_aspa_generation++; |
4292 | return 1; |
4293 | } |
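/*
 * Editor's note (illustrative, not part of rde.c): rde_roa_reload() and
 * rde_aspa_reload() above share a "swap and compare" shape: install the
 * freshly built table, compare it with the previous one, free the old copy,
 * and only report a change (which forces the Adj-RIB-In reload) when the
 * contents actually differ.  Minimal sketch with an integer array standing
 * in for the ROA trie / ASPA table; all ex_* names are invented.
 */
#include <stdlib.h>
#include <string.h>

struct ex_table {
	int	*ent;
	size_t	 n;
};

static struct ex_table ex_cur;	/* table in active use */
static struct ex_table ex_new;	/* staged replacement table */

/* returns 1 if the active table changed and consumers must re-evaluate */
static int
ex_table_reload(void)
{
	struct ex_table old;

	old = ex_cur;
	ex_cur = ex_new;
	memset(&ex_new, 0, sizeof(ex_new));

	if (old.n == ex_cur.n && (old.n == 0 ||
	    memcmp(old.ent, ex_cur.ent, old.n * sizeof(int)) == 0)) {
		free(old.ent);		/* unchanged, keep using ex_cur */
		return 0;
	}
	free(old.ent);			/* old table no longer needed */
	return 1;
}

int
main(void)
{
	ex_new.ent = calloc(2, sizeof(int));
	ex_new.n = 2;
	return ex_table_reload() ? 0 : 1;	/* changed: empty -> 2 entries */
}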
4294 | |
4295 | /* |
4296 | * generic helper function |
4297 | */ |
4298 | uint32_t |
4299 | rde_local_as(void) |
4300 | { |
4301 | return (conf->as); |
4302 | } |
4303 | |
4304 | int |
4305 | rde_decisionflags(void) |
4306 | { |
4307 | return (conf->flags & BGPD_FLAG_DECISION_MASK); |
4308 | } |
4309 | |
4310 | /* End-of-RIB marker, RFC 4724 */ |
4311 | static void |
4312 | rde_peer_recv_eor(struct rde_peer *peer, uint8_t aid) |
4313 | { |
4314 | peer->stats.prefix_rcvd_eor++; |
4315 | peer->recv_eor |= 1 << aid; |
4316 | |
4317 | /* |
4318 | * First notify SE to avert a possible race with the restart timeout. |
4319 | * If the timeout fires before this imsg is processed by the SE it will |
4320 | * result in the same operation since the timeout issues a FLUSH which |
4321 | * does the same as the RESTARTED action (flushing stale routes). |
4322 | * The logic in the SE is so that only one of FLUSH or RESTARTED will |
4323 | * be sent back to the RDE and so peer_flush is only called once. |
4324 | */ |
4325 | if (imsg_compose(ibuf_se, IMSG_SESSION_RESTARTED, peer->conf.id, |
4326 | 0, -1, &aid, sizeof(aid)) == -1) |
4327 | fatal("imsg_compose error while receiving EoR"); |
4328 | |
4329 | log_peer_info(&peer->conf, "received %s EOR marker", |
4330 | aid2str(aid)); |
4331 | } |
4332 | |
4333 | static void |
4334 | rde_peer_send_eor(struct rde_peer *peer, uint8_t aid) |
4335 | { |
4336 | uint16_t afi; |
4337 | uint8_t safi; |
4338 | |
4339 | peer->stats.prefix_sent_eor++; |
4340 | peer->sent_eor |= 1 << aid; |
4341 | |
4342 | if (aid == AID_INET) { |
4343 | u_char null[4]; |
4344 | |
4345 | memset(&null, 0, 4); |
4346 | if (imsg_compose(ibuf_se, IMSG_UPDATE, peer->conf.id, |
4347 | 0, -1, &null, 4) == -1) |
4348 | fatal("imsg_compose error while sending EoR"); |
4349 | } else { |
4350 | uint16_t i; |
4351 | u_char buf[10]; |
4352 | |
4353 | if (aid2afi(aid, &afi, &safi) == -1) |
4354 | fatalx("peer_send_eor: bad AID"); |
4355 | |
4356 | i = 0; /* v4 withdrawn len */ |
4357 | memcpy(&buf[0], &i, sizeof(i)); |
4358 | i = htons(6); /* path attr len */ |
4359 | memcpy(&buf[2], &i, sizeof(i)); |
4360 | buf[4] = ATTR_OPTIONAL; |
4361 | buf[5] = ATTR_MP_UNREACH_NLRI; |
4362 | buf[6] = 3; /* attr len (AFI + SAFI) */ |
4363 | i = htons(afi); |
4364 | memcpy(&buf[7], &i, sizeof(i)); |
4365 | buf[9] = safi; |
4366 | |
4367 | if (imsg_compose(ibuf_se, IMSG_UPDATE, peer->conf.id, |
4368 | 0, -1, &buf, 10) == -1) |
4369 | fatal("%s %d imsg_compose error in peer_send_eor", |
4370 | __func__, __LINE__); |
4371 | } |
4372 | |
4373 | log_peer_info(&peer->conf, "sending %s EOR marker", |
4374 | aid2str(aid)); |
4375 | } |
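/*
 * Editor's note (illustrative, not part of rde.c): byte layout of the
 * End-of-RIB marker composed above (RFC 4724).  The 19-byte BGP message
 * header is prepended later, so only the UPDATE body is built here: zero
 * withdrawn-routes length, a 6-byte path attribute section holding an empty
 * MP_UNREACH_NLRI (flags 0x80, type 15, length 3 = AFI + SAFI).
 * ex_eor_body() and the EX_* defines are invented for this sketch; 0x80 and
 * 15 mirror ATTR_OPTIONAL and ATTR_MP_UNREACH_NLRI.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

#define EX_ATTR_OPTIONAL	0x80	/* optional attribute flag */
#define EX_MP_UNREACH_NLRI	15	/* path attribute type code */

static size_t
ex_eor_body(uint8_t *buf, uint16_t afi, uint8_t safi)
{
	uint16_t w;

	w = htons(0);			/* withdrawn routes length */
	memcpy(&buf[0], &w, sizeof(w));
	w = htons(6);			/* total path attribute length */
	memcpy(&buf[2], &w, sizeof(w));
	buf[4] = EX_ATTR_OPTIONAL;	/* attribute flags */
	buf[5] = EX_MP_UNREACH_NLRI;	/* attribute type */
	buf[6] = 3;			/* attribute length: AFI + SAFI */
	w = htons(afi);
	memcpy(&buf[7], &w, sizeof(w));
	buf[9] = safi;
	return 10;			/* same 10 bytes as buf[] above */
}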
4376 | |
4377 | void |
4378 | rde_peer_send_rrefresh(struct rde_peer *peer, uint8_t aid, uint8_t subtype) |
4379 | { |
4380 | struct route_refresh rr; |
4381 | |
4382 | /* not strictly needed, the SE checks as well */ |
4383 | if (peer->capa.enhanced_rr == 0) |
4384 | return; |
4385 | |
4386 | switch (subtype) { |
4387 | case ROUTE_REFRESH_END_RR: |
4388 | case ROUTE_REFRESH_BEGIN_RR: |
4389 | break; |
4390 | default: |
4391 | fatalx("%s unexpected subtype %d", __func__, subtype); |
4392 | } |
4393 | |
4394 | rr.aid = aid; |
4395 | rr.subtype = subtype; |
4396 | |
4397 | if (imsg_compose(ibuf_se, IMSG_REFRESH, peer->conf.id, 0, -1, |
4398 | &rr, sizeof(rr)) == -1) |
4399 | fatal("%s %d imsg_compose error", __func__, __LINE__); |
4400 | log_peer_info(&peer->conf, "sending %s %s marker", |
4401 | aid2str(aid), subtype == ROUTE_REFRESH_END_RR ? "EoRR" : "BoRR"); |
4402 | } |
4403 | |
4404 | /* |
4405 | * network announcement stuff |
4406 | */ |
4407 | void |
4408 | network_add(struct network_config *nc, struct filterstate *state) |
4409 | { |
4410 | struct l3vpn *vpn; |
4411 | struct filter_set_head *vpnset = NULL; |
4412 | struct in_addr prefix4; |
4413 | struct in6_addr prefix6; |
4414 | uint32_t path_id_tx; |
4415 | uint16_t i; |
4416 | uint8_t vstate; |
4417 | |
4418 | if (nc->rd != 0) { |
4419 | SIMPLEQ_FOREACH(vpn, &conf->l3vpns, entry) { |
4420 | if (vpn->rd != nc->rd) |
4421 | continue; |
4422 | switch (nc->prefix.aid) { |
4423 | case AID_INET: |
4424 | prefix4 = nc->prefix.v4ba.v4; |
4425 | memset(&nc->prefix, 0, sizeof(nc->prefix)); |
4426 | nc->prefix.aid = AID_VPN_IPv4; |
4427 | nc->prefix.rd = vpn->rd; |
4428 | nc->prefix.v4ba.v4 = prefix4; |
4429 | nc->prefix.labellen = 3; |
4430 | nc->prefix.labelstack[0] = |
4431 | (vpn->label >> 12) & 0xff; |
4432 | nc->prefix.labelstack[1] = |
4433 | (vpn->label >> 4) & 0xff; |
4434 | nc->prefix.labelstack[2] = |
4435 | (vpn->label << 4) & 0xf0; |
4436 | nc->prefix.labelstack[2] |= BGP_MPLS_BOS; |
4437 | vpnset = &vpn->export; |
4438 | break; |
4439 | case AID_INET6: |
4440 | prefix6 = nc->prefix.v6ba.v6; |
4441 | memset(&nc->prefix, 0, sizeof(nc->prefix)); |
4442 | nc->prefix.aid = AID_VPN_IPv6; |
4443 | nc->prefix.rd = vpn->rd; |
4444 | nc->prefix.v6ba.v6 = prefix6; |
4445 | nc->prefix.labellen = 3; |
4446 | nc->prefix.labelstack[0] = |
4447 | (vpn->label >> 12) & 0xff; |
4448 | nc->prefix.labelstack[1] = |
4449 | (vpn->label >> 4) & 0xff; |
4450 | nc->prefix.labelstack[2] = |
4451 | (vpn->label << 4) & 0xf0; |
4452 | nc->prefix.labelstack[2] |= BGP_MPLS_BOS; |
4453 | vpnset = &vpn->export; |
4454 | break; |
4455 | default: |
4456 | log_warnx("unable to VPNize prefix"); |
4457 | filterset_free(&nc->attrset); |
4458 | return; |
4459 | } |
4460 | break; |
4461 | } |
4462 | if (vpn == NULL) { |
4463 | log_warnx("network_add: " |
4464 | "prefix %s/%u in non-existing l3vpn %s", |
4465 | log_addr(&nc->prefix), nc->prefixlen, |
4466 | log_rd(nc->rd)); |
4467 | return; |
4468 | } |
4469 | } |
4470 | |
4471 | rde_apply_set(&nc->attrset, peerself, peerself, state, nc->prefix.aid); |
4472 | if (vpnset) |
4473 | rde_apply_set(vpnset, peerself, peerself, state, |
4474 | nc->prefix.aid); |
4475 | |
4476 | vstate = rde_roa_validity(&rde_roa, &nc->prefix, nc->prefixlen, |
4477 | aspath_origin(state->aspath.aspath)); |
4478 | rde_filterstate_set_vstate(state, vstate, ASPA_NEVER_KNOWN); |
4479 | |
4480 | path_id_tx = pathid_assign(peerself, 0, &nc->prefix, nc->prefixlen); |
4481 | if (prefix_update(rib_byid(RIB_ADJ_IN), peerself, 0, path_id_tx, |
4482 | state, &nc->prefix, nc->prefixlen) == 1) |
4483 | peerself->stats.prefix_cnt++; |
4484 | for (i = RIB_LOC_START; i < rib_size; i++) { |
4485 | struct rib *rib = rib_byid(i); |
4486 | if (rib == NULL) |
4487 | continue; |
4488 | rde_update_log("announce", i, peerself, |
4489 | state->nexthop ? &state->nexthop->exit_nexthop : NULL, |
4490 | &nc->prefix, nc->prefixlen); |
4491 | prefix_update(rib, peerself, 0, path_id_tx, state, &nc->prefix, |
4492 | nc->prefixlen); |
4493 | } |
4494 | filterset_free(&nc->attrset); |
4495 | } |
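/*
 * Editor's note (illustrative, not part of rde.c): the labelstack[] lines in
 * network_add() (and again in network_delete() below) pack a 20-bit MPLS
 * label into the 3-byte label field of the VPN NLRI: label in the top 20
 * bits, then 3 EXP/TC bits (left zero) and the bottom-of-stack bit.
 * ex_pack_vpn_label() is an invented helper; the constant 0x01 mirrors
 * BGP_MPLS_BOS.
 */
#include <stdint.h>

static void
ex_pack_vpn_label(uint32_t label, uint8_t stack[3])
{
	stack[0] = (label >> 12) & 0xff;	/* label bits 19..12 */
	stack[1] = (label >> 4) & 0xff;		/* label bits 11..4 */
	stack[2] = (label << 4) & 0xf0;		/* label bits 3..0 */
	stack[2] |= 0x01;			/* bottom-of-stack bit */
}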
4496 | |
4497 | void |
4498 | network_delete(struct network_config *nc) |
4499 | { |
4500 | struct l3vpn *vpn; |
4501 | struct in_addr prefix4; |
4502 | struct in6_addr prefix6; |
4503 | uint32_t i; |
4504 | |
4505 | if (nc->rd) { |
4506 | SIMPLEQ_FOREACH(vpn, &conf->l3vpns, entry) { |
4507 | if (vpn->rd != nc->rd) |
4508 | continue; |
4509 | switch (nc->prefix.aid) { |
4510 | case AID_INET: |
4511 | prefix4 = nc->prefix.v4ba.v4; |
4512 | memset(&nc->prefix, 0, sizeof(nc->prefix)); |
4513 | nc->prefix.aid = AID_VPN_IPv4; |
4514 | nc->prefix.rd = vpn->rd; |
4515 | nc->prefix.v4ba.v4 = prefix4; |
4516 | nc->prefix.labellen = 3; |
4517 | nc->prefix.labelstack[0] = |
4518 | (vpn->label >> 12) & 0xff; |
4519 | nc->prefix.labelstack[1] = |
4520 | (vpn->label >> 4) & 0xff; |
4521 | nc->prefix.labelstack[2] = |
4522 | (vpn->label << 4) & 0xf0; |
4523 | nc->prefix.labelstack[2] |= BGP_MPLS_BOS; |
4524 | break; |
4525 | case AID_INET6: |
4526 | prefix6 = nc->prefix.v6ba.v6; |
4527 | memset(&nc->prefix, 0, sizeof(nc->prefix)); |
4528 | nc->prefix.aid = AID_VPN_IPv6; |
4529 | nc->prefix.rd = vpn->rd; |
4530 | nc->prefix.v6ba.v6 = prefix6; |
4531 | nc->prefix.labellen = 3; |
4532 | nc->prefix.labelstack[0] = |
4533 | (vpn->label >> 12) & 0xff; |
4534 | nc->prefix.labelstack[1] = |
4535 | (vpn->label >> 4) & 0xff; |
4536 | nc->prefix.labelstack[2] = |
4537 | (vpn->label << 4) & 0xf0; |
4538 | nc->prefix.labelstack[2] |= BGP_MPLS_BOS; |
4539 | break; |
4540 | default: |
4541 | log_warnx("unable to VPNize prefix"); |
4542 | return; |
4543 | } |
4544 | } |
4545 | } |
4546 | |
4547 | for (i = RIB_LOC_START; i < rib_size; i++) { |
4548 | struct rib *rib = rib_byid(i); |
4549 | if (rib == NULL) |
4550 | continue; |
4551 | if (prefix_withdraw(rib, peerself, 0, &nc->prefix, |
4552 | nc->prefixlen)) |
4553 | rde_update_log("withdraw announce", i, peerself, |
4554 | NULL, &nc->prefix, nc->prefixlen); |
4555 | } |
4556 | if (prefix_withdraw(rib_byid(RIB_ADJ_IN), peerself, 0, &nc->prefix, |
4557 | nc->prefixlen)) |
4558 | peerself->stats.prefix_cnt--; |
4559 | } |
4560 | |
4561 | static void |
4562 | network_dump_upcall(struct rib_entry *re, void *ptr) |
4563 | { |
4564 | struct prefix *p; |
4565 | struct rde_aspath *asp; |
4566 | struct kroute_full kf; |
4567 | struct bgpd_addr addr; |
4568 | struct rde_dump_ctx *ctx = ptr; |
4569 | |
4570 | TAILQ_FOREACH(p, &re->prefix_h, entry.list.rib) { |
4571 | asp = prefix_aspath(p); |
4572 | if (!(asp->flags & F_PREFIX_ANNOUNCED)) |
4573 | continue; |
4574 | pt_getaddr(p->pt, &addr); |
4575 | |
4576 | memset(&kf, 0, sizeof(kf)); |
4577 | kf.prefix = addr; |
4578 | kf.prefixlen = p->pt->prefixlen; |
4579 | if (prefix_nhvalid(p) && prefix_nexthop(p) != NULL) |
4580 | kf.nexthop = prefix_nexthop(p)->true_nexthop; |
4581 | else |
4582 | kf.nexthop.aid = kf.prefix.aid; |
4583 | if ((asp->flags & F_ANN_DYNAMIC) == 0) |
4584 | kf.flags = F_STATIC; |
4585 | if (imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_NETWORK, 0, |
4586 | ctx->req.pid, -1, &kf, sizeof(kf)) == -1) |
4587 | log_warnx("%s: imsg_compose error", __func__); |
4588 | } |
4589 | } |
4590 | |
4591 | static void |
4592 | network_flush_upcall(struct rib_entry *re, void *ptr) |
4593 | { |
4594 | struct bgpd_addr addr; |
4595 | struct prefix *p; |
4596 | uint32_t i; |
4597 | uint8_t prefixlen; |
4598 | |
4599 | p = prefix_bypeer(re, peerself, 0); |
4600 | if (p == NULL) |
4601 | return; |
4602 | if ((prefix_aspath(p)->flags & F_ANN_DYNAMIC) != F_ANN_DYNAMIC) |
4603 | return; |
4604 | |
4605 | pt_getaddr(re->prefix, &addr); |
4606 | prefixlen = re->prefix->prefixlen; |
4607 | |
4608 | for (i = RIB_LOC_START; i < rib_size; i++) { |
4609 | struct rib *rib = rib_byid(i); |
4610 | if (rib == NULL) |
4611 | continue; |
4612 | if (prefix_withdraw(rib, peerself, 0, &addr, prefixlen) == 1) |
4613 | rde_update_log("flush announce", i, peerself, |
4614 | NULL, &addr, prefixlen); |
4615 | } |
4616 | |
4617 | if (prefix_withdraw(rib_byid(RIB_ADJ_IN), peerself, 0, &addr, |
4618 | prefixlen) == 1) |
4619 | peerself->stats.prefix_cnt--; |
4620 | } |
4621 | |
4622 | /* |
4623 | * flowspec announcement stuff |
4624 | */ |
4625 | void |
4626 | flowspec_add(struct flowspec *f, struct filterstate *state, |
4627 | struct filter_set_head *attrset) |
4628 | { |
4629 | struct pt_entry *pte; |
4630 | uint32_t path_id_tx; |
4631 | |
4632 | rde_apply_set(attrset, peerself, peerself, state, f->aid); |
4633 | rde_filterstate_set_vstate(state, ROA_NOTFOUND, ASPA_NEVER_KNOWN); |
4634 | path_id_tx = peerself->path_id_tx; /* XXX should use pathid_assign() */ |
4635 | |
4636 | pte = pt_get_flow(f); |
4637 | if (pte == NULL) |
4638 | pte = pt_add_flow(f); |
4639 | |
4640 | if (prefix_flowspec_update(peerself, state, pte, path_id_tx) == 1) |
4641 | peerself->stats.prefix_cnt++; |
4642 | } |
4643 | |
4644 | void |
4645 | flowspec_delete(struct flowspec *f) |
4646 | { |
4647 | struct pt_entry *pte; |
4648 | |
4649 | pte = pt_get_flow(f); |
4650 | if (pte == NULL) |
4651 | return; |
4652 | |
4653 | if (prefix_flowspec_withdraw(peerself, pte) == 1) |
4654 | peerself->stats.prefix_cnt--; |
4655 | } |
4656 | |
4657 | static void |
4658 | flowspec_flush_upcall(struct rib_entry *re, void *ptr) |
4659 | { |
4660 | struct prefix *p; |
4661 | |
4662 | p = prefix_bypeer(re, peerself, 0); |
4663 | if (p == NULL) |
4664 | return; |
4665 | if ((prefix_aspath(p)->flags & F_ANN_DYNAMIC) != F_ANN_DYNAMIC) |
4666 | return; |
4667 | if (prefix_flowspec_withdraw(peerself, re->prefix) == 1) |
4668 | peerself->stats.prefix_cnt--; |
4669 | } |
4670 | |
4671 | static void |
4672 | flowspec_dump_upcall(struct rib_entry *re, void *ptr) |
4673 | { |
4674 | pid_t *pid = ptr; |
4675 | struct prefix *p; |
4676 | struct rde_aspath *asp; |
4677 | struct rde_community *comm; |
4678 | struct flowspec ff; |
4679 | struct ibuf *ibuf; |
4680 | uint8_t *flow; |
4681 | int len; |
4682 | |
4683 | TAILQ_FOREACH(p, &re->prefix_h, entry.list.rib) { |
4684 | asp = prefix_aspath(p); |
4685 | if (!(asp->flags & F_PREFIX_ANNOUNCED)) |
4686 | continue; |
4687 | comm = prefix_communities(p); |
4688 | |
4689 | len = pt_getflowspec(p->pt, &flow); |
4690 | |
4691 | memset(&ff, 0, sizeof(ff)); |
4692 | ff.aid = p->pt->aid; |
4693 | ff.len = len; |
4694 | if ((asp->flags & F_ANN_DYNAMIC) == 0) |
4695 | ff.flags = F_STATIC; |
4696 | if ((ibuf = imsg_create(ibuf_se_ctl, IMSG_CTL_SHOW_FLOWSPEC, 0, |
4697 | *pid, FLOWSPEC_SIZE + len)) == NULL) |
4698 | continue; |
4699 | if (imsg_add(ibuf, &ff, FLOWSPEC_SIZE) == -1 || |
4700 | imsg_add(ibuf, flow, len) == -1) |
4701 | continue; |
4702 | imsg_close(ibuf_se_ctl, ibuf); |
4703 | if (comm->nentries > 0) { |
4704 | if (imsg_compose(ibuf_se_ctl, |
4705 | IMSG_CTL_SHOW_RIB_COMMUNITIES, 0, *pid, -1, |
4706 | comm->communities, |
4707 | comm->nentries * sizeof(struct community)) == -1) |
4708 | continue; |
4709 | } |
4710 | } |
4711 | } |
4712 | |
4713 | static void |
4714 | flowspec_dump_done(void *ptr, uint8_t aid) |
4715 | { |
4716 | pid_t *pid = ptr; |
4717 | |
4718 | imsg_compose(ibuf_se_ctl, IMSG_CTL_END, 0, *pid, -1, NULL, 0); |
4719 | } |
4720 | |
4721 | |
4722 | /* clean up */ |
4723 | void |
4724 | rde_shutdown(void) |
4725 | { |
4726 | /* |
4727 | * the decision process is turned off if rde_quit = 1 and |
4728 | * rde_shutdown depends on this. |
4729 | */ |
4730 | |
4731 | /* First all peers go down */ |
4732 | peer_foreach(peer_down, NULL); |
4733 | |
4734 | /* free filters */ |
4735 | filterlist_free(out_rules); |
4736 | filterlist_free(out_rules_tmp); |
4737 | |
4738 | /* kill the VPN configs */ |
4739 | free_l3vpns(&conf->l3vpns); |
4740 | |
4741 | /* now check everything */ |
4742 | rib_shutdown(); |
4743 | nexthop_shutdown(); |
4744 | path_shutdown(); |
4745 | attr_shutdown(); |
4746 | pt_shutdown(); |
4747 | peer_shutdown(); |
4748 | } |
4749 | |
4750 | struct rde_prefixset * |
4751 | rde_find_prefixset(char *name, struct rde_prefixset_head *p) |
4752 | { |
4753 | struct rde_prefixset *ps; |
4754 | |
4755 | SIMPLEQ_FOREACH(ps, p, entry) { |
4756 | if (!strcmp(ps->name, name)) |
4757 | return (ps); |
4758 | } |
4759 | return (NULL); |
4760 | } |
4761 | |
4762 | void |
4763 | rde_mark_prefixsets_dirty(struct rde_prefixset_head *psold, |
4764 | struct rde_prefixset_head *psnew) |
4765 | { |
4766 | struct rde_prefixset *new, *old; |
4767 | |
4768 | SIMPLEQ_FOREACH(new, psnew, entry) { |
4769 | if ((psold == NULL) || |
4770 | (old = rde_find_prefixset(new->name, psold)) == NULL) { |
4771 | new->dirty = 1; |
4772 | new->lastchange = getmonotime(); |
4773 | } else { |
4774 | if (trie_equal(&new->th, &old->th) == 0) { |
4775 | new->dirty = 1; |
4776 | new->lastchange = getmonotime(); |
4777 | } else |
4778 | new->lastchange = old->lastchange; |
4779 | } |
4780 | } |
4781 | } |
4782 | |
4783 | uint8_t |
4784 | rde_roa_validity(struct rde_prefixset *ps, struct bgpd_addr *prefix, |
4785 | uint8_t plen, uint32_t as) |
4786 | { |
4787 | int r; |
4788 | |
4789 | r = trie_roa_check(&ps->th, prefix, plen, as); |
4790 | return (r & ROA_MASK); |
4791 | } |
4792 | |
4793 | static int |
4794 | ovs_match(struct prefix *p, uint32_t flag) |
4795 | { |
4796 | if (flag & (F_CTL_OVS_VALID|F_CTL_OVS_INVALID|F_CTL_OVS_NOTFOUND)) { |
4797 | switch (prefix_roa_vstate(p)) { |
4798 | case ROA_VALID: |
4799 | if (!(flag & F_CTL_OVS_VALID)) |
4800 | return 0; |
4801 | break; |
4802 | case ROA_INVALID: |
4803 | if (!(flag & F_CTL_OVS_INVALID)) |
4804 | return 0; |
4805 | break; |
4806 | case ROA_NOTFOUND: |
4807 | if (!(flag & F_CTL_OVS_NOTFOUND)) |
4808 | return 0; |
4809 | break; |
4810 | default: |
4811 | break; |
4812 | } |
4813 | } |
4814 | |
4815 | return 1; |
4816 | } |
4817 | |
4818 | static int |
4819 | avs_match(struct prefix *p, uint32_t flag) |
4820 | { |
4821 | if (flag & (F_CTL_AVS_VALID|F_CTL_AVS_INVALID|F_CTL_AVS_UNKNOWN)) { |
4822 | switch (prefix_aspa_vstate(p) & ASPA_MASK) { |
4823 | case ASPA_VALID: |
4824 | if (!(flag & F_CTL_AVS_VALID)) |
4825 | return 0; |
4826 | break; |
4827 | case ASPA_INVALID: |
4828 | if (!(flag & F_CTL_AVS_INVALID)) |
4829 | return 0; |
4830 | break; |
4831 | case ASPA_UNKNOWN: |
4832 | if (!(flag & F_CTL_AVS_UNKNOWN)) |
4833 | return 0; |
4834 | break; |
4835 | default: |
4836 | break; |
4837 | } |
4838 | } |
4839 | |
4840 | return 1; |
4841 | } |
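/*
 * Editor's note (illustrative, not part of rde.c): ovs_match() and
 * avs_match() above implement the same idea for the two validation states:
 * if none of the relevant request flags is set, every prefix is shown;
 * otherwise a prefix is shown only when its state is among the selected
 * ones.  Stand-alone sketch of that flag filtering; all EX_/ex_ names are
 * invented and only mirror the shape of the helpers above.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_SHOW_VALID		0x1
#define EX_SHOW_INVALID		0x2
#define EX_SHOW_NOTFOUND	0x4

enum ex_state { EX_NOTFOUND, EX_INVALID, EX_VALID };

static int
ex_match(enum ex_state state, uint32_t flags)
{
	/* no filter flags set: show everything */
	if ((flags & (EX_SHOW_VALID|EX_SHOW_INVALID|EX_SHOW_NOTFOUND)) == 0)
		return 1;
	switch (state) {
	case EX_VALID:
		return (flags & EX_SHOW_VALID) != 0;
	case EX_INVALID:
		return (flags & EX_SHOW_INVALID) != 0;
	case EX_NOTFOUND:
		return (flags & EX_SHOW_NOTFOUND) != 0;
	}
	return 1;
}

int
main(void)
{
	enum ex_state states[] = { EX_VALID, EX_INVALID, EX_NOTFOUND };
	uint32_t flags = EX_SHOW_VALID | EX_SHOW_NOTFOUND;
	size_t i;

	for (i = 0; i < 3; i++)
		printf("state %d: %s\n", (int)states[i],
		    ex_match(states[i], flags) ? "shown" : "filtered");
	return 0;
}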