Bug Summary

File: src/usr.sbin/ospf6d/kroute.c
Warning: line 1098, column 8
Although the value stored to 'iface' is used in the enclosing expression, the value is never actually read from 'iface'
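The checker is reporting a dead store: in if_announce(), the IFAN_ARRIVAL case assigns the result of if_new() to the local variable 'iface', but that branch never reads 'iface' afterwards, so only the NULL comparison in the enclosing expression matters. A minimal sketch of the flagged pattern and one possible way to quiet the warning follows; it only illustrates the diagnostic and is not necessarily the fix applied upstream:

	/* flagged: 'iface' is written but never read in this branch */
	case IFAN_ARRIVAL:
		if ((iface = if_new(ifan->ifan_index, ifan->ifan_name)) == NULL)
			fatal("if_announce failed");
		break;

	/* possible rework: keep the failure check, drop the unused store */
	case IFAN_ARRIVAL:
		if (if_new(ifan->ifan_index, ifan->ifan_name) == NULL)
			fatal("if_announce failed");
		break;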

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name kroute.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 1 -pic-is-pie -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/usr.sbin/ospf6d/obj -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/usr.sbin/ospf6d -internal-isystem /usr/local/llvm16/lib/clang/16/include -internal-externc-isystem /usr/include -O2 -fdebug-compilation-dir=/usr/src/usr.sbin/ospf6d/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fno-jump-tables -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/scan/2024-01-11-140451-98009-1 -x c /usr/src/usr.sbin/ospf6d/kroute.c
1/* $OpenBSD: kroute.c,v 1.68 2023/06/21 09:47:03 sthen Exp $ */
2
3/*
4 * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
5 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20#include <sys/types.h>
21#include <sys/socket.h>
22#include <sys/sysctl.h>
23#include <sys/tree.h>
24#include <sys/uio.h>
25#include <netinet/in.h>
26#include <arpa/inet.h>
27#include <net/if.h>
28#include <net/if_dl.h>
29#include <net/if_types.h>
30#include <net/route.h>
31#include <err.h>
32#include <errno.h>
33#include <fcntl.h>
34#include <stdio.h>
35#include <stdlib.h>
36#include <string.h>
37#include <unistd.h>
38#include <limits.h>
39
40#include "ospf6d.h"
41#include "ospfe.h"
42#include "log.h"
43
44struct {
45 u_int32_t rtseq;
46 pid_t pid;
47 int fib_sync;
48 int fib_serial;
49 u_int8_t fib_prio;
50 int fd;
51 struct event ev;
52 struct event reload;
53 u_int rdomain;
54#define KR_RELOAD_IDLE 0
55#define KR_RELOAD_FETCH 1
56#define KR_RELOAD_HOLD 2
57 int reload_state;
58} kr_state;
59
60struct kroute_node {
 61 RB_ENTRY(kroute_node) entry;
62 struct kroute_node *next;
63 struct kroute r;
64 int serial;
65};
66
67void kr_redist_remove(struct kroute_node *, struct kroute_node *);
68int kr_redist_eval(struct kroute *, struct kroute *);
69void kr_redistribute(struct kroute_node *);
70int kroute_compare(struct kroute_node *, struct kroute_node *);
71int kr_change_fib(struct kroute_node *, struct kroute *, int, int);
72int kr_delete_fib(struct kroute_node *);
73
74struct kroute_node *kroute_find(const struct in6_addr *, u_int8_t,
75 u_int8_t);
76struct kroute_node *kroute_matchgw(struct kroute_node *,
77 struct in6_addr *, unsigned int);
78int kroute_insert(struct kroute_node *);
79int kroute_remove(struct kroute_node *);
80void kroute_clear(void);
81
82struct iface *kif_update(u_short, int, struct if_data *,
83 struct sockaddr_dl *);
84int kif_validate(u_short);
85
86struct kroute_node *kroute_match(struct in6_addr *);
87
88int protect_lo(void);
89void get_rtaddrs(int, struct sockaddr *, struct sockaddr **);
90void if_change(u_short, int, struct if_data *, struct sockaddr_dl *);
91void if_newaddr(u_short, struct sockaddr_in6 *,
92 struct sockaddr_in6 *, struct sockaddr_in6 *);
93void if_deladdr(u_short, struct sockaddr_in6 *,
94 struct sockaddr_in6 *, struct sockaddr_in6 *);
95void if_announce(void *);
96
97int send_rtmsg(int, int, struct kroute *);
98int dispatch_rtmsg(void);
99int fetchtable(void);
100int refetchtable(void);
101int rtmsg_process(char *, size_t);
102void kr_fib_reload_timer(int, short, void *);
103void kr_fib_reload_arm_timer(int);
104
105RB_HEAD(kroute_tree, kroute_node) krt;
106RB_PROTOTYPE(kroute_tree, kroute_node, entry, kroute_compare)
107RB_GENERATE(kroute_tree, kroute_node, entry, kroute_compare)
108
109int
110kr_init(int fs, u_int rdomain, int redis_label_or_prefix, u_int8_t fib_prio)
111{
112 int opt = 0, rcvbuf, default_rcvbuf;
113 socklen_t optlen;
114 int filter_prio = fib_prio;
115 int filter_flags = RTF_LLINFO0x400 | RTF_BROADCAST0x400000;
116
117 kr_state.fib_sync = fs;
118 kr_state.rdomain = rdomain;
119 kr_state.fib_prio = fib_prio;
120
121 if ((kr_state.fd = socket(AF_ROUTE17,
122 SOCK_RAW3 | SOCK_CLOEXEC0x8000 | SOCK_NONBLOCK0x4000, AF_INET624)) == -1) {
123 log_warn("kr_init: socket");
124 return (-1);
125 }
126
127 /* not interested in my own messages */
128 if (setsockopt(kr_state.fd, SOL_SOCKET0xffff, SO_USELOOPBACK0x0040,
129 &opt, sizeof(opt)) == -1)
130 log_warn("kr_init: setsockopt"); /* not fatal */
131
132 if (redis_label_or_prefix) {
133 filter_prio = 0;
134 log_info("%s: priority filter disabled", __func__);
135 } else
136 log_debug("%s: priority filter enabled", __func__);
137
138 if (setsockopt(kr_state.fd, AF_ROUTE17, ROUTE_PRIOFILTER3, &filter_prio,
139 sizeof(filter_prio)) == -1) {
140 log_warn("%s: setsockopt AF_ROUTE ROUTE_PRIOFILTER", __func__);
141 /* not fatal */
142 }
143
144 if (setsockopt(kr_state.fd, AF_ROUTE17, ROUTE_FLAGFILTER4, &filter_flags,
145 sizeof(filter_flags)) == -1) {
146 log_warn("%s: setsockopt AF_ROUTE ROUTE_FLAGFILTER", __func__);
147 /* not fatal */
148 }
149
150 /* grow receive buffer, don't wanna miss messages */
151 optlen = sizeof(default_rcvbuf);
152 if (getsockopt(kr_state.fd, SOL_SOCKET0xffff, SO_RCVBUF0x1002,
153 &default_rcvbuf, &optlen) == -1)
154 log_warn("kr_init getsockopt SOL_SOCKET SO_RCVBUF");
155 else
156 for (rcvbuf = MAX_RTSOCK_BUF(2 * 1024 * 1024);
157 rcvbuf > default_rcvbuf &&
158 setsockopt(kr_state.fd, SOL_SOCKET0xffff, SO_RCVBUF0x1002,
159 &rcvbuf, sizeof(rcvbuf)) == -1 && errno(*__errno()) == ENOBUFS55;
160 rcvbuf /= 2)
161 ; /* nothing */
162
163 kr_state.pid = getpid();
164 kr_state.rtseq = 1;
165
166 RB_INIT(&krt)do { (&krt)->rbh_root = ((void *)0); } while (0);
167
168 if (fetchtable() == -1)
169 return (-1);
170
171 if (protect_lo() == -1)
172 return (-1);
173
174 event_set(&kr_state.ev, kr_state.fd, EV_READ0x02 | EV_PERSIST0x10,
175 kr_dispatch_msg, NULL((void *)0));
176 event_add(&kr_state.ev, NULL((void *)0));
177
178 kr_state.reload_state = KR_RELOAD_IDLE0;
 179 evtimer_set(&kr_state.reload, kr_fib_reload_timer, NULL);
180
181 return (0);
182}
183
184int
185kr_change_fib(struct kroute_node *kr, struct kroute *kroute, int krcount,
186 int action)
187{
188 int i;
189 struct kroute_node *kn, *nkn;
190
191 if (action == RTM_ADD0x1) {
192 /*
193 * First remove all stale multipath routes.
194 * This step must be skipped when the action is RTM_CHANGE
195 * because it is already a single path route that will be
196 * changed.
197 */
198 for (kn = kr; kn != NULL((void *)0); kn = nkn) {
199 for (i = 0; i < krcount; i++) {
200 if (kn->r.scope == kroute[i].scope &&
 201 IN6_ARE_ADDR_EQUAL(&kn->r.nexthop,
 202 &kroute[i].nexthop))
203 break;
204 }
205 nkn = kn->next;
206 if (i == krcount) {
207 /* stale route */
208 if (kr_delete_fib(kn) == -1)
209 log_warnx("kr_delete_fib failed");
210 /*
211 * if head element was removed we need to adjust
212 * the head
213 */
214 if (kr == kn)
215 kr = nkn;
216 }
217 }
218 }
219
220 /*
221 * now add or change the route
222 */
223 for (i = 0; i < krcount; i++) {
224 /* nexthop ::1 -> ignore silently */
 225 if (IN6_IS_ADDR_LOOPBACK(&kroute[i].nexthop))
226 continue;
227
228 if (action == RTM_ADD0x1 && kr) {
229 for (kn = kr; kn != NULL((void *)0); kn = kn->next) {
230 if (kn->r.scope == kroute[i].scope &&
 231 IN6_ARE_ADDR_EQUAL(&kn->r.nexthop,
 232 &kroute[i].nexthop))
233 break;
234 }
235
236 if (kn != NULL((void *)0))
237 /* nexthop already present, skip it */
238 continue;
239 } else
240 /* modify first entry */
241 kn = kr;
242
243 /* send update */
244 if (send_rtmsg(kr_state.fd, action, &kroute[i]) == -1)
245 return (-1);
246
247 /* create new entry unless we are changing the first entry */
248 if (action == RTM_ADD0x1)
249 if ((kn = calloc(1, sizeof(*kn))) == NULL((void *)0))
250 fatal(NULL((void *)0));
251
252 kn->r.prefix = kroute[i].prefix;
253 kn->r.prefixlen = kroute[i].prefixlen;
254 kn->r.nexthop = kroute[i].nexthop;
255 kn->r.scope = kroute[i].scope;
256 kn->r.flags = kroute[i].flags | F_OSPFD_INSERTED0x0001;
257 kn->r.priority = kr_state.fib_prio;
258 kn->r.ext_tag = kroute[i].ext_tag;
259 rtlabel_unref(kn->r.rtlabel); /* for RTM_CHANGE */
260 kn->r.rtlabel = kroute[i].rtlabel;
261
262 if (action == RTM_ADD0x1)
263 if (kroute_insert(kn) == -1) {
264 log_debug("kr_update_fib: cannot insert %s",
265 log_in6addr(&kn->r.nexthop));
266 free(kn);
267 }
268 action = RTM_ADD0x1;
269 }
270 return (0);
271}
272
273int
274kr_change(struct kroute *kroute, int krcount)
275{
276 struct kroute_node *kr;
277 int action = RTM_ADD0x1;
278
279 kroute->rtlabel = rtlabel_tag2id(kroute->ext_tag);
280
281 kr = kroute_find(&kroute->prefix, kroute->prefixlen, kr_state.fib_prio);
282 if (kr != NULL((void *)0) && kr->next == NULL((void *)0) && krcount == 1) {
283 /*
284 * single path OSPF route.
285 * The kernel does not allow to change a gateway route to a
286 * cloning route or contrary. In this case remove and add the
287 * route, otherwise change the existing one.
288 */
 289 if ((IN6_IS_ADDR_UNSPECIFIED(&kroute->nexthop) &&
 290 !IN6_IS_ADDR_UNSPECIFIED(&kr->r.nexthop)) ||
 291 (!IN6_IS_ADDR_UNSPECIFIED(&kroute->nexthop) &&
 292 IN6_IS_ADDR_UNSPECIFIED(&kr->r.nexthop))) {
293 if (kr_delete_fib(kr) == 0)
294 kr = NULL((void *)0);
295 else {
296 log_warn("kr_change: failed to remove route: "
297 "%s/%d", log_in6addr(&kr->r.prefix),
298 kr->r.prefixlen);
299 return (-1);
300 }
301 } else
302 action = RTM_CHANGE0x3;
303 }
304
305 return (kr_change_fib(kr, kroute, krcount, action));
306}
307
308int
309kr_delete_fib(struct kroute_node *kr)
310{
311 if (kr->r.priority != kr_state.fib_prio)
312 log_warn("kr_delete_fib: %s/%d has wrong priority %d",
313 log_in6addr(&kr->r.prefix), kr->r.prefixlen,
314 kr->r.priority);
315
316 if (send_rtmsg(kr_state.fd, RTM_DELETE0x2, &kr->r) == -1)
317 return (-1);
318
319 if (kroute_remove(kr) == -1)
320 return (-1);
321
322 return (0);
323}
324
325int
326kr_delete(struct kroute *kroute)
327{
328 struct kroute_node *kr, *nkr;
329
330 if ((kr = kroute_find(&kroute->prefix, kroute->prefixlen,
331 kr_state.fib_prio)) == NULL((void *)0))
332 return (0);
333
334 while (kr != NULL((void *)0)) {
335 nkr = kr->next;
336 if (kr_delete_fib(kr) == -1)
337 return (-1);
338 kr = nkr;
339 }
340
341 return (0);
342}
343
344void
345kr_shutdown(void)
346{
347 kr_fib_decouple();
348 kroute_clear();
349}
350
351void
352kr_fib_couple(void)
353{
354 struct kroute_node *kr;
355 struct kroute_node *kn;
356
357 if (kr_state.fib_sync == 1) /* already coupled */
358 return;
359
360 kr_state.fib_sync = 1;
361
 362 RB_FOREACH(kr, kroute_tree, &krt)
363 if (kr->r.priority == kr_state.fib_prio)
364 for (kn = kr; kn != NULL((void *)0); kn = kn->next)
365 send_rtmsg(kr_state.fd, RTM_ADD0x1, &kn->r);
366
367 log_info("kernel routing table coupled");
368}
369
370void
371kr_fib_decouple(void)
372{
373 struct kroute_node *kr;
374 struct kroute_node *kn;
375
376 if (kr_state.fib_sync == 0) /* already decoupled */
377 return;
378
 379 RB_FOREACH(kr, kroute_tree, &krt)
380 if (kr->r.priority == kr_state.fib_prio)
381 for (kn = kr; kn != NULL((void *)0); kn = kn->next)
382 send_rtmsg(kr_state.fd, RTM_DELETE0x2, &kn->r);
383
384 kr_state.fib_sync = 0;
385
386 log_info("kernel routing table decoupled");
387}
388
389void
390kr_fib_reload_timer(int fd, short event, void *bula)
391{
392 if (kr_state.reload_state == KR_RELOAD_FETCH1) {
393 kr_fib_reload();
394 kr_state.reload_state = KR_RELOAD_HOLD2;
395 kr_fib_reload_arm_timer(KR_RELOAD_HOLD_TIMER5000);
396 } else {
397 kr_state.reload_state = KR_RELOAD_IDLE0;
398 }
399}
400
401void
402kr_fib_reload_arm_timer(int delay)
403{
404 struct timeval tv;
405
406 timerclear(&tv)(&tv)->tv_sec = (&tv)->tv_usec = 0;
407 tv.tv_sec = delay / 1000;
408 tv.tv_usec = (delay % 1000) * 1000;
409
410 if (evtimer_add(&kr_state.reload, &tv)event_add(&kr_state.reload, &tv) == -1)
411 fatal("add_reload_timer");
412}
413
414void
415kr_fib_reload(void)
416{
417 struct kroute_node *krn, *kr, *kn;
418
419 log_info("reloading interface list and routing table");
420
421 kr_state.fib_serial++;
422
423 if (fetchifs(0) != 0 || fetchtable() != 0)
424 return;
425
426 for (kr = RB_MIN(kroute_tree, &krt)kroute_tree_RB_MINMAX(&krt, -1); kr != NULL((void *)0); kr = krn) {
427 krn = RB_NEXT(kroute_tree, &krt, kr)kroute_tree_RB_NEXT(kr);
428
429 do {
430 kn = kr->next;
431
432 if (kr->serial != kr_state.fib_serial) {
433
434 if (kr->r.priority == kr_state.fib_prio) {
435 kr->serial = kr_state.fib_serial;
436 if (send_rtmsg(kr_state.fd,
437 RTM_ADD0x1, &kr->r) != 0)
438 break;
439 } else
440 kroute_remove(kr);
441 }
442
443 } while ((kr = kn) != NULL((void *)0));
444 }
445}
446
447void
448kr_fib_update_prio(u_int8_t fib_prio)
449{
450 struct kroute_node *kr;
451
 452 RB_FOREACH(kr, kroute_tree, &krt)
453 if ((kr->r.flags & F_OSPFD_INSERTED0x0001))
454 kr->r.priority = fib_prio;
455
456 log_info("fib priority changed from %hhu to %hhu", kr_state.fib_prio,
457 fib_prio);
458
459 kr_state.fib_prio = fib_prio;
460}
461
462void
463kr_dispatch_msg(int fd, short event, void *bula)
464{
465 /* XXX this is stupid */
466 dispatch_rtmsg();
467}
468
469void
470kr_show_route(struct imsg *imsg)
471{
472 struct kroute_node *kr;
473 struct kroute_node *kn;
474 int flags;
475 struct in6_addr addr;
476
477 switch (imsg->hdr.type) {
478 case IMSG_CTL_KROUTE:
479 if (imsg->hdr.len != IMSG_HEADER_SIZEsizeof(struct imsg_hdr) + sizeof(flags)) {
480 log_warnx("kr_show_route: wrong imsg len");
481 return;
482 }
483 memcpy(&flags, imsg->data, sizeof(flags));
 484 RB_FOREACH(kr, kroute_tree, &krt)
485 if (!flags || kr->r.flags & flags) {
486 kn = kr;
487 do {
488 main_imsg_compose_ospfe(IMSG_CTL_KROUTE,
489 imsg->hdr.pid,
490 &kn->r, sizeof(kn->r));
491 } while ((kn = kn->next) != NULL((void *)0));
492 }
493 break;
494 case IMSG_CTL_KROUTE_ADDR:
495 if (imsg->hdr.len != IMSG_HEADER_SIZEsizeof(struct imsg_hdr) +
496 sizeof(struct in6_addr)) {
497 log_warnx("kr_show_route: wrong imsg len");
498 return;
499 }
500 memcpy(&addr, imsg->data, sizeof(addr));
501 kr = kroute_match(&addr);
502 if (kr != NULL((void *)0))
503 main_imsg_compose_ospfe(IMSG_CTL_KROUTE, imsg->hdr.pid,
504 &kr->r, sizeof(kr->r));
505 break;
506 default:
507 log_debug("kr_show_route: error handling imsg");
508 break;
509 }
510
511 main_imsg_compose_ospfe(IMSG_CTL_END, imsg->hdr.pid, NULL((void *)0), 0);
512}
513
514void
515kr_redist_remove(struct kroute_node *kh, struct kroute_node *kn)
516{
517 struct kroute *kr;
518
519 /* was the route redistributed? */
520 if ((kn->r.flags & F_REDISTRIBUTED0x0200) == 0)
521 return;
522
523 /* remove redistributed flag */
524 kn->r.flags &= ~F_REDISTRIBUTED0x0200;
525 kr = &kn->r;
526
527 /* probably inform the RDE (check if no other path is redistributed) */
528 for (kn = kh; kn; kn = kn->next)
529 if (kn->r.flags & F_REDISTRIBUTED0x0200)
530 break;
531
532 if (kn == NULL((void *)0))
533 main_imsg_compose_rde(IMSG_NETWORK_DEL, 0, kr,
534 sizeof(struct kroute));
535}
536
537int
538kr_redist_eval(struct kroute *kr, struct kroute *new_kr)
539{
540 u_int32_t metric = 0;
541
542 /* Only non-ospfd routes are considered for redistribution. */
543 if (!(kr->flags & F_KERNEL0x0002))
544 goto dont_redistribute;
545
546 /* Dynamic routes are not redistributable. */
547 if (kr->flags & F_DYNAMIC0x0040)
548 goto dont_redistribute;
549
550 /* interface is not up and running so don't announce */
551 if (kr->flags & F_DOWN0x0010)
552 goto dont_redistribute;
553
554 /*
555 * We consider loopback, multicast, link- and site-local,
556 * IPv4 mapped and IPv4 compatible addresses as not redistributable.
557 */
 558 if (IN6_IS_ADDR_LOOPBACK(&kr->prefix) ||
 559 IN6_IS_ADDR_MULTICAST(&kr->prefix) ||
 560 IN6_IS_ADDR_LINKLOCAL(&kr->prefix) ||
 561 IN6_IS_ADDR_SITELOCAL(&kr->prefix) ||
 562 IN6_IS_ADDR_V4MAPPED(&kr->prefix) ||
 563 IN6_IS_ADDR_V4COMPAT(&kr->prefix))
564 goto dont_redistribute;
565 /*
566 * Consider networks with nexthop loopback as not redistributable
567 * unless it is a reject or blackhole route.
568 */
 569 if (IN6_IS_ADDR_LOOPBACK(&kr->nexthop) &&
570 !(kr->flags & (F_BLACKHOLE0x0100|F_REJECT0x0080)))
571 goto dont_redistribute;
572
573 /* Should we redistribute this route? */
574 if (!ospf_redistribute(kr, &metric))
575 goto dont_redistribute;
576
577 /* prefix should be redistributed */
578 kr->flags |= F_REDISTRIBUTED0x0200;
579 /*
580 * only one of all multipath routes can be redistributed so
581 * redistribute the best one.
582 */
583 if (new_kr->metric > metric) {
584 *new_kr = *kr;
585 new_kr->metric = metric;
586 }
587
588 return (1);
589
590dont_redistribute:
591 /* was the route redistributed? */
592 if ((kr->flags & F_REDISTRIBUTED0x0200) == 0)
593 return (0);
594
595 kr->flags &= ~F_REDISTRIBUTED0x0200;
596 return (1);
597}
598
599void
600kr_redistribute(struct kroute_node *kh)
601{
602 struct kroute_node *kn;
603 struct kroute kr;
604 int redistribute = 0;
605
606 /* only the highest prio route can be redistributed */
607 if (kroute_find(&kh->r.prefix, kh->r.prefixlen, RTP_ANY64) != kh)
608 return;
609
610 bzero(&kr, sizeof(kr));
611 kr.metric = UINT_MAX0xffffffffU;
612 for (kn = kh; kn; kn = kn->next)
613 if (kr_redist_eval(&kn->r, &kr))
614 redistribute = 1;
615
616 if (!redistribute)
617 return;
618
619 if (kr.flags & F_REDISTRIBUTED0x0200) {
620 main_imsg_compose_rde(IMSG_NETWORK_ADD, 0, &kr,
621 sizeof(struct kroute));
622 } else {
623 kr = kh->r;
624 main_imsg_compose_rde(IMSG_NETWORK_DEL, 0, &kr,
625 sizeof(struct kroute));
626 }
627}
628
629void
630kr_reload(int redis_label_or_prefix)
631{
632 struct kroute_node *kr, *kn;
633 u_int32_t dummy;
634 int r;
635 int filter_prio = kr_state.fib_prio;
636
637 /* update the priority filter */
638 if (redis_label_or_prefix) {
639 filter_prio = 0;
640 log_info("%s: priority filter disabled", __func__);
641 } else
642 log_debug("%s: priority filter enabled", __func__);
643
644 if (setsockopt(kr_state.fd, AF_ROUTE17, ROUTE_PRIOFILTER3, &filter_prio,
645 sizeof(filter_prio)) == -1) {
646 log_warn("%s: setsockopt AF_ROUTE ROUTE_PRIOFILTER", __func__);
647 /* not fatal */
648 }
649
 650 RB_FOREACH(kr, kroute_tree, &krt) {
651 for (kn = kr; kn; kn = kn->next) {
652 r = ospf_redistribute(&kn->r, &dummy);
653 /*
654 * if it is redistributed, redistribute again metric
655 * may have changed.
656 */
657 if ((kn->r.flags & F_REDISTRIBUTED0x0200 && !r) || r)
658 break;
659 }
660 if (kn) {
661 /*
662 * kr_redistribute copes with removes and RDE with
663 * duplicates
664 */
665 kr_redistribute(kr);
666 }
667 }
668}
669
670/* rb-tree compare */
671int
672kroute_compare(struct kroute_node *a, struct kroute_node *b)
673{
674 int i;
675
676 /* XXX maybe switch a & b */
677 i = memcmp(&a->r.prefix, &b->r.prefix, sizeof(a->r.prefix));
678 if (i)
679 return (i);
680 if (a->r.prefixlen < b->r.prefixlen)
681 return (-1);
682 if (a->r.prefixlen > b->r.prefixlen)
683 return (1);
684
685 /* if the priority is RTP_ANY finish on the first address hit */
686 if (a->r.priority == RTP_ANY64 || b->r.priority == RTP_ANY64)
687 return (0);
688 if (a->r.priority < b->r.priority)
689 return (-1);
690 if (a->r.priority > b->r.priority)
691 return (1);
692 return (0);
693}
694
695/* tree management */
696struct kroute_node *
697kroute_find(const struct in6_addr *prefix, u_int8_t prefixlen, u_int8_t prio)
698{
699 struct kroute_node s;
700 struct kroute_node *kn, *tmp;
701
702 s.r.prefix = *prefix;
703 s.r.prefixlen = prefixlen;
704 s.r.priority = prio;
705
706 kn = RB_FIND(kroute_tree, &krt, &s)kroute_tree_RB_FIND(&krt, &s);
707 if (kn && prio == RTP_ANY64) {
708 tmp = RB_PREV(kroute_tree, &krt, kn)kroute_tree_RB_PREV(kn);
709 while (tmp) {
710 if (kroute_compare(&s, tmp) == 0)
711 kn = tmp;
712 else
713 break;
714 tmp = RB_PREV(kroute_tree, &krt, kn)kroute_tree_RB_PREV(kn);
715 }
716 }
717 return (kn);
718}
719
720struct kroute_node *
721kroute_matchgw(struct kroute_node *kr, struct in6_addr *nh, unsigned int scope)
722{
723 while (kr) {
724 if (scope == kr->r.scope &&
 725 IN6_ARE_ADDR_EQUAL(&kr->r.nexthop, nh))
726 return (kr);
727 kr = kr->next;
728 }
729
730 return (NULL((void *)0));
731}
732
733int
734kroute_insert(struct kroute_node *kr)
735{
736 struct kroute_node *krm, *krh;
737
738 kr->serial = kr_state.fib_serial;
739
740 if ((krh = RB_INSERT(kroute_tree, &krt, kr)kroute_tree_RB_INSERT(&krt, kr)) != NULL((void *)0)) {
741 /*
742 * Multipath route, add at end of list.
743 */
744 krm = krh;
745 while (krm->next != NULL((void *)0))
746 krm = krm->next;
747 krm->next = kr;
748 kr->next = NULL((void *)0); /* to be sure */
749 } else
750 krh = kr;
751
752 if (!(kr->r.flags & F_KERNEL0x0002)) {
753 /* don't validate or redistribute ospf route */
754 kr->r.flags &= ~F_DOWN0x0010;
755 return (0);
756 }
757
758 if (kif_validate(kr->r.ifindex))
759 kr->r.flags &= ~F_DOWN0x0010;
760 else
761 kr->r.flags |= F_DOWN0x0010;
762
763 kr_redistribute(krh);
764 return (0);
765}
766
767int
768kroute_remove(struct kroute_node *kr)
769{
770 struct kroute_node *krm;
771
772 if ((krm = RB_FIND(kroute_tree, &krt, kr)kroute_tree_RB_FIND(&krt, kr)) == NULL((void *)0)) {
773 log_warnx("kroute_remove failed to find %s/%u",
774 log_in6addr(&kr->r.prefix), kr->r.prefixlen);
775 return (-1);
776 }
777
778 if (krm == kr) {
779 /* head element */
780 if (RB_REMOVE(kroute_tree, &krt, kr)kroute_tree_RB_REMOVE(&krt, kr) == NULL((void *)0)) {
781 log_warnx("kroute_remove failed for %s/%u",
782 log_in6addr(&kr->r.prefix), kr->r.prefixlen);
783 return (-1);
784 }
785 if (kr->next != NULL((void *)0)) {
786 if (RB_INSERT(kroute_tree, &krt, kr->next)kroute_tree_RB_INSERT(&krt, kr->next) != NULL((void *)0)) {
787 log_warnx("kroute_remove failed to add %s/%u",
788 log_in6addr(&kr->r.prefix),
789 kr->r.prefixlen);
790 return (-1);
791 }
792 }
793 } else {
794 /* somewhere in the list */
795 while (krm->next != kr && krm->next != NULL((void *)0))
796 krm = krm->next;
797 if (krm->next == NULL((void *)0)) {
798 log_warnx("kroute_remove multipath list corrupted "
799 "for %s/%u", log_in6addr(&kr->r.prefix),
800 kr->r.prefixlen);
801 return (-1);
802 }
803 krm->next = kr->next;
804 }
805
806 kr_redist_remove(krm, kr);
807 rtlabel_unref(kr->r.rtlabel);
808
809 free(kr);
810 return (0);
811}
812
813void
814kroute_clear(void)
815{
816 struct kroute_node *kr;
817
818 while ((kr = RB_MIN(kroute_tree, &krt)kroute_tree_RB_MINMAX(&krt, -1)) != NULL((void *)0))
819 kroute_remove(kr);
820}
821
822struct iface *
823kif_update(u_short ifindex, int flags, struct if_data *ifd,
824 struct sockaddr_dl *sdl)
825{
826 struct iface *iface;
827 char ifname[IF_NAMESIZE16];
828
829 if ((iface = if_find(ifindex)) == NULL((void *)0)) {
830 bzero(ifname, sizeof(ifname));
831 if (sdl && sdl->sdl_family == AF_LINK18) {
832 if (sdl->sdl_nlen >= sizeof(ifname))
833 memcpy(ifname, sdl->sdl_data,
834 sizeof(ifname) - 1);
835 else if (sdl->sdl_nlen > 0)
836 memcpy(ifname, sdl->sdl_data, sdl->sdl_nlen);
837 else
838 return (NULL((void *)0));
839 } else
840 return (NULL((void *)0));
841 if ((iface = if_new(ifindex, ifname)) == NULL((void *)0))
842 return (NULL((void *)0));
843 }
844
845 if_update(iface, ifd->ifi_mtu, flags, ifd->ifi_type,
846 ifd->ifi_link_state, ifd->ifi_baudrate, ifd->ifi_rdomain);
847
848 return (iface);
849}
850
851int
852kif_validate(u_short ifindex)
853{
854 struct iface *iface;
855
856 if ((iface = if_find(ifindex)) == NULL((void *)0)) {
857 log_warnx("interface with index %u not found", ifindex);
858 return (-1);
859 }
860
861 return ((iface->flags & IFF_UP0x1) && LINK_STATE_IS_UP(iface->linkstate)((iface->linkstate) >= 4 || (iface->linkstate) == 0));
862}
863
864struct kroute_node *
865kroute_match(struct in6_addr *key)
866{
867 int i;
868 struct kroute_node *kr;
869 struct in6_addr ina;
870
871 /* we will never match the default route */
872 for (i = 128; i > 0; i--) {
873 inet6applymask(&ina, key, i);
874 if ((kr = kroute_find(&ina, i, RTP_ANY64)) != NULL((void *)0))
875 return (kr);
876 }
877
878 /* if we don't have a match yet, try to find a default route */
879 if ((kr = kroute_find(&in6addr_any, 0, RTP_ANY64)) != NULL((void *)0))
880 return (kr);
881
882 return (NULL((void *)0));
883}
884
885/* misc */
886int
887protect_lo(void)
888{
889 struct kroute_node *kr;
890
891 /* special protection for loopback */
892 if ((kr = calloc(1, sizeof(struct kroute_node))) == NULL((void *)0)) {
893 log_warn("protect_lo");
894 return (-1);
895 }
896 memcpy(&kr->r.prefix, &in6addr_loopback, sizeof(kr->r.prefix));
897 kr->r.prefixlen = 128;
898 kr->r.flags = F_KERNEL0x0002|F_CONNECTED0x0008;
899
900 if (RB_INSERT(kroute_tree, &krt, kr)kroute_tree_RB_INSERT(&krt, kr) != NULL((void *)0))
901 free(kr); /* kernel route already there, no problem */
902
903 return (0);
904}
905
906#define ROUNDUP(a) \
 907 ((a) > 0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))
908
909void
910get_rtaddrs(int addrs, struct sockaddr *sa, struct sockaddr **rti_info)
911{
912 int i;
913
914 for (i = 0; i < RTAX_MAX15; i++) {
915 if (addrs & (1 << i)) {
916 rti_info[i] = sa;
917 sa = (struct sockaddr *)((char *)(sa) +
 918 ROUNDUP(sa->sa_len));
919 } else
920 rti_info[i] = NULL((void *)0);
921 }
922}
923
924void
925if_change(u_short ifindex, int flags, struct if_data *ifd,
926 struct sockaddr_dl *sdl)
927{
928 struct kroute_node *kr, *tkr;
929 struct iface *iface;
930 u_int8_t wasvalid, isvalid;
931
932 wasvalid = kif_validate(ifindex);
933
934 if ((iface = kif_update(ifindex, flags, ifd, sdl)) == NULL((void *)0)) {
935 log_warn("if_change: kif_update(%u)", ifindex);
936 return;
937 }
938
939 /* inform engine and rde about state change */
940 main_imsg_compose_rde(IMSG_IFINFO, 0, iface, sizeof(struct iface));
941 main_imsg_compose_ospfe(IMSG_IFINFO, 0, iface, sizeof(struct iface));
942
943 isvalid = (iface->flags & IFF_UP0x1) &&
944 LINK_STATE_IS_UP(iface->linkstate)((iface->linkstate) >= 4 || (iface->linkstate) == 0);
945
946 if (wasvalid == isvalid)
947 return; /* nothing changed wrt validity */
948
949 /* update redistribute list */
 950 RB_FOREACH(kr, kroute_tree, &krt) {
951 for (tkr = kr; tkr != NULL((void *)0); tkr = tkr->next) {
952 if (tkr->r.ifindex == ifindex) {
953 if (isvalid)
954 tkr->r.flags &= ~F_DOWN0x0010;
955 else
956 tkr->r.flags |= F_DOWN0x0010;
957
958 }
959 }
960 kr_redistribute(kr);
961 }
962}
963
964void
965if_newaddr(u_short ifindex, struct sockaddr_in6 *ifa, struct sockaddr_in6 *mask,
966 struct sockaddr_in6 *brd)
967{
968 struct iface *iface;
969 struct iface_addr *ia;
970 struct ifaddrchange ifc;
971
972 if (ifa == NULL((void *)0) || ifa->sin6_family != AF_INET624)
973 return;
974 if ((iface = if_find(ifindex)) == NULL((void *)0)) {
975 log_warnx("if_newaddr: corresponding if %d not found", ifindex);
976 return;
977 }
978
979 /* We only care about link-local and global-scope. */
 980 if (IN6_IS_ADDR_UNSPECIFIED(&ifa->sin6_addr) ||
 981 IN6_IS_ADDR_LOOPBACK(&ifa->sin6_addr) ||
 982 IN6_IS_ADDR_MULTICAST(&ifa->sin6_addr) ||
 983 IN6_IS_ADDR_SITELOCAL(&ifa->sin6_addr) ||
 984 IN6_IS_ADDR_V4MAPPED(&ifa->sin6_addr) ||
 985 IN6_IS_ADDR_V4COMPAT(&ifa->sin6_addr))
986 return;
987
988 clearscope(&ifa->sin6_addr);
989
 990 if (IN6_IS_ADDR_LINKLOCAL(&ifa->sin6_addr) ||
991 iface->flags & IFF_LOOPBACK0x8)
992 iface->addr = ifa->sin6_addr;
993
994 if ((ia = calloc(1, sizeof(struct iface_addr))) == NULL((void *)0))
995 fatal("if_newaddr");
996
997 ia->addr = ifa->sin6_addr;
998
999 if (mask)
1000 ia->prefixlen = mask2prefixlen(mask);
1001 else
1002 ia->prefixlen = 0;
1003 if (brd && brd->sin6_family == AF_INET624)
1004 ia->dstbrd = brd->sin6_addr;
1005 else
1006 bzero(&ia->dstbrd, sizeof(ia->dstbrd));
1007
1008 switch (iface->type) {
1009 case IF_TYPE_BROADCAST:
1010 case IF_TYPE_NBMA:
1011 log_debug("if_newaddr: ifindex %u, addr %s/%d",
1012 ifindex, log_in6addr(&ia->addr), ia->prefixlen);
1013 break;
1014 case IF_TYPE_VIRTUALLINK: /* FIXME */
1015 break;
1016 case IF_TYPE_POINTOPOINT:
1017 case IF_TYPE_POINTOMULTIPOINT:
1018 log_debug("if_newaddr: ifindex %u, addr %s/%d, "
1019 "dest %s", ifindex, log_in6addr(&ia->addr),
1020 ia->prefixlen, log_in6addr(&ia->dstbrd));
1021 break;
1022 default:
1023 fatalx("if_newaddr: unknown interface type");
1024 }
1025
 1026 TAILQ_INSERT_TAIL(&iface->ifa_list, ia, entry);
1027 /* inform engine and rde if interface is used */
1028 if (iface->cflags & F_IFACE_CONFIGURED0x02) {
1029 ifc.addr = ia->addr;
1030 ifc.dstbrd = ia->dstbrd;
1031 ifc.prefixlen = ia->prefixlen;
1032 ifc.ifindex = ifindex;
1033 main_imsg_compose_ospfe(IMSG_IFADDRNEW, 0, &ifc, sizeof(ifc));
1034 main_imsg_compose_rde(IMSG_IFADDRNEW, 0, &ifc, sizeof(ifc));
1035 }
1036}
1037
1038void
1039if_deladdr(u_short ifindex, struct sockaddr_in6 *ifa, struct sockaddr_in6 *mask,
1040 struct sockaddr_in6 *brd)
1041{
1042 struct iface *iface;
1043 struct iface_addr *ia, *nia;
1044 struct ifaddrchange ifc;
1045
1046 if (ifa == NULL((void *)0) || ifa->sin6_family != AF_INET624)
1047 return;
1048 if ((iface = if_find(ifindex)) == NULL((void *)0)) {
1049 log_warnx("if_deladdr: corresponding if %d not found", ifindex);
1050 return;
1051 }
1052
1053 /* We only care about link-local and global-scope. */
 1054 if (IN6_IS_ADDR_UNSPECIFIED(&ifa->sin6_addr) ||
 1055 IN6_IS_ADDR_LOOPBACK(&ifa->sin6_addr) ||
 1056 IN6_IS_ADDR_MULTICAST(&ifa->sin6_addr) ||
 1057 IN6_IS_ADDR_SITELOCAL(&ifa->sin6_addr) ||
 1058 IN6_IS_ADDR_V4MAPPED(&ifa->sin6_addr) ||
 1059 IN6_IS_ADDR_V4COMPAT(&ifa->sin6_addr))
1060 return;
1061
1062 clearscope(&ifa->sin6_addr);
1063
1064 for (ia = TAILQ_FIRST(&iface->ifa_list); ia != NULL; ia = nia) {
1065 nia = TAILQ_NEXT(ia, entry);
1066
1067 if (IN6_ARE_ADDR_EQUAL(&ia->addr, &ifa->sin6_addr)) {
1068 log_debug("if_deladdr: ifindex %u, addr %s/%d",
1069 ifindex, log_in6addr(&ia->addr), ia->prefixlen);
1070 TAILQ_REMOVE(&iface->ifa_list, ia, entry);
1071 /* inform engine and rde if interface is used */
1072 if (iface->cflags & F_IFACE_CONFIGURED) {
1073 ifc.addr = ia->addr;
1074 ifc.dstbrd = ia->dstbrd;
1075 ifc.prefixlen = ia->prefixlen;
1076 ifc.ifindex = ifindex;
1077 main_imsg_compose_ospfe(IMSG_IFADDRDEL, 0, &ifc,
1078 sizeof(ifc));
1079 main_imsg_compose_rde(IMSG_IFADDRDEL, 0, &ifc,
1080 sizeof(ifc));
1081 }
1082 free(ia);
1083 return;
1084 }
1085 }
1086}
1087
1088void
1089if_announce(void *msg)
1090{
1091 struct if_announcemsghdr *ifan;
1092 struct iface *iface;
1093
1094 ifan = msg;
1095
1096 switch (ifan->ifan_what) {
1097 case IFAN_ARRIVAL:
1098 if ((iface = if_new(ifan->ifan_index, ifan->ifan_name)) == NULL)
Although the value stored to 'iface' is used in the enclosing expression, the value is never actually read from 'iface'
1099 fatal("if_announce failed");
1100 break;
1101 case IFAN_DEPARTURE:
1102 iface = if_find(ifan->ifan_index);
1103 if_del(iface);
1104 break;
1105 }
1106}
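The diagnostic at line 1098 is a dead-store report: the pointer returned by if_new() is stored in 'iface', but after the NULL comparison that value is never read again in this function (the IFAN_DEPARTURE arm fetches its own pointer via if_find()). One way to quiet the warning, sketched here under the assumption that only the failure check is needed and not necessarily matching any upstream fix, is to drop the unused assignment:

	case IFAN_ARRIVAL:
		/* only the NULL check is needed; do not keep the pointer */
		if (if_new(ifan->ifan_index, ifan->ifan_name) == NULL)
			fatal("if_announce failed");
		break;

With that change 'iface' is used only by the IFAN_DEPARTURE case.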
1107
1108/* rtsock */
1109int
1110send_rtmsg(int fd, int action, struct kroute *kroute)
1111{
1112 struct iovec iov[5];
1113 struct rt_msghdr hdr;
1114 struct pad {
1115 struct sockaddr_in6 addr;
1116 char pad[sizeof(long)]; /* thank you IPv6 */
1117 } prefix, nexthop, mask;
1118 struct {
1119 struct sockaddr_dl addr;
1120 char pad[sizeof(long)];
1121 } ifp;
1122 struct sockaddr_rtlabel sa_rl;
1123 int iovcnt = 0;
1124 const char *label;
1125
1126 if (kr_state.fib_sync == 0)
1127 return (0);
1128
1129 /* initialize header */
1130 bzero(&hdr, sizeof(hdr));
1131 hdr.rtm_version = RTM_VERSION;
1132 hdr.rtm_type = action;
1133 hdr.rtm_priority = kr_state.fib_prio;
1134 hdr.rtm_tableid = kr_state.rdomain; /* rtableid */
1135 if (action == RTM_CHANGE)
1136 hdr.rtm_fmask = RTF_REJECT|RTF_BLACKHOLE;
1137 else
1138 hdr.rtm_flags = RTF_MPATH;
1139 hdr.rtm_seq = kr_state.rtseq++; /* overflow doesn't matter */
1140 hdr.rtm_hdrlen = sizeof(hdr);
1141 hdr.rtm_msglen = sizeof(hdr);
1142 /* adjust iovec */
1143 iov[iovcnt].iov_base = &hdr;
1144 iov[iovcnt++].iov_len = sizeof(hdr);
1145
1146 bzero(&prefix, sizeof(prefix));
1147 prefix.addr.sin6_len = sizeof(struct sockaddr_in6);
1148 prefix.addr.sin6_family = AF_INET6;
1149 prefix.addr.sin6_addr = kroute->prefix;
1150 /* adjust header */
1151 hdr.rtm_addrs |= RTA_DST;
1152 hdr.rtm_msglen += ROUNDUP(sizeof(struct sockaddr_in6));
1153 /* adjust iovec */
1154 iov[iovcnt].iov_base = &prefix;
1155 iov[iovcnt++].iov_len = ROUNDUP(sizeof(struct sockaddr_in6));
1156
1157 if (!IN6_IS_ADDR_UNSPECIFIED(&kroute->nexthop)) {
1158 bzero(&nexthop, sizeof(nexthop));
1159 nexthop.addr.sin6_len = sizeof(struct sockaddr_in6);
1160 nexthop.addr.sin6_family = AF_INET6;
1161 nexthop.addr.sin6_addr = kroute->nexthop;
1162 nexthop.addr.sin6_scope_id = kroute->scope;
1163 /*
1164 * XXX we should set the sin6_scope_id but the kernel
1165 * XXX does not expect it that way. It must be fiddled
1166 * XXX into the sin6_addr. Welcome to the typical
1167 * XXX IPv6 insanity and all without wine bottles.
1168 */
1169 embedscope(&nexthop.addr);
1170
1171 /* adjust header */
1172 hdr.rtm_flags |= RTF_GATEWAY;
1173 hdr.rtm_addrs |= RTA_GATEWAY;
1174 hdr.rtm_msglen += ROUNDUP(sizeof(struct sockaddr_in6));
1175 /* adjust iovec */
1176 iov[iovcnt].iov_base = &nexthop;
1177 iov[iovcnt++].iov_len = ROUNDUP(sizeof(struct sockaddr_in6));
1178 } else if (kroute->ifindex) {
1179 /*
1180 * We don't have an interface address in that network,
1181 * so we install a cloning route. The kernel will then
1182 * do neighbor discovery.
1183 */
1184 bzero(&ifp, sizeof(ifp));
1185 ifp.addr.sdl_len = sizeof(struct sockaddr_dl);
1186 ifp.addr.sdl_family = AF_LINK;
1187
1188 ifp.addr.sdl_index = kroute->ifindex;
1189 /* adjust header */
1190 hdr.rtm_flags |= RTF_CLONING;
1191 hdr.rtm_addrs |= RTA_GATEWAY;
1192 hdr.rtm_msglen += ROUNDUP(sizeof(struct sockaddr_dl));
1193 /* adjust iovec */
1194 iov[iovcnt].iov_base = &ifp;
1195 iov[iovcnt++].iov_len = ROUNDUP(sizeof(struct sockaddr_dl));
1196 }
1197
1198 bzero(&mask, sizeof(mask));
1199 mask.addr.sin6_len = sizeof(struct sockaddr_in6);
1200 mask.addr.sin6_family = AF_INET6;
1201 mask.addr.sin6_addr = *prefixlen2mask(kroute->prefixlen);
1202 /* adjust header */
1203 if (kroute->prefixlen == 128)
1204 hdr.rtm_flags |= RTF_HOST;
1205 hdr.rtm_addrs |= RTA_NETMASK;
1206 hdr.rtm_msglen += ROUNDUP(sizeof(struct sockaddr_in6));
1207 /* adjust iovec */
1208 iov[iovcnt].iov_base = &mask;
1209 iov[iovcnt++].iov_len = ROUNDUP(sizeof(struct sockaddr_in6));
1210
1211 if (kroute->rtlabel != 0) {
1212 sa_rl.sr_len = sizeof(sa_rl);
1213 sa_rl.sr_family = AF_UNSPEC;
1214 label = rtlabel_id2name(kroute->rtlabel);
1215 if (strlcpy(sa_rl.sr_label, label,
1216 sizeof(sa_rl.sr_label)) >= sizeof(sa_rl.sr_label)) {
1217 log_warnx("send_rtmsg: invalid rtlabel");
1218 return (-1);
1219 }
1220 /* adjust header */
1221 hdr.rtm_addrs |= RTA_LABEL;
1222 hdr.rtm_msglen += sizeof(sa_rl);
1223 /* adjust iovec */
1224 iov[iovcnt].iov_base = &sa_rl;
1225 iov[iovcnt++].iov_len = sizeof(sa_rl);
1226 }
1227
1228retry:
1229 if (writev(fd, iov, iovcnt) == -1) {
1230 if (errno == ESRCH) {
1231 if (hdr.rtm_type == RTM_CHANGE) {
1232 hdr.rtm_type = RTM_ADD;
1233 goto retry;
1234 } else if (hdr.rtm_type == RTM_DELETE) {
1235 log_info("route %s/%u vanished before delete",
1236 log_sockaddr(&prefix), kroute->prefixlen);
1237 return (0);
1238 }
1239 }
1240 log_warn("send_rtmsg: action %u, prefix %s/%u", hdr.rtm_type,
1241 log_sockaddr(&prefix), kroute->prefixlen);
1242 return (0);
1243 }
1244
1245 return (0);
1246}
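The XXX comment at lines 1164-1167 describes the KAME convention: for link-local destinations the kernel wants the scope (interface index) folded into bytes 2 and 3 of the in6_addr itself rather than passed in sin6_scope_id. embedscope() and recoverscope() are ospf6d helpers defined outside this excerpt; assuming they follow that convention, an equivalent pair would look roughly like the sketch below (illustrative only, not the actual helpers):

#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>

/* Fold the scope id into the second 16-bit word of a link-local address. */
static void
embedscope_sketch(struct sockaddr_in6 *sin6)
{
	if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr) ||
	    IN6_IS_ADDR_MC_LINKLOCAL(&sin6->sin6_addr)) {
		sin6->sin6_addr.s6_addr[2] = (sin6->sin6_scope_id >> 8) & 0xff;
		sin6->sin6_addr.s6_addr[3] = sin6->sin6_scope_id & 0xff;
		sin6->sin6_scope_id = 0;
	}
}

/* Inverse: pull the embedded scope back out into sin6_scope_id. */
static void
recoverscope_sketch(struct sockaddr_in6 *sin6)
{
	if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr) ||
	    IN6_IS_ADDR_MC_LINKLOCAL(&sin6->sin6_addr)) {
		sin6->sin6_scope_id =
		    (sin6->sin6_addr.s6_addr[2] << 8) |
		    sin6->sin6_addr.s6_addr[3];
		sin6->sin6_addr.s6_addr[2] = 0;
		sin6->sin6_addr.s6_addr[3] = 0;
	}
}

rtmsg_process() performs the inverse step (see the XXX comment at lines 1443-1445) so that nexthop and scope can be stored separately in the kroute.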
1247
1248int
1249fetchtable(void)
1250{
1251 size_t len;
1252 int mib[7];
1253 char *buf;
1254 int rv;
1255
1256 mib[0] = CTL_NET;
1257 mib[1] = PF_ROUTE;
1258 mib[2] = 0;
1259 mib[3] = AF_INET6;
1260 mib[4] = NET_RT_DUMP;
1261 mib[5] = 0;
1262 mib[6] = kr_state.rdomain; /* rtableid */
1263
1264 if (sysctl(mib, 7, NULL, &len, NULL, 0) == -1) {
1265 log_warn("sysctl");
1266 return (-1);
1267 }
1268 if ((buf = malloc(len)) == NULL) {
1269 log_warn("fetchtable");
1270 return (-1);
1271 }
1272 if (sysctl(mib, 7, buf, &len, NULL, 0) == -1) {
1273 log_warn("sysctl");
1274 free(buf);
1275 return (-1);
1276 }
1277
1278 rv = rtmsg_process(buf, len);
1279 free(buf);
1280
1281 return (rv);
1282}
1283
1284int
1285fetchifs(u_short ifindex)
1286{
1287 size_t len;
1288 int mib[6];
1289 char *buf;
1290 int rv;
1291
1292 mib[0] = CTL_NET;
1293 mib[1] = PF_ROUTE;
1294 mib[2] = 0;
1295 mib[3] = AF_INET6;
1296 mib[4] = NET_RT_IFLIST;
1297 mib[5] = ifindex;
1298
1299 if (sysctl(mib, 6, NULL, &len, NULL, 0) == -1) {
1300 log_warn("sysctl");
1301 return (-1);
1302 }
1303 if ((buf = malloc(len)) == NULL) {
1304 log_warn("fetchifs");
1305 return (-1);
1306 }
1307 if (sysctl(mib, 6, buf, &len, NULL, 0) == -1) {
1308 log_warn("sysctl");
1309 free(buf);
1310 return (-1);
1311 }
1312
1313 rv = rtmsg_process(buf, len);
1314 free(buf);
1315
1316 return (rv);
1317}
1318
1319int
1320dispatch_rtmsg(void)
1321{
1322 char buf[RT_BUF_SIZE];
1323 ssize_t n;
1324
1325 if ((n = read(kr_state.fd, &buf, sizeof(buf))) == -1) {
1326 if (errno == EAGAIN || errno == EINTR)
1327 return (0);
1328 log_warn("dispatch_rtmsg: read error");
1329 return (-1);
1330 }
1331
1332 if (n == 0) {
1333 log_warnx("routing socket closed");
1334 return (-1);
1335 }
1336
1337 return (rtmsg_process(buf, n));
1338}
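kr_state.fd, read above, is the routing socket opened by the setup code earlier in kroute.c (not part of this excerpt). As a rough sketch of how such a socket is typically obtained on OpenBSD, including a message filter limited to the types rtmsg_process() handles, and not necessarily how ospf6d's own setup does it:

#include <sys/types.h>
#include <sys/socket.h>
#include <net/route.h>

static int
open_rtsock_sketch(void)
{
	unsigned int	rtfilter;
	int		fd;

	if ((fd = socket(AF_ROUTE, SOCK_RAW, AF_INET6)) == -1)
		return (-1);

	/* deliver only the message types this daemon cares about */
	rtfilter = ROUTE_FILTER(RTM_ADD) | ROUTE_FILTER(RTM_GET) |
	    ROUTE_FILTER(RTM_CHANGE) | ROUTE_FILTER(RTM_DELETE) |
	    ROUTE_FILTER(RTM_IFINFO) | ROUTE_FILTER(RTM_NEWADDR) |
	    ROUTE_FILTER(RTM_DELADDR) | ROUTE_FILTER(RTM_IFANNOUNCE);
	if (setsockopt(fd, AF_ROUTE, ROUTE_MSGFILTER,
	    &rtfilter, sizeof(rtfilter)) == -1)
		return (-1);

	return (fd);
}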
1339
1340int
1341rtmsg_process(char *buf, size_t len)
1342{
1343 struct rt_msghdr *rtm;
1344 struct if_msghdr ifm;
1345 struct ifa_msghdr *ifam;
1346 struct sockaddr *sa, *rti_info[RTAX_MAX];
1347 struct sockaddr_in6 *sa_in6;
1348 struct sockaddr_rtlabel *label;
1349 struct kroute_node *kr, *okr;
1350 struct in6_addr prefix, nexthop;
1351 u_int8_t prefixlen, prio;
1352 int flags, mpath;
1353 unsigned int scope;
1354 u_short ifindex = 0;
1355 int rv, delay;
1356 size_t offset;
1357 char *next;
1358
1359 for (offset = 0; offset < len; offset += rtm->rtm_msglen) {
1360 next = buf + offset;
1361 rtm = (struct rt_msghdr *)next;
1362 if (len < offset + sizeof(u_short) ||
1363 len < offset + rtm->rtm_msglen)
1364 fatalx("rtmsg_process: partial rtm in buffer");
1365 if (rtm->rtm_version != RTM_VERSION)
1366 continue;
1367
1368 bzero(&prefix, sizeof(prefix));
1369 bzero(&nexthop, sizeof(nexthop));
1370 scope = 0;
1371 prefixlen = 0;
1372 flags = F_KERNEL;
1373 mpath = 0;
1374 prio = 0;
1375
1376 sa = (struct sockaddr *)(next + rtm->rtm_hdrlen);
1377 get_rtaddrs(rtm->rtm_addrs, sa, rti_info);
1378
1379 switch (rtm->rtm_type) {
1380 case RTM_ADD:
1381 case RTM_GET:
1382 case RTM_CHANGE:
1383 case RTM_DELETE:
1384 if (rtm->rtm_errno) /* failed attempts... */
1385 continue;
1386
1387 if (rtm->rtm_tableid != kr_state.rdomain)
1388 continue;
1389
1390 if (rtm->rtm_type == RTM_GET &&
1391 rtm->rtm_pid != kr_state.pid) /* caused by us */
1392 continue;
1393
1394 if ((sa = rti_info[RTAX_DST]) == NULL)
1395 continue;
1396
1397 /* Skip ARP/ND cache and broadcast routes. */
1398 if (rtm->rtm_flags & (RTF_LLINFO|RTF_BROADCAST))
1399 continue;
1400
1401 if (rtm->rtm_flags & RTF_MPATH)
1402 mpath = 1;
1403 prio = rtm->rtm_priority;
1404 flags = (prio == kr_state.fib_prio) ?
1405 F_OSPFD_INSERTED : F_KERNEL;
1406
1407 switch (sa->sa_family) {
1408 case AF_INET6:
1409 prefix =
1410 ((struct sockaddr_in6 *)sa)->sin6_addr;
1411 sa_in6 = (struct sockaddr_in6 *)
1412 rti_info[RTAX_NETMASK];
1413 if (sa_in6 != NULL) {
1414 if (sa_in6->sin6_len != 0)
1415 prefixlen = mask2prefixlen(
1416 sa_in6);
1417 } else if (rtm->rtm_flags & RTF_HOST)
1418 prefixlen = 128;
1419 else
1420 fatalx("classful IPv6 address?!!");
1421 if (rtm->rtm_flags & RTF_STATIC)
1422 flags |= F_STATIC;
1423 if (rtm->rtm_flags & RTF_BLACKHOLE)
1424 flags |= F_BLACKHOLE;
1425 if (rtm->rtm_flags & RTF_REJECT)
1426 flags |= F_REJECT;
1427 if (rtm->rtm_flags & RTF_DYNAMIC)
1428 flags |= F_DYNAMIC;
1429 break;
1430 default:
1431 continue;
1432 }
1433
1434 ifindex = rtm->rtm_index;
1435 if ((sa = rti_info[RTAX_GATEWAY]) != NULL) {
1436 switch (sa->sa_family) {
1437 case AF_INET6:
1438 if (rtm->rtm_flags & RTF_CONNECTED)
1439 flags |= F_CONNECTED;
1440
1441 sa_in6 = (struct sockaddr_in6 *)sa;
1442 /*
1443 * XXX The kernel provides the scope
1444 * XXX via the kame hack instead of
1445 * XXX the scope_id field.
1446 */
1447 recoverscope(sa_in6);
1448 nexthop = sa_in6->sin6_addr;
1449 scope = sa_in6->sin6_scope_id;
1450 break;
1451 case AF_LINK:
1452 flags |= F_CONNECTED;
1453 break;
1454 }
1455 }
1456 }
1457
1458 switch (rtm->rtm_type) {
1459 case RTM_ADD:
1460 case RTM_GET:
1461 case RTM_CHANGE:
1462 if (IN6_IS_ADDR_UNSPECIFIED(&nexthop) &&
1463 !(flags & F_CONNECTED)) {
1464 log_warnx("rtmsg_process no nexthop for %s/%u",
1465 log_in6addr(&prefix), prefixlen);
1466 continue;
1467 }
1468
1469 if ((okr = kroute_find(&prefix, prefixlen, prio))
1470 != NULL) {
1471 kr = okr;
1472 if ((mpath || prio == kr_state.fib_prio) &&
1473 (kr = kroute_matchgw(okr, &nexthop, scope)) ==
1474 NULL) {
1475 log_warnx("rtmsg_process: mpath route"
1476 " not found");
1477 /* add routes we missed out earlier */
1478 goto add;
1479 }
1480
1481 if (kr->r.flags & F_REDISTRIBUTED)
1482 flags |= F_REDISTRIBUTED;
1483 kr->r.nexthop = nexthop;
1484 kr->r.scope = scope;
1485 kr->r.flags = flags;
1486 kr->r.ifindex = ifindex;
1487
1488 rtlabel_unref(kr->r.rtlabel);
1489 kr->r.rtlabel = 0;
1490 kr->r.ext_tag = 0;
1491 if ((label = (struct sockaddr_rtlabel *)
1492 rti_info[RTAX_LABEL]) != NULL) {
1493 kr->r.rtlabel =
1494 rtlabel_name2id(label->sr_label);
1495 kr->r.ext_tag =
1496 rtlabel_id2tag(kr->r.rtlabel);
1497 }
1498
1499 if (kif_validate(kr->r.ifindex))
1500 kr->r.flags &= ~F_DOWN;
1501 else
1502 kr->r.flags |= F_DOWN;
1503
1504 /* just readd, the RDE will care */
1505 kr->serial = kr_state.fib_serial;
1506 kr_redistribute(kr);
1507 } else {
1508add:
1509 if ((kr = calloc(1,
1510 sizeof(struct kroute_node))) == NULL) {
1511 log_warn("rtmsg_process calloc");
1512 return (-1);
1513 }
1514 kr->r.prefix = prefix;
1515 kr->r.prefixlen = prefixlen;
1516 kr->r.nexthop = nexthop;
1517 kr->r.scope = scope;
1518 kr->r.flags = flags;
1519 kr->r.ifindex = ifindex;
1520 kr->r.priority = prio;
1521
1522 if (rtm->rtm_priority == kr_state.fib_prio) {
1523 log_warnx("alien OSPF route %s/%d",
1524 log_in6addr(&prefix), prefixlen);
1525 rv = send_rtmsg(kr_state.fd,
1526 RTM_DELETE, &kr->r);
1527 free(kr);
1528 if (rv == -1)
1529 return (-1);
1530 } else {
1531 if ((label = (struct sockaddr_rtlabel *)
1532 rti_info[RTAX_LABEL]) != NULL) {
1533 kr->r.rtlabel =
1534 rtlabel_name2id(
1535 label->sr_label);
1536 kr->r.ext_tag =
1537 rtlabel_id2tag(
1538 kr->r.rtlabel);
1539 }
1540
1541 kroute_insert(kr);
1542 }
1543 }
1544 break;
1545 case RTM_DELETE:
1546 if ((kr = kroute_find(&prefix, prefixlen, prio)) ==
1547 NULL)
1548 continue;
1549 if (!(kr->r.flags & F_KERNEL))
1550 continue;
1551 /* get the correct route */
1552 okr = kr;
1553 if (mpath && (kr = kroute_matchgw(kr, &nexthop,
1554 scope)) == NULL) {
1555 log_warnx("rtmsg_process mpath route"
1556 " not found");
1557 return (-1);
1558 }
1559 if (kroute_remove(kr) == -1)
1560 return (-1);
1561 break;
1562 case RTM_IFINFO:
1563 memcpy(&ifm, next, sizeof(ifm));
1564 if_change(ifm.ifm_index, ifm.ifm_flags, &ifm.ifm_data,
1565 (struct sockaddr_dl *)rti_info[RTAX_IFP]);
1566 break;
1567 case RTM_NEWADDR:
1568 ifam = (struct ifa_msghdr *)rtm;
1569 if ((ifam->ifam_addrs & (RTA_NETMASK | RTA_IFA |
1570 RTA_BRD)) == 0)
1571 break;
1572
1573 if_newaddr(ifam->ifam_index,
1574 (struct sockaddr_in6 *)rti_info[RTAX_IFA],
1575 (struct sockaddr_in6 *)rti_info[RTAX_NETMASK],
1576 (struct sockaddr_in6 *)rti_info[RTAX_BRD]);
1577 break;
1578 case RTM_DELADDR:
1579 ifam = (struct ifa_msghdr *)rtm;
1580 if ((ifam->ifam_addrs & (RTA_NETMASK | RTA_IFA |
1581 RTA_BRD)) == 0)
1582 break;
1583
1584 if_deladdr(ifam->ifam_index,
1585 (struct sockaddr_in6 *)rti_info[RTAX_IFA],
1586 (struct sockaddr_in6 *)rti_info[RTAX_NETMASK],
1587 (struct sockaddr_in6 *)rti_info[RTAX_BRD]);
1588 break;
1589 case RTM_IFANNOUNCE:
1590 if_announce(next);
1591 break;
1592 case RTM_DESYNC:
1593 /*
1594 * We lost some routing packets. Schedule a reload
1595 * of the kernel route/interface information.
1596 */
1597 if (kr_state.reload_state == KR_RELOAD_IDLE) {
1598 delay = KR_RELOAD_TIMER;
1599 log_info("desync; scheduling fib reload");
1600 } else {
1601 delay = KR_RELOAD_HOLD_TIMER;
1602 log_debug("desync during KR_RELOAD_%s",
1603 kr_state.reload_state ==
1604 KR_RELOAD_FETCH ? "FETCH" : "HOLD");
1605 }
1606 kr_state.reload_state = KR_RELOAD_FETCH;
1607 kr_fib_reload_arm_timer(delay);
1608 break;
1609 default:
1610 /* ignore for now */
1611 break;
1612 }
1613 }
1614 return (offset);
1615}
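rtmsg_process() relies on get_rtaddrs() to split the sockaddrs that follow each rt_msghdr into the rti_info[] array indexed by the RTAX_* constants. That helper is defined earlier in kroute.c (outside this excerpt); the canonical pattern, shown here only as a sketch, walks the rtm_addrs bitmask and advances by the ROUNDUP'd length of each sockaddr:

#include <sys/types.h>
#include <sys/socket.h>
#include <net/route.h>
#include <stddef.h>

#define ROUNDUP(a)	\
    ((a) > 0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))

static void
get_rtaddrs_sketch(int addrs, struct sockaddr *sa, struct sockaddr **rti_info)
{
	int	i;

	for (i = 0; i < RTAX_MAX; i++) {
		if (addrs & (1 << i)) {
			rti_info[i] = sa;
			sa = (struct sockaddr *)((char *)sa +
			    ROUNDUP(sa->sa_len));
		} else
			rti_info[i] = NULL;
	}
}

The ROUNDUP definition above matches the expansion visible in send_rtmsg(); on amd64, where sizeof(long) is 8, it rounds the 28-byte sockaddr_in6 up to 32 bytes, which is why rtm_msglen and the iovec lengths are padded the same way.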