File: | src/usr.sbin/ospf6d/kroute.c |
Warning: | line 1482, column 4 Value stored to 'okr' is never read |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | /* $OpenBSD: kroute.c,v 1.66 2021/11/03 21:40:03 sthen Exp $ */ |
2 | |
3 | /* |
4 | * Copyright (c) 2004 Esben Norby <norby@openbsd.org> |
5 | * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org> |
6 | * |
7 | * Permission to use, copy, modify, and distribute this software for any |
8 | * purpose with or without fee is hereby granted, provided that the above |
9 | * copyright notice and this permission notice appear in all copies. |
10 | * |
11 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES |
12 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF |
13 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR |
14 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES |
15 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN |
16 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF |
17 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
18 | */ |
19 | |
20 | #include <sys/types.h> |
21 | #include <sys/socket.h> |
22 | #include <sys/sysctl.h> |
23 | #include <sys/tree.h> |
24 | #include <sys/uio.h> |
25 | #include <netinet/in.h> |
26 | #include <arpa/inet.h> |
27 | #include <net/if.h> |
28 | #include <net/if_dl.h> |
29 | #include <net/if_types.h> |
30 | #include <net/route.h> |
31 | #include <err.h> |
32 | #include <errno(*__errno()).h> |
33 | #include <fcntl.h> |
34 | #include <stdio.h> |
35 | #include <stdlib.h> |
36 | #include <string.h> |
37 | #include <unistd.h> |
38 | #include <limits.h> |
39 | |
40 | #include "ospf6d.h" |
41 | #include "ospfe.h" |
42 | #include "log.h" |
43 | |
44 | struct { |
45 | u_int32_t rtseq; |
46 | pid_t pid; |
47 | int fib_sync; |
48 | u_int8_t fib_prio; |
49 | int fd; |
50 | struct event ev; |
51 | u_int rdomain; |
52 | } kr_state; |
53 | |
54 | struct kroute_node { |
55 | RB_ENTRY(kroute_node)struct { struct kroute_node *rbe_left; struct kroute_node *rbe_right ; struct kroute_node *rbe_parent; int rbe_color; } entry; |
56 | struct kroute_node *next; |
57 | struct kroute r; |
58 | }; |
59 | |
60 | void kr_redist_remove(struct kroute_node *, struct kroute_node *); |
61 | int kr_redist_eval(struct kroute *, struct kroute *); |
62 | void kr_redistribute(struct kroute_node *); |
63 | int kroute_compare(struct kroute_node *, struct kroute_node *); |
64 | int kr_change_fib(struct kroute_node *, struct kroute *, int, int); |
65 | int kr_delete_fib(struct kroute_node *); |
66 | |
67 | struct kroute_node *kroute_find(const struct in6_addr *, u_int8_t, |
68 | u_int8_t); |
69 | struct kroute_node *kroute_matchgw(struct kroute_node *, |
70 | struct in6_addr *, unsigned int); |
71 | int kroute_insert(struct kroute_node *); |
72 | int kroute_remove(struct kroute_node *); |
73 | void kroute_clear(void); |
74 | |
75 | struct iface *kif_update(u_short, int, struct if_data *, |
76 | struct sockaddr_dl *); |
77 | int kif_validate(u_short); |
78 | |
79 | struct kroute_node *kroute_match(struct in6_addr *); |
80 | |
81 | int protect_lo(void); |
82 | void get_rtaddrs(int, struct sockaddr *, struct sockaddr **); |
83 | void if_change(u_short, int, struct if_data *, struct sockaddr_dl *); |
84 | void if_newaddr(u_short, struct sockaddr_in6 *, |
85 | struct sockaddr_in6 *, struct sockaddr_in6 *); |
86 | void if_deladdr(u_short, struct sockaddr_in6 *, |
87 | struct sockaddr_in6 *, struct sockaddr_in6 *); |
88 | void if_announce(void *); |
89 | |
90 | int send_rtmsg(int, int, struct kroute *); |
91 | int dispatch_rtmsg(void); |
92 | int fetchtable(void); |
93 | int rtmsg_process(char *, size_t); |
94 | |
95 | RB_HEAD(kroute_tree, kroute_node)struct kroute_tree { struct kroute_node *rbh_root; } krt; |
96 | RB_PROTOTYPE(kroute_tree, kroute_node, entry, kroute_compare)void kroute_tree_RB_INSERT_COLOR(struct kroute_tree *, struct kroute_node *); void kroute_tree_RB_REMOVE_COLOR(struct kroute_tree *, struct kroute_node *, struct kroute_node *); struct kroute_node *kroute_tree_RB_REMOVE(struct kroute_tree *, struct kroute_node *); struct kroute_node *kroute_tree_RB_INSERT(struct kroute_tree *, struct kroute_node *); struct kroute_node *kroute_tree_RB_FIND (struct kroute_tree *, struct kroute_node *); struct kroute_node *kroute_tree_RB_NFIND(struct kroute_tree *, struct kroute_node *); struct kroute_node *kroute_tree_RB_NEXT(struct kroute_node *); struct kroute_node *kroute_tree_RB_PREV(struct kroute_node *); struct kroute_node *kroute_tree_RB_MINMAX(struct kroute_tree *, int); |
97 | RB_GENERATE(kroute_tree, kroute_node, entry, kroute_compare)void kroute_tree_RB_INSERT_COLOR(struct kroute_tree *head, struct kroute_node *elm) { struct kroute_node *parent, *gparent, *tmp ; while ((parent = (elm)->entry.rbe_parent) && (parent )->entry.rbe_color == 1) { gparent = (parent)->entry.rbe_parent ; if (parent == (gparent)->entry.rbe_left) { tmp = (gparent )->entry.rbe_right; if (tmp && (tmp)->entry.rbe_color == 1) { (tmp)->entry.rbe_color = 0; do { (parent)->entry .rbe_color = 0; (gparent)->entry.rbe_color = 1; } while (0 ); elm = gparent; continue; } if ((parent)->entry.rbe_right == elm) { do { (tmp) = (parent)->entry.rbe_right; if (((parent )->entry.rbe_right = (tmp)->entry.rbe_left)) { ((tmp)-> entry.rbe_left)->entry.rbe_parent = (parent); } do {} while (0); if (((tmp)->entry.rbe_parent = (parent)->entry.rbe_parent )) { if ((parent) == ((parent)->entry.rbe_parent)->entry .rbe_left) ((parent)->entry.rbe_parent)->entry.rbe_left = (tmp); else ((parent)->entry.rbe_parent)->entry.rbe_right = (tmp); } else (head)->rbh_root = (tmp); (tmp)->entry .rbe_left = (parent); (parent)->entry.rbe_parent = (tmp); do {} while (0); if (((tmp)->entry.rbe_parent)) do {} while ( 0); } while (0); tmp = parent; parent = elm; elm = tmp; } do { (parent)->entry.rbe_color = 0; (gparent)->entry.rbe_color = 1; } while (0); do { (tmp) = (gparent)->entry.rbe_left; if (((gparent)->entry.rbe_left = (tmp)->entry.rbe_right )) { ((tmp)->entry.rbe_right)->entry.rbe_parent = (gparent ); } do {} while (0); if (((tmp)->entry.rbe_parent = (gparent )->entry.rbe_parent)) { if ((gparent) == ((gparent)->entry .rbe_parent)->entry.rbe_left) ((gparent)->entry.rbe_parent )->entry.rbe_left = (tmp); else ((gparent)->entry.rbe_parent )->entry.rbe_right = (tmp); } else (head)->rbh_root = ( tmp); (tmp)->entry.rbe_right = (gparent); (gparent)->entry .rbe_parent = (tmp); do {} while (0); if (((tmp)->entry.rbe_parent )) do {} while (0); } while (0); } else { tmp = (gparent)-> entry.rbe_left; if (tmp && 
(tmp)->entry.rbe_color == 1) { (tmp)->entry.rbe_color = 0; do { (parent)->entry. rbe_color = 0; (gparent)->entry.rbe_color = 1; } while (0) ; elm = gparent; continue; } if ((parent)->entry.rbe_left == elm) { do { (tmp) = (parent)->entry.rbe_left; if (((parent )->entry.rbe_left = (tmp)->entry.rbe_right)) { ((tmp)-> entry.rbe_right)->entry.rbe_parent = (parent); } do {} while (0); if (((tmp)->entry.rbe_parent = (parent)->entry.rbe_parent )) { if ((parent) == ((parent)->entry.rbe_parent)->entry .rbe_left) ((parent)->entry.rbe_parent)->entry.rbe_left = (tmp); else ((parent)->entry.rbe_parent)->entry.rbe_right = (tmp); } else (head)->rbh_root = (tmp); (tmp)->entry .rbe_right = (parent); (parent)->entry.rbe_parent = (tmp); do {} while (0); if (((tmp)->entry.rbe_parent)) do {} while (0); } while (0); tmp = parent; parent = elm; elm = tmp; } do { (parent)->entry.rbe_color = 0; (gparent)->entry.rbe_color = 1; } while (0); do { (tmp) = (gparent)->entry.rbe_right ; if (((gparent)->entry.rbe_right = (tmp)->entry.rbe_left )) { ((tmp)->entry.rbe_left)->entry.rbe_parent = (gparent ); } do {} while (0); if (((tmp)->entry.rbe_parent = (gparent )->entry.rbe_parent)) { if ((gparent) == ((gparent)->entry .rbe_parent)->entry.rbe_left) ((gparent)->entry.rbe_parent )->entry.rbe_left = (tmp); else ((gparent)->entry.rbe_parent )->entry.rbe_right = (tmp); } else (head)->rbh_root = ( tmp); (tmp)->entry.rbe_left = (gparent); (gparent)->entry .rbe_parent = (tmp); do {} while (0); if (((tmp)->entry.rbe_parent )) do {} while (0); } while (0); } } (head->rbh_root)-> entry.rbe_color = 0; } void kroute_tree_RB_REMOVE_COLOR(struct kroute_tree *head, struct kroute_node *parent, struct kroute_node *elm) { struct kroute_node *tmp; while ((elm == ((void*)0) || (elm)->entry.rbe_color == 0) && elm != (head)-> rbh_root) { if ((parent)->entry.rbe_left == elm) { tmp = ( parent)->entry.rbe_right; if ((tmp)->entry.rbe_color == 1) { do { (tmp)->entry.rbe_color = 0; (parent)->entry. 
rbe_color = 1; } while (0); do { (tmp) = (parent)->entry.rbe_right ; if (((parent)->entry.rbe_right = (tmp)->entry.rbe_left )) { ((tmp)->entry.rbe_left)->entry.rbe_parent = (parent ); } do {} while (0); if (((tmp)->entry.rbe_parent = (parent )->entry.rbe_parent)) { if ((parent) == ((parent)->entry .rbe_parent)->entry.rbe_left) ((parent)->entry.rbe_parent )->entry.rbe_left = (tmp); else ((parent)->entry.rbe_parent )->entry.rbe_right = (tmp); } else (head)->rbh_root = ( tmp); (tmp)->entry.rbe_left = (parent); (parent)->entry .rbe_parent = (tmp); do {} while (0); if (((tmp)->entry.rbe_parent )) do {} while (0); } while (0); tmp = (parent)->entry.rbe_right ; } if (((tmp)->entry.rbe_left == ((void*)0) || ((tmp)-> entry.rbe_left)->entry.rbe_color == 0) && ((tmp)-> entry.rbe_right == ((void*)0) || ((tmp)->entry.rbe_right)-> entry.rbe_color == 0)) { (tmp)->entry.rbe_color = 1; elm = parent; parent = (elm)->entry.rbe_parent; } else { if ((tmp )->entry.rbe_right == ((void*)0) || ((tmp)->entry.rbe_right )->entry.rbe_color == 0) { struct kroute_node *oleft; if ( (oleft = (tmp)->entry.rbe_left)) (oleft)->entry.rbe_color = 0; (tmp)->entry.rbe_color = 1; do { (oleft) = (tmp)-> entry.rbe_left; if (((tmp)->entry.rbe_left = (oleft)->entry .rbe_right)) { ((oleft)->entry.rbe_right)->entry.rbe_parent = (tmp); } do {} while (0); if (((oleft)->entry.rbe_parent = (tmp)->entry.rbe_parent)) { if ((tmp) == ((tmp)->entry .rbe_parent)->entry.rbe_left) ((tmp)->entry.rbe_parent) ->entry.rbe_left = (oleft); else ((tmp)->entry.rbe_parent )->entry.rbe_right = (oleft); } else (head)->rbh_root = (oleft); (oleft)->entry.rbe_right = (tmp); (tmp)->entry .rbe_parent = (oleft); do {} while (0); if (((oleft)->entry .rbe_parent)) do {} while (0); } while (0); tmp = (parent)-> entry.rbe_right; } (tmp)->entry.rbe_color = (parent)->entry .rbe_color; (parent)->entry.rbe_color = 0; if ((tmp)->entry .rbe_right) ((tmp)->entry.rbe_right)->entry.rbe_color = 0; do { (tmp) = (parent)->entry.rbe_right; if (((parent)-> 
entry.rbe_right = (tmp)->entry.rbe_left)) { ((tmp)->entry .rbe_left)->entry.rbe_parent = (parent); } do {} while (0) ; if (((tmp)->entry.rbe_parent = (parent)->entry.rbe_parent )) { if ((parent) == ((parent)->entry.rbe_parent)->entry .rbe_left) ((parent)->entry.rbe_parent)->entry.rbe_left = (tmp); else ((parent)->entry.rbe_parent)->entry.rbe_right = (tmp); } else (head)->rbh_root = (tmp); (tmp)->entry .rbe_left = (parent); (parent)->entry.rbe_parent = (tmp); do {} while (0); if (((tmp)->entry.rbe_parent)) do {} while ( 0); } while (0); elm = (head)->rbh_root; break; } } else { tmp = (parent)->entry.rbe_left; if ((tmp)->entry.rbe_color == 1) { do { (tmp)->entry.rbe_color = 0; (parent)->entry .rbe_color = 1; } while (0); do { (tmp) = (parent)->entry. rbe_left; if (((parent)->entry.rbe_left = (tmp)->entry. rbe_right)) { ((tmp)->entry.rbe_right)->entry.rbe_parent = (parent); } do {} while (0); if (((tmp)->entry.rbe_parent = (parent)->entry.rbe_parent)) { if ((parent) == ((parent )->entry.rbe_parent)->entry.rbe_left) ((parent)->entry .rbe_parent)->entry.rbe_left = (tmp); else ((parent)->entry .rbe_parent)->entry.rbe_right = (tmp); } else (head)->rbh_root = (tmp); (tmp)->entry.rbe_right = (parent); (parent)-> entry.rbe_parent = (tmp); do {} while (0); if (((tmp)->entry .rbe_parent)) do {} while (0); } while (0); tmp = (parent)-> entry.rbe_left; } if (((tmp)->entry.rbe_left == ((void*)0) || ((tmp)->entry.rbe_left)->entry.rbe_color == 0) && ((tmp)->entry.rbe_right == ((void*)0) || ((tmp)->entry .rbe_right)->entry.rbe_color == 0)) { (tmp)->entry.rbe_color = 1; elm = parent; parent = (elm)->entry.rbe_parent; } else { if ((tmp)->entry.rbe_left == ((void*)0) || ((tmp)->entry .rbe_left)->entry.rbe_color == 0) { struct kroute_node *oright ; if ((oright = (tmp)->entry.rbe_right)) (oright)->entry .rbe_color = 0; (tmp)->entry.rbe_color = 1; do { (oright) = (tmp)->entry.rbe_right; if (((tmp)->entry.rbe_right = ( oright)->entry.rbe_left)) { ((oright)->entry.rbe_left)-> 
entry.rbe_parent = (tmp); } do {} while (0); if (((oright)-> entry.rbe_parent = (tmp)->entry.rbe_parent)) { if ((tmp) == ((tmp)->entry.rbe_parent)->entry.rbe_left) ((tmp)-> entry.rbe_parent)->entry.rbe_left = (oright); else ((tmp)-> entry.rbe_parent)->entry.rbe_right = (oright); } else (head )->rbh_root = (oright); (oright)->entry.rbe_left = (tmp ); (tmp)->entry.rbe_parent = (oright); do {} while (0); if (((oright)->entry.rbe_parent)) do {} while (0); } while ( 0); tmp = (parent)->entry.rbe_left; } (tmp)->entry.rbe_color = (parent)->entry.rbe_color; (parent)->entry.rbe_color = 0; if ((tmp)->entry.rbe_left) ((tmp)->entry.rbe_left )->entry.rbe_color = 0; do { (tmp) = (parent)->entry.rbe_left ; if (((parent)->entry.rbe_left = (tmp)->entry.rbe_right )) { ((tmp)->entry.rbe_right)->entry.rbe_parent = (parent ); } do {} while (0); if (((tmp)->entry.rbe_parent = (parent )->entry.rbe_parent)) { if ((parent) == ((parent)->entry .rbe_parent)->entry.rbe_left) ((parent)->entry.rbe_parent )->entry.rbe_left = (tmp); else ((parent)->entry.rbe_parent )->entry.rbe_right = (tmp); } else (head)->rbh_root = ( tmp); (tmp)->entry.rbe_right = (parent); (parent)->entry .rbe_parent = (tmp); do {} while (0); if (((tmp)->entry.rbe_parent )) do {} while (0); } while (0); elm = (head)->rbh_root; break ; } } } if (elm) (elm)->entry.rbe_color = 0; } struct kroute_node * kroute_tree_RB_REMOVE(struct kroute_tree *head, struct kroute_node *elm) { struct kroute_node *child, *parent, *old = elm; int color ; if ((elm)->entry.rbe_left == ((void*)0)) child = (elm)-> entry.rbe_right; else if ((elm)->entry.rbe_right == ((void *)0)) child = (elm)->entry.rbe_left; else { struct kroute_node *left; elm = (elm)->entry.rbe_right; while ((left = (elm) ->entry.rbe_left)) elm = left; child = (elm)->entry.rbe_right ; parent = (elm)->entry.rbe_parent; color = (elm)->entry .rbe_color; if (child) (child)->entry.rbe_parent = parent; if (parent) { if ((parent)->entry.rbe_left == elm) (parent )->entry.rbe_left = child; 
else (parent)->entry.rbe_right = child; do {} while (0); } else (head)->rbh_root = child ; if ((elm)->entry.rbe_parent == old) parent = elm; (elm)-> entry = (old)->entry; if ((old)->entry.rbe_parent) { if (((old)->entry.rbe_parent)->entry.rbe_left == old) ((old )->entry.rbe_parent)->entry.rbe_left = elm; else ((old) ->entry.rbe_parent)->entry.rbe_right = elm; do {} while (0); } else (head)->rbh_root = elm; ((old)->entry.rbe_left )->entry.rbe_parent = elm; if ((old)->entry.rbe_right) ( (old)->entry.rbe_right)->entry.rbe_parent = elm; if (parent ) { left = parent; do { do {} while (0); } while ((left = (left )->entry.rbe_parent)); } goto color; } parent = (elm)-> entry.rbe_parent; color = (elm)->entry.rbe_color; if (child ) (child)->entry.rbe_parent = parent; if (parent) { if ((parent )->entry.rbe_left == elm) (parent)->entry.rbe_left = child ; else (parent)->entry.rbe_right = child; do {} while (0); } else (head)->rbh_root = child; color: if (color == 0) kroute_tree_RB_REMOVE_COLOR (head, parent, child); return (old); } struct kroute_node * kroute_tree_RB_INSERT (struct kroute_tree *head, struct kroute_node *elm) { struct kroute_node *tmp; struct kroute_node *parent = ((void*)0); int comp = 0; tmp = (head)->rbh_root; while (tmp) { parent = tmp; comp = (kroute_compare)(elm, parent); if (comp < 0) tmp = (tmp)-> entry.rbe_left; else if (comp > 0) tmp = (tmp)->entry.rbe_right ; else return (tmp); } do { (elm)->entry.rbe_parent = parent ; (elm)->entry.rbe_left = (elm)->entry.rbe_right = ((void *)0); (elm)->entry.rbe_color = 1; } while (0); if (parent != ((void*)0)) { if (comp < 0) (parent)->entry.rbe_left = elm; else (parent)->entry.rbe_right = elm; do {} while (0 ); } else (head)->rbh_root = elm; kroute_tree_RB_INSERT_COLOR (head, elm); return (((void*)0)); } struct kroute_node * kroute_tree_RB_FIND (struct kroute_tree *head, struct kroute_node *elm) { struct kroute_node *tmp = (head)->rbh_root; int comp; while (tmp) { comp = kroute_compare (elm, tmp); if (comp < 0) tmp 
= (tmp)->entry.rbe_left; else if (comp > 0) tmp = (tmp)->entry.rbe_right; else return (tmp); } return (((void*)0)); } struct kroute_node * kroute_tree_RB_NFIND (struct kroute_tree *head, struct kroute_node *elm) { struct kroute_node *tmp = (head)->rbh_root; struct kroute_node *res = ((void *)0); int comp; while (tmp) { comp = kroute_compare(elm, tmp) ; if (comp < 0) { res = tmp; tmp = (tmp)->entry.rbe_left ; } else if (comp > 0) tmp = (tmp)->entry.rbe_right; else return (tmp); } return (res); } struct kroute_node * kroute_tree_RB_NEXT (struct kroute_node *elm) { if ((elm)->entry.rbe_right) { elm = (elm)->entry.rbe_right; while ((elm)->entry.rbe_left ) elm = (elm)->entry.rbe_left; } else { if ((elm)->entry .rbe_parent && (elm == ((elm)->entry.rbe_parent)-> entry.rbe_left)) elm = (elm)->entry.rbe_parent; else { while ((elm)->entry.rbe_parent && (elm == ((elm)->entry .rbe_parent)->entry.rbe_right)) elm = (elm)->entry.rbe_parent ; elm = (elm)->entry.rbe_parent; } } return (elm); } struct kroute_node * kroute_tree_RB_PREV(struct kroute_node *elm) { if ((elm)->entry.rbe_left) { elm = (elm)->entry.rbe_left ; while ((elm)->entry.rbe_right) elm = (elm)->entry.rbe_right ; } else { if ((elm)->entry.rbe_parent && (elm == ( (elm)->entry.rbe_parent)->entry.rbe_right)) elm = (elm) ->entry.rbe_parent; else { while ((elm)->entry.rbe_parent && (elm == ((elm)->entry.rbe_parent)->entry.rbe_left )) elm = (elm)->entry.rbe_parent; elm = (elm)->entry.rbe_parent ; } } return (elm); } struct kroute_node * kroute_tree_RB_MINMAX (struct kroute_tree *head, int val) { struct kroute_node *tmp = (head)->rbh_root; struct kroute_node *parent = ((void*) 0); while (tmp) { parent = tmp; if (val < 0) tmp = (tmp)-> entry.rbe_left; else tmp = (tmp)->entry.rbe_right; } return (parent); } |
98 | |
99 | int |
100 | kr_init(int fs, u_int rdomain, int redis_label_or_prefix, u_int8_t fib_prio) |
101 | { |
102 | int opt = 0, rcvbuf, default_rcvbuf; |
103 | socklen_t optlen; |
104 | int filter_prio = fib_prio; |
105 | int filter_flags = RTF_LLINFO0x400 | RTF_BROADCAST0x400000; |
106 | |
107 | kr_state.fib_sync = fs; |
108 | kr_state.rdomain = rdomain; |
109 | kr_state.fib_prio = fib_prio; |
110 | |
111 | if ((kr_state.fd = socket(AF_ROUTE17, |
112 | SOCK_RAW3 | SOCK_CLOEXEC0x8000 | SOCK_NONBLOCK0x4000, AF_INET624)) == -1) { |
113 | log_warn("kr_init: socket"); |
114 | return (-1); |
115 | } |
116 | |
117 | /* not interested in my own messages */ |
118 | if (setsockopt(kr_state.fd, SOL_SOCKET0xffff, SO_USELOOPBACK0x0040, |
119 | &opt, sizeof(opt)) == -1) |
120 | log_warn("kr_init: setsockopt"); /* not fatal */ |
121 | |
122 | if (redis_label_or_prefix) { |
123 | filter_prio = 0; |
124 | log_info("%s: priority filter disabled", __func__); |
125 | } else |
126 | log_debug("%s: priority filter enabled", __func__); |
127 | |
128 | if (setsockopt(kr_state.fd, AF_ROUTE17, ROUTE_PRIOFILTER3, &filter_prio, |
129 | sizeof(filter_prio)) == -1) { |
130 | log_warn("%s: setsockopt AF_ROUTE ROUTE_PRIOFILTER", __func__); |
131 | /* not fatal */ |
132 | } |
133 | |
134 | if (setsockopt(kr_state.fd, AF_ROUTE17, ROUTE_FLAGFILTER4, &filter_flags, |
135 | sizeof(filter_flags)) == -1) { |
136 | log_warn("%s: setsockopt AF_ROUTE ROUTE_FLAGFILTER", __func__); |
137 | /* not fatal */ |
138 | } |
139 | |
140 | /* grow receive buffer, don't wanna miss messages */ |
141 | optlen = sizeof(default_rcvbuf); |
142 | if (getsockopt(kr_state.fd, SOL_SOCKET0xffff, SO_RCVBUF0x1002, |
143 | &default_rcvbuf, &optlen) == -1) |
144 | log_warn("kr_init getsockopt SOL_SOCKET SO_RCVBUF"); |
145 | else |
146 | for (rcvbuf = MAX_RTSOCK_BUF(2 * 1024 * 1024); |
147 | rcvbuf > default_rcvbuf && |
148 | setsockopt(kr_state.fd, SOL_SOCKET0xffff, SO_RCVBUF0x1002, |
149 | &rcvbuf, sizeof(rcvbuf)) == -1 && errno(*__errno()) == ENOBUFS55; |
150 | rcvbuf /= 2) |
151 | ; /* nothing */ |
152 | |
153 | kr_state.pid = getpid(); |
154 | kr_state.rtseq = 1; |
155 | |
156 | RB_INIT(&krt)do { (&krt)->rbh_root = ((void*)0); } while (0); |
157 | |
158 | if (fetchtable() == -1) |
159 | return (-1); |
160 | |
161 | if (protect_lo() == -1) |
162 | return (-1); |
163 | |
164 | event_set(&kr_state.ev, kr_state.fd, EV_READ0x02 | EV_PERSIST0x10, |
165 | kr_dispatch_msg, NULL((void*)0)); |
166 | event_add(&kr_state.ev, NULL((void*)0)); |
167 | |
168 | return (0); |
169 | } |
170 | |
171 | int |
172 | kr_change_fib(struct kroute_node *kr, struct kroute *kroute, int krcount, |
173 | int action) |
174 | { |
175 | int i; |
176 | struct kroute_node *kn, *nkn; |
177 | |
178 | if (action == RTM_ADD0x1) { |
179 | /* |
180 | * First remove all stale multipath routes. |
181 | * This step must be skipped when the action is RTM_CHANGE |
182 | * because it is already a single path route that will be |
183 | * changed. |
184 | */ |
185 | for (kn = kr; kn != NULL((void*)0); kn = nkn) { |
186 | for (i = 0; i < krcount; i++) { |
187 | if (kn->r.scope == kroute[i].scope && |
188 | IN6_ARE_ADDR_EQUAL(&kn->r.nexthop,(memcmp(&(&kn->r.nexthop)->__u6_addr.__u6_addr8 [0], &(&kroute[i].nexthop)->__u6_addr.__u6_addr8[0 ], sizeof(struct in6_addr)) == 0) |
189 | &kroute[i].nexthop)(memcmp(&(&kn->r.nexthop)->__u6_addr.__u6_addr8 [0], &(&kroute[i].nexthop)->__u6_addr.__u6_addr8[0 ], sizeof(struct in6_addr)) == 0)) |
190 | break; |
191 | } |
192 | nkn = kn->next; |
193 | if (i == krcount) { |
194 | /* stale route */ |
195 | if (kr_delete_fib(kn) == -1) |
196 | log_warnx("kr_delete_fib failed"); |
197 | /* |
198 | * if head element was removed we need to adjust |
199 | * the head |
200 | */ |
201 | if (kr == kn) |
202 | kr = nkn; |
203 | } |
204 | } |
205 | } |
206 | |
207 | /* |
208 | * now add or change the route |
209 | */ |
210 | for (i = 0; i < krcount; i++) { |
211 | /* nexthop ::1 -> ignore silently */ |
212 | if (IN6_IS_ADDR_LOOPBACK(&kroute[i].nexthop)((*(const u_int32_t *)(const void *)(&(&kroute[i].nexthop )->__u6_addr.__u6_addr8[0]) == 0) && (*(const u_int32_t *)(const void *)(&(&kroute[i].nexthop)->__u6_addr .__u6_addr8[4]) == 0) && (*(const u_int32_t *)(const void *)(&(&kroute[i].nexthop)->__u6_addr.__u6_addr8[8] ) == 0) && (*(const u_int32_t *)(const void *)(&( &kroute[i].nexthop)->__u6_addr.__u6_addr8[12]) == (__uint32_t )(__builtin_constant_p(1) ? (__uint32_t)(((__uint32_t)(1) & 0xff) << 24 | ((__uint32_t)(1) & 0xff00) << 8 | ((__uint32_t)(1) & 0xff0000) >> 8 | ((__uint32_t )(1) & 0xff000000) >> 24) : __swap32md(1))))) |
213 | continue; |
214 | |
215 | if (action == RTM_ADD0x1 && kr) { |
216 | for (kn = kr; kn != NULL((void*)0); kn = kn->next) { |
217 | if (kn->r.scope == kroute[i].scope && |
218 | IN6_ARE_ADDR_EQUAL(&kn->r.nexthop,(memcmp(&(&kn->r.nexthop)->__u6_addr.__u6_addr8 [0], &(&kroute[i].nexthop)->__u6_addr.__u6_addr8[0 ], sizeof(struct in6_addr)) == 0) |
219 | &kroute[i].nexthop)(memcmp(&(&kn->r.nexthop)->__u6_addr.__u6_addr8 [0], &(&kroute[i].nexthop)->__u6_addr.__u6_addr8[0 ], sizeof(struct in6_addr)) == 0)) |
220 | break; |
221 | } |
222 | |
223 | if (kn != NULL((void*)0)) |
224 | /* nexthop already present, skip it */ |
225 | continue; |
226 | } else |
227 | /* modify first entry */ |
228 | kn = kr; |
229 | |
230 | /* send update */ |
231 | if (send_rtmsg(kr_state.fd, action, &kroute[i]) == -1) |
232 | return (-1); |
233 | |
234 | /* create new entry unless we are changing the first entry */ |
235 | if (action == RTM_ADD0x1) |
236 | if ((kn = calloc(1, sizeof(*kn))) == NULL((void*)0)) |
237 | fatal(NULL((void*)0)); |
238 | |
239 | kn->r.prefix = kroute[i].prefix; |
240 | kn->r.prefixlen = kroute[i].prefixlen; |
241 | kn->r.nexthop = kroute[i].nexthop; |
242 | kn->r.scope = kroute[i].scope; |
243 | kn->r.flags = kroute[i].flags | F_OSPFD_INSERTED0x0001; |
244 | kn->r.priority = kr_state.fib_prio; |
245 | kn->r.ext_tag = kroute[i].ext_tag; |
246 | rtlabel_unref(kn->r.rtlabel); /* for RTM_CHANGE */ |
247 | kn->r.rtlabel = kroute[i].rtlabel; |
248 | |
249 | if (action == RTM_ADD0x1) |
250 | if (kroute_insert(kn) == -1) { |
251 | log_debug("kr_update_fib: cannot insert %s", |
252 | log_in6addr(&kn->r.nexthop)); |
253 | free(kn); |
254 | } |
255 | action = RTM_ADD0x1; |
256 | } |
257 | return (0); |
258 | } |
259 | |
260 | int |
261 | kr_change(struct kroute *kroute, int krcount) |
262 | { |
263 | struct kroute_node *kr; |
264 | int action = RTM_ADD0x1; |
265 | |
266 | kroute->rtlabel = rtlabel_tag2id(kroute->ext_tag); |
267 | |
268 | kr = kroute_find(&kroute->prefix, kroute->prefixlen, kr_state.fib_prio); |
269 | if (kr != NULL((void*)0) && kr->next == NULL((void*)0) && krcount == 1) { |
270 | /* |
271 | * single path OSPF route. |
272 | * The kernel does not allow to change a gateway route to a |
273 | * cloning route or contrary. In this case remove and add the |
274 | * route, otherwise change the existing one. |
275 | */ |
276 | if ((IN6_IS_ADDR_UNSPECIFIED(&kroute->nexthop)((*(const u_int32_t *)(const void *)(&(&kroute->nexthop )->__u6_addr.__u6_addr8[0]) == 0) && (*(const u_int32_t *)(const void *)(&(&kroute->nexthop)->__u6_addr .__u6_addr8[4]) == 0) && (*(const u_int32_t *)(const void *)(&(&kroute->nexthop)->__u6_addr.__u6_addr8[8 ]) == 0) && (*(const u_int32_t *)(const void *)(& (&kroute->nexthop)->__u6_addr.__u6_addr8[12]) == 0) ) && |
277 | !IN6_IS_ADDR_UNSPECIFIED(&kr->r.nexthop)((*(const u_int32_t *)(const void *)(&(&kr->r.nexthop )->__u6_addr.__u6_addr8[0]) == 0) && (*(const u_int32_t *)(const void *)(&(&kr->r.nexthop)->__u6_addr. __u6_addr8[4]) == 0) && (*(const u_int32_t *)(const void *)(&(&kr->r.nexthop)->__u6_addr.__u6_addr8[8]) == 0) && (*(const u_int32_t *)(const void *)(&(& kr->r.nexthop)->__u6_addr.__u6_addr8[12]) == 0))) || |
278 | (!IN6_IS_ADDR_UNSPECIFIED(&kroute->nexthop)((*(const u_int32_t *)(const void *)(&(&kroute->nexthop )->__u6_addr.__u6_addr8[0]) == 0) && (*(const u_int32_t *)(const void *)(&(&kroute->nexthop)->__u6_addr .__u6_addr8[4]) == 0) && (*(const u_int32_t *)(const void *)(&(&kroute->nexthop)->__u6_addr.__u6_addr8[8 ]) == 0) && (*(const u_int32_t *)(const void *)(& (&kroute->nexthop)->__u6_addr.__u6_addr8[12]) == 0) ) && |
279 | IN6_IS_ADDR_UNSPECIFIED(&kr->r.nexthop)((*(const u_int32_t *)(const void *)(&(&kr->r.nexthop )->__u6_addr.__u6_addr8[0]) == 0) && (*(const u_int32_t *)(const void *)(&(&kr->r.nexthop)->__u6_addr. __u6_addr8[4]) == 0) && (*(const u_int32_t *)(const void *)(&(&kr->r.nexthop)->__u6_addr.__u6_addr8[8]) == 0) && (*(const u_int32_t *)(const void *)(&(& kr->r.nexthop)->__u6_addr.__u6_addr8[12]) == 0)))) { |
280 | if (kr_delete_fib(kr) == 0) |
281 | kr = NULL((void*)0); |
282 | else { |
283 | log_warn("kr_change: failed to remove route: " |
284 | "%s/%d", log_in6addr(&kr->r.prefix), |
285 | kr->r.prefixlen); |
286 | return (-1); |
287 | } |
288 | } else |
289 | action = RTM_CHANGE0x3; |
290 | } |
291 | |
292 | return (kr_change_fib(kr, kroute, krcount, action)); |
293 | } |
294 | |
295 | int |
296 | kr_delete_fib(struct kroute_node *kr) |
297 | { |
298 | if (kr->r.priority != kr_state.fib_prio) |
299 | log_warn("kr_delete_fib: %s/%d has wrong priority %d", |
300 | log_in6addr(&kr->r.prefix), kr->r.prefixlen, |
301 | kr->r.priority); |
302 | |
303 | if (send_rtmsg(kr_state.fd, RTM_DELETE0x2, &kr->r) == -1) |
304 | return (-1); |
305 | |
306 | if (kroute_remove(kr) == -1) |
307 | return (-1); |
308 | |
309 | return (0); |
310 | } |
311 | |
312 | int |
313 | kr_delete(struct kroute *kroute) |
314 | { |
315 | struct kroute_node *kr, *nkr; |
316 | |
317 | if ((kr = kroute_find(&kroute->prefix, kroute->prefixlen, |
318 | kr_state.fib_prio)) == NULL((void*)0)) |
319 | return (0); |
320 | |
321 | while (kr != NULL((void*)0)) { |
322 | nkr = kr->next; |
323 | if (kr_delete_fib(kr) == -1) |
324 | return (-1); |
325 | kr = nkr; |
326 | } |
327 | |
328 | return (0); |
329 | } |
330 | |
/*
 * Tear down: withdraw our routes from the kernel, then free the local
 * route tree.
 */
void
kr_shutdown(void)
{
	kr_fib_decouple();
	kroute_clear();
}
337 | |
338 | void |
339 | kr_fib_couple(void) |
340 | { |
341 | struct kroute_node *kr; |
342 | struct kroute_node *kn; |
343 | |
344 | if (kr_state.fib_sync == 1) /* already coupled */ |
345 | return; |
346 | |
347 | kr_state.fib_sync = 1; |
348 | |
349 | RB_FOREACH(kr, kroute_tree, &krt)for ((kr) = kroute_tree_RB_MINMAX(&krt, -1); (kr) != ((void *)0); (kr) = kroute_tree_RB_NEXT(kr)) |
350 | if (kr->r.priority == kr_state.fib_prio) |
351 | for (kn = kr; kn != NULL((void*)0); kn = kn->next) |
352 | send_rtmsg(kr_state.fd, RTM_ADD0x1, &kn->r); |
353 | |
354 | log_info("kernel routing table coupled"); |
355 | } |
356 | |
357 | void |
358 | kr_fib_decouple(void) |
359 | { |
360 | struct kroute_node *kr; |
361 | struct kroute_node *kn; |
362 | |
363 | if (kr_state.fib_sync == 0) /* already decoupled */ |
364 | return; |
365 | |
366 | RB_FOREACH(kr, kroute_tree, &krt)for ((kr) = kroute_tree_RB_MINMAX(&krt, -1); (kr) != ((void *)0); (kr) = kroute_tree_RB_NEXT(kr)) |
367 | if (kr->r.priority == kr_state.fib_prio) |
368 | for (kn = kr; kn != NULL((void*)0); kn = kn->next) |
369 | send_rtmsg(kr_state.fd, RTM_DELETE0x2, &kn->r); |
370 | |
371 | kr_state.fib_sync = 0; |
372 | |
373 | log_info("kernel routing table decoupled"); |
374 | } |
375 | |
376 | void |
377 | kr_fib_update_prio(u_int8_t fib_prio) |
378 | { |
379 | struct kroute_node *kr; |
380 | |
381 | RB_FOREACH(kr, kroute_tree, &krt)for ((kr) = kroute_tree_RB_MINMAX(&krt, -1); (kr) != ((void *)0); (kr) = kroute_tree_RB_NEXT(kr)) |
382 | if ((kr->r.flags & F_OSPFD_INSERTED0x0001)) |
383 | kr->r.priority = fib_prio; |
384 | |
385 | log_info("fib priority changed from %hhu to %hhu", kr_state.fib_prio, |
386 | fib_prio); |
387 | |
388 | kr_state.fib_prio = fib_prio; |
389 | } |
390 | |
/* ARGSUSED */
/*
 * libevent callback for the routing socket; all arguments are unused
 * and the work is delegated to dispatch_rtmsg().
 */
void
kr_dispatch_msg(int fd, short event, void *bula)
{
	/* XXX this is stupid */
	dispatch_rtmsg();
}
398 | |
399 | void |
400 | kr_show_route(struct imsg *imsg) |
401 | { |
402 | struct kroute_node *kr; |
403 | struct kroute_node *kn; |
404 | int flags; |
405 | struct in6_addr addr; |
406 | |
407 | switch (imsg->hdr.type) { |
408 | case IMSG_CTL_KROUTE: |
409 | if (imsg->hdr.len != IMSG_HEADER_SIZEsizeof(struct imsg_hdr) + sizeof(flags)) { |
410 | log_warnx("kr_show_route: wrong imsg len"); |
411 | return; |
412 | } |
413 | memcpy(&flags, imsg->data, sizeof(flags)); |
414 | RB_FOREACH(kr, kroute_tree, &krt)for ((kr) = kroute_tree_RB_MINMAX(&krt, -1); (kr) != ((void *)0); (kr) = kroute_tree_RB_NEXT(kr)) |
415 | if (!flags || kr->r.flags & flags) { |
416 | kn = kr; |
417 | do { |
418 | main_imsg_compose_ospfe(IMSG_CTL_KROUTE, |
419 | imsg->hdr.pid, |
420 | &kn->r, sizeof(kn->r)); |
421 | } while ((kn = kn->next) != NULL((void*)0)); |
422 | } |
423 | break; |
424 | case IMSG_CTL_KROUTE_ADDR: |
425 | if (imsg->hdr.len != IMSG_HEADER_SIZEsizeof(struct imsg_hdr) + |
426 | sizeof(struct in6_addr)) { |
427 | log_warnx("kr_show_route: wrong imsg len"); |
428 | return; |
429 | } |
430 | memcpy(&addr, imsg->data, sizeof(addr)); |
431 | kr = kroute_match(&addr); |
432 | if (kr != NULL((void*)0)) |
433 | main_imsg_compose_ospfe(IMSG_CTL_KROUTE, imsg->hdr.pid, |
434 | &kr->r, sizeof(kr->r)); |
435 | break; |
436 | default: |
437 | log_debug("kr_show_route: error handling imsg"); |
438 | break; |
439 | } |
440 | |
441 | main_imsg_compose_ospfe(IMSG_CTL_END, imsg->hdr.pid, NULL((void*)0), 0); |
442 | } |
443 | |
444 | void |
445 | kr_redist_remove(struct kroute_node *kh, struct kroute_node *kn) |
446 | { |
447 | struct kroute *kr; |
448 | |
449 | /* was the route redistributed? */ |
450 | if ((kn->r.flags & F_REDISTRIBUTED0x0200) == 0) |
451 | return; |
452 | |
453 | /* remove redistributed flag */ |
454 | kn->r.flags &= ~F_REDISTRIBUTED0x0200; |
455 | kr = &kn->r; |
456 | |
457 | /* probably inform the RDE (check if no other path is redistributed) */ |
458 | for (kn = kh; kn; kn = kn->next) |
459 | if (kn->r.flags & F_REDISTRIBUTED0x0200) |
460 | break; |
461 | |
462 | if (kn == NULL((void*)0)) |
463 | main_imsg_compose_rde(IMSG_NETWORK_DEL, 0, kr, |
464 | sizeof(struct kroute)); |
465 | } |
466 | |
467 | int |
468 | kr_redist_eval(struct kroute *kr, struct kroute *new_kr) |
469 | { |
470 | u_int32_t metric = 0; |
471 | |
472 | /* Only non-ospfd routes are considered for redistribution. */ |
473 | if (!(kr->flags & F_KERNEL0x0002)) |
474 | goto dont_redistribute; |
475 | |
476 | /* Dynamic routes are not redistributable. */ |
477 | if (kr->flags & F_DYNAMIC0x0040) |
478 | goto dont_redistribute; |
479 | |
480 | /* interface is not up and running so don't announce */ |
481 | if (kr->flags & F_DOWN0x0010) |
482 | goto dont_redistribute; |
483 | |
484 | /* |
485 | * We consider loopback, multicast, link- and site-local, |
486 | * IPv4 mapped and IPv4 compatible addresses as not redistributable. |
487 | */ |
488 | if (IN6_IS_ADDR_LOOPBACK(&kr->prefix)((*(const u_int32_t *)(const void *)(&(&kr->prefix )->__u6_addr.__u6_addr8[0]) == 0) && (*(const u_int32_t *)(const void *)(&(&kr->prefix)->__u6_addr.__u6_addr8 [4]) == 0) && (*(const u_int32_t *)(const void *)(& (&kr->prefix)->__u6_addr.__u6_addr8[8]) == 0) && (*(const u_int32_t *)(const void *)(&(&kr->prefix )->__u6_addr.__u6_addr8[12]) == (__uint32_t)(__builtin_constant_p (1) ? (__uint32_t)(((__uint32_t)(1) & 0xff) << 24 | ((__uint32_t)(1) & 0xff00) << 8 | ((__uint32_t)(1) & 0xff0000) >> 8 | ((__uint32_t)(1) & 0xff000000 ) >> 24) : __swap32md(1)))) || |
489 | IN6_IS_ADDR_MULTICAST(&kr->prefix)((&kr->prefix)->__u6_addr.__u6_addr8[0] == 0xff) || |
490 | IN6_IS_ADDR_LINKLOCAL(&kr->prefix)(((&kr->prefix)->__u6_addr.__u6_addr8[0] == 0xfe) && (((&kr->prefix)->__u6_addr.__u6_addr8[1] & 0xc0 ) == 0x80)) || |
491 | IN6_IS_ADDR_SITELOCAL(&kr->prefix)(((&kr->prefix)->__u6_addr.__u6_addr8[0] == 0xfe) && (((&kr->prefix)->__u6_addr.__u6_addr8[1] & 0xc0 ) == 0xc0)) || |
492 | IN6_IS_ADDR_V4MAPPED(&kr->prefix)((*(const u_int32_t *)(const void *)(&(&kr->prefix )->__u6_addr.__u6_addr8[0]) == 0) && (*(const u_int32_t *)(const void *)(&(&kr->prefix)->__u6_addr.__u6_addr8 [4]) == 0) && (*(const u_int32_t *)(const void *)(& (&kr->prefix)->__u6_addr.__u6_addr8[8]) == (__uint32_t )(__builtin_constant_p(0x0000ffff) ? (__uint32_t)(((__uint32_t )(0x0000ffff) & 0xff) << 24 | ((__uint32_t)(0x0000ffff ) & 0xff00) << 8 | ((__uint32_t)(0x0000ffff) & 0xff0000 ) >> 8 | ((__uint32_t)(0x0000ffff) & 0xff000000) >> 24) : __swap32md(0x0000ffff)))) || |
493 | IN6_IS_ADDR_V4COMPAT(&kr->prefix)((*(const u_int32_t *)(const void *)(&(&kr->prefix )->__u6_addr.__u6_addr8[0]) == 0) && (*(const u_int32_t *)(const void *)(&(&kr->prefix)->__u6_addr.__u6_addr8 [4]) == 0) && (*(const u_int32_t *)(const void *)(& (&kr->prefix)->__u6_addr.__u6_addr8[8]) == 0) && (*(const u_int32_t *)(const void *)(&(&kr->prefix )->__u6_addr.__u6_addr8[12]) != 0) && (*(const u_int32_t *)(const void *)(&(&kr->prefix)->__u6_addr.__u6_addr8 [12]) != (__uint32_t)(__builtin_constant_p(1) ? (__uint32_t)( ((__uint32_t)(1) & 0xff) << 24 | ((__uint32_t)(1) & 0xff00) << 8 | ((__uint32_t)(1) & 0xff0000) >> 8 | ((__uint32_t)(1) & 0xff000000) >> 24) : __swap32md (1))))) |
494 | goto dont_redistribute; |
495 | /* |
496 | * Consider networks with nexthop loopback as not redistributable |
497 | * unless it is a reject or blackhole route. |
498 | */ |
499 | if (IN6_IS_ADDR_LOOPBACK(&kr->nexthop)((*(const u_int32_t *)(const void *)(&(&kr->nexthop )->__u6_addr.__u6_addr8[0]) == 0) && (*(const u_int32_t *)(const void *)(&(&kr->nexthop)->__u6_addr.__u6_addr8 [4]) == 0) && (*(const u_int32_t *)(const void *)(& (&kr->nexthop)->__u6_addr.__u6_addr8[8]) == 0) && (*(const u_int32_t *)(const void *)(&(&kr->nexthop )->__u6_addr.__u6_addr8[12]) == (__uint32_t)(__builtin_constant_p (1) ? (__uint32_t)(((__uint32_t)(1) & 0xff) << 24 | ((__uint32_t)(1) & 0xff00) << 8 | ((__uint32_t)(1) & 0xff0000) >> 8 | ((__uint32_t)(1) & 0xff000000 ) >> 24) : __swap32md(1)))) && |
500 | !(kr->flags & (F_BLACKHOLE0x0100|F_REJECT0x0080))) |
501 | goto dont_redistribute; |
502 | |
503 | /* Should we redistribute this route? */ |
504 | if (!ospf_redistribute(kr, &metric)) |
505 | goto dont_redistribute; |
506 | |
507 | /* prefix should be redistributed */ |
508 | kr->flags |= F_REDISTRIBUTED0x0200; |
509 | /* |
510 | * only one of all multipath routes can be redistributed so |
511 | * redistribute the best one. |
512 | */ |
513 | if (new_kr->metric > metric) { |
514 | *new_kr = *kr; |
515 | new_kr->metric = metric; |
516 | } |
517 | |
518 | return (1); |
519 | |
520 | dont_redistribute: |
521 | /* was the route redistributed? */ |
522 | if ((kr->flags & F_REDISTRIBUTED0x0200) == 0) |
523 | return (0); |
524 | |
525 | kr->flags &= ~F_REDISTRIBUTED0x0200; |
526 | return (1); |
527 | } |
528 | |
529 | void |
530 | kr_redistribute(struct kroute_node *kh) |
531 | { |
532 | struct kroute_node *kn; |
533 | struct kroute kr; |
534 | int redistribute = 0; |
535 | |
536 | /* only the highest prio route can be redistributed */ |
537 | if (kroute_find(&kh->r.prefix, kh->r.prefixlen, RTP_ANY64) != kh) |
538 | return; |
539 | |
540 | bzero(&kr, sizeof(kr)); |
541 | kr.metric = UINT_MAX(2147483647 *2U +1U); |
542 | for (kn = kh; kn; kn = kn->next) |
543 | if (kr_redist_eval(&kn->r, &kr)) |
544 | redistribute = 1; |
545 | |
546 | if (!redistribute) |
547 | return; |
548 | |
549 | if (kr.flags & F_REDISTRIBUTED0x0200) { |
550 | main_imsg_compose_rde(IMSG_NETWORK_ADD, 0, &kr, |
551 | sizeof(struct kroute)); |
552 | } else { |
553 | kr = kh->r; |
554 | main_imsg_compose_rde(IMSG_NETWORK_DEL, 0, &kr, |
555 | sizeof(struct kroute)); |
556 | } |
557 | } |
558 | |
559 | void |
560 | kr_reload(int redis_label_or_prefix) |
561 | { |
562 | struct kroute_node *kr, *kn; |
563 | u_int32_t dummy; |
564 | int r; |
565 | int filter_prio = kr_state.fib_prio; |
566 | |
567 | /* update the priority filter */ |
568 | if (redis_label_or_prefix) { |
569 | filter_prio = 0; |
570 | log_info("%s: priority filter disabled", __func__); |
571 | } else |
572 | log_debug("%s: priority filter enabled", __func__); |
573 | |
574 | if (setsockopt(kr_state.fd, AF_ROUTE17, ROUTE_PRIOFILTER3, &filter_prio, |
575 | sizeof(filter_prio)) == -1) { |
576 | log_warn("%s: setsockopt AF_ROUTE ROUTE_PRIOFILTER", __func__); |
577 | /* not fatal */ |
578 | } |
579 | |
580 | RB_FOREACH(kr, kroute_tree, &krt)for ((kr) = kroute_tree_RB_MINMAX(&krt, -1); (kr) != ((void *)0); (kr) = kroute_tree_RB_NEXT(kr)) { |
581 | for (kn = kr; kn; kn = kn->next) { |
582 | r = ospf_redistribute(&kn->r, &dummy); |
583 | /* |
584 | * if it is redistributed, redistribute again metric |
585 | * may have changed. |
586 | */ |
587 | if ((kn->r.flags & F_REDISTRIBUTED0x0200 && !r) || r) |
588 | break; |
589 | } |
590 | if (kn) { |
591 | /* |
592 | * kr_redistribute copes with removes and RDE with |
593 | * duplicates |
594 | */ |
595 | kr_redistribute(kr); |
596 | } |
597 | } |
598 | } |
599 | |
600 | /* rb-tree compare */ |
601 | int |
602 | kroute_compare(struct kroute_node *a, struct kroute_node *b) |
603 | { |
604 | int i; |
605 | |
606 | /* XXX maybe switch a & b */ |
607 | i = memcmp(&a->r.prefix, &b->r.prefix, sizeof(a->r.prefix)); |
608 | if (i) |
609 | return (i); |
610 | if (a->r.prefixlen < b->r.prefixlen) |
611 | return (-1); |
612 | if (a->r.prefixlen > b->r.prefixlen) |
613 | return (1); |
614 | |
615 | /* if the priority is RTP_ANY finish on the first address hit */ |
616 | if (a->r.priority == RTP_ANY64 || b->r.priority == RTP_ANY64) |
617 | return (0); |
618 | if (a->r.priority < b->r.priority) |
619 | return (-1); |
620 | if (a->r.priority > b->r.priority) |
621 | return (1); |
622 | return (0); |
623 | } |
624 | |
625 | /* tree management */ |
626 | struct kroute_node * |
627 | kroute_find(const struct in6_addr *prefix, u_int8_t prefixlen, u_int8_t prio) |
628 | { |
629 | struct kroute_node s; |
630 | struct kroute_node *kn, *tmp; |
631 | |
632 | s.r.prefix = *prefix; |
633 | s.r.prefixlen = prefixlen; |
634 | s.r.priority = prio; |
635 | |
636 | kn = RB_FIND(kroute_tree, &krt, &s)kroute_tree_RB_FIND(&krt, &s); |
637 | if (kn && prio == RTP_ANY64) { |
638 | tmp = RB_PREV(kroute_tree, &krt, kn)kroute_tree_RB_PREV(kn); |
639 | while (tmp) { |
640 | if (kroute_compare(&s, tmp) == 0) |
641 | kn = tmp; |
642 | else |
643 | break; |
644 | tmp = RB_PREV(kroute_tree, &krt, kn)kroute_tree_RB_PREV(kn); |
645 | } |
646 | } |
647 | return (kn); |
648 | } |
649 | |
650 | struct kroute_node * |
651 | kroute_matchgw(struct kroute_node *kr, struct in6_addr *nh, unsigned int scope) |
652 | { |
653 | while (kr) { |
654 | if (scope == kr->r.scope && |
655 | IN6_ARE_ADDR_EQUAL(&kr->r.nexthop, nh)(memcmp(&(&kr->r.nexthop)->__u6_addr.__u6_addr8 [0], &(nh)->__u6_addr.__u6_addr8[0], sizeof(struct in6_addr )) == 0)) |
656 | return (kr); |
657 | kr = kr->next; |
658 | } |
659 | |
660 | return (NULL((void*)0)); |
661 | } |
662 | |
663 | int |
664 | kroute_insert(struct kroute_node *kr) |
665 | { |
666 | struct kroute_node *krm, *krh; |
667 | |
668 | if ((krh = RB_INSERT(kroute_tree, &krt, kr)kroute_tree_RB_INSERT(&krt, kr)) != NULL((void*)0)) { |
669 | /* |
670 | * Multipath route, add at end of list. |
671 | */ |
672 | krm = krh; |
673 | while (krm->next != NULL((void*)0)) |
674 | krm = krm->next; |
675 | krm->next = kr; |
676 | kr->next = NULL((void*)0); /* to be sure */ |
677 | } else |
678 | krh = kr; |
679 | |
680 | if (!(kr->r.flags & F_KERNEL0x0002)) { |
681 | /* don't validate or redistribute ospf route */ |
682 | kr->r.flags &= ~F_DOWN0x0010; |
683 | return (0); |
684 | } |
685 | |
686 | if (kif_validate(kr->r.ifindex)) |
687 | kr->r.flags &= ~F_DOWN0x0010; |
688 | else |
689 | kr->r.flags |= F_DOWN0x0010; |
690 | |
691 | kr_redistribute(krh); |
692 | return (0); |
693 | } |
694 | |
695 | int |
696 | kroute_remove(struct kroute_node *kr) |
697 | { |
698 | struct kroute_node *krm; |
699 | |
700 | if ((krm = RB_FIND(kroute_tree, &krt, kr)kroute_tree_RB_FIND(&krt, kr)) == NULL((void*)0)) { |
701 | log_warnx("kroute_remove failed to find %s/%u", |
702 | log_in6addr(&kr->r.prefix), kr->r.prefixlen); |
703 | return (-1); |
704 | } |
705 | |
706 | if (krm == kr) { |
707 | /* head element */ |
708 | if (RB_REMOVE(kroute_tree, &krt, kr)kroute_tree_RB_REMOVE(&krt, kr) == NULL((void*)0)) { |
709 | log_warnx("kroute_remove failed for %s/%u", |
710 | log_in6addr(&kr->r.prefix), kr->r.prefixlen); |
711 | return (-1); |
712 | } |
713 | if (kr->next != NULL((void*)0)) { |
714 | if (RB_INSERT(kroute_tree, &krt, kr->next)kroute_tree_RB_INSERT(&krt, kr->next) != NULL((void*)0)) { |
715 | log_warnx("kroute_remove failed to add %s/%u", |
716 | log_in6addr(&kr->r.prefix), |
717 | kr->r.prefixlen); |
718 | return (-1); |
719 | } |
720 | } |
721 | } else { |
722 | /* somewhere in the list */ |
723 | while (krm->next != kr && krm->next != NULL((void*)0)) |
724 | krm = krm->next; |
725 | if (krm->next == NULL((void*)0)) { |
726 | log_warnx("kroute_remove multipath list corrupted " |
727 | "for %s/%u", log_in6addr(&kr->r.prefix), |
728 | kr->r.prefixlen); |
729 | return (-1); |
730 | } |
731 | krm->next = kr->next; |
732 | } |
733 | |
734 | kr_redist_remove(krm, kr); |
735 | rtlabel_unref(kr->r.rtlabel); |
736 | |
737 | free(kr); |
738 | return (0); |
739 | } |
740 | |
741 | void |
742 | kroute_clear(void) |
743 | { |
744 | struct kroute_node *kr; |
745 | |
746 | while ((kr = RB_MIN(kroute_tree, &krt)kroute_tree_RB_MINMAX(&krt, -1)) != NULL((void*)0)) |
747 | kroute_remove(kr); |
748 | } |
749 | |
750 | struct iface * |
751 | kif_update(u_short ifindex, int flags, struct if_data *ifd, |
752 | struct sockaddr_dl *sdl) |
753 | { |
754 | struct iface *iface; |
755 | char ifname[IF_NAMESIZE16]; |
756 | |
757 | if ((iface = if_find(ifindex)) == NULL((void*)0)) { |
758 | bzero(ifname, sizeof(ifname)); |
759 | if (sdl && sdl->sdl_family == AF_LINK18) { |
760 | if (sdl->sdl_nlen >= sizeof(ifname)) |
761 | memcpy(ifname, sdl->sdl_data, |
762 | sizeof(ifname) - 1); |
763 | else if (sdl->sdl_nlen > 0) |
764 | memcpy(ifname, sdl->sdl_data, sdl->sdl_nlen); |
765 | else |
766 | return (NULL((void*)0)); |
767 | } else |
768 | return (NULL((void*)0)); |
769 | if ((iface = if_new(ifindex, ifname)) == NULL((void*)0)) |
770 | return (NULL((void*)0)); |
771 | } |
772 | |
773 | if_update(iface, ifd->ifi_mtu, flags, ifd->ifi_type, |
774 | ifd->ifi_link_state, ifd->ifi_baudrate, ifd->ifi_rdomain); |
775 | |
776 | return (iface); |
777 | } |
778 | |
779 | int |
780 | kif_validate(u_short ifindex) |
781 | { |
782 | struct iface *iface; |
783 | |
784 | if ((iface = if_find(ifindex)) == NULL((void*)0)) { |
785 | log_warnx("interface with index %u not found", ifindex); |
786 | return (-1); |
787 | } |
788 | |
789 | return ((iface->flags & IFF_UP0x1) && LINK_STATE_IS_UP(iface->linkstate)((iface->linkstate) >= 4 || (iface->linkstate) == 0)); |
790 | } |
791 | |
792 | struct kroute_node * |
793 | kroute_match(struct in6_addr *key) |
794 | { |
795 | int i; |
796 | struct kroute_node *kr; |
797 | struct in6_addr ina; |
798 | |
799 | /* we will never match the default route */ |
800 | for (i = 128; i > 0; i--) { |
801 | inet6applymask(&ina, key, i); |
802 | if ((kr = kroute_find(&ina, i, RTP_ANY64)) != NULL((void*)0)) |
803 | return (kr); |
804 | } |
805 | |
806 | /* if we don't have a match yet, try to find a default route */ |
807 | if ((kr = kroute_find(&in6addr_any, 0, RTP_ANY64)) != NULL((void*)0)) |
808 | return (kr); |
809 | |
810 | return (NULL((void*)0)); |
811 | } |
812 | |
813 | /* misc */ |
814 | int |
815 | protect_lo(void) |
816 | { |
817 | struct kroute_node *kr; |
818 | |
819 | /* special protection for loopback */ |
820 | if ((kr = calloc(1, sizeof(struct kroute_node))) == NULL((void*)0)) { |
821 | log_warn("protect_lo"); |
822 | return (-1); |
823 | } |
824 | memcpy(&kr->r.prefix, &in6addr_loopback, sizeof(kr->r.prefix)); |
825 | kr->r.prefixlen = 128; |
826 | kr->r.flags = F_KERNEL0x0002|F_CONNECTED0x0008; |
827 | |
828 | if (RB_INSERT(kroute_tree, &krt, kr)kroute_tree_RB_INSERT(&krt, kr) != NULL((void*)0)) |
829 | free(kr); /* kernel route already there, no problem */ |
830 | |
831 | return (0); |
832 | } |
833 | |
/*
 * Round a sockaddr length up to the platform's long-word alignment, as
 * required when walking addresses in routing-socket messages; a length
 * of 0 still occupies one long.
 */
#define ROUNDUP(a)	\
	((a) > 0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))
836 | |
837 | void |
838 | get_rtaddrs(int addrs, struct sockaddr *sa, struct sockaddr **rti_info) |
839 | { |
840 | int i; |
841 | |
842 | for (i = 0; i < RTAX_MAX15; i++) { |
843 | if (addrs & (1 << i)) { |
844 | rti_info[i] = sa; |
845 | sa = (struct sockaddr *)((char *)(sa) + |
846 | ROUNDUP(sa->sa_len)((sa->sa_len) > 0 ? (1 + (((sa->sa_len) - 1) | (sizeof (long) - 1))) : sizeof(long))); |
847 | } else |
848 | rti_info[i] = NULL((void*)0); |
849 | } |
850 | } |
851 | |
852 | void |
853 | if_change(u_short ifindex, int flags, struct if_data *ifd, |
854 | struct sockaddr_dl *sdl) |
855 | { |
856 | struct kroute_node *kr, *tkr; |
857 | struct iface *iface; |
858 | u_int8_t wasvalid, isvalid; |
859 | |
860 | wasvalid = kif_validate(ifindex); |
861 | |
862 | if ((iface = kif_update(ifindex, flags, ifd, sdl)) == NULL((void*)0)) { |
863 | log_warn("if_change: kif_update(%u)", ifindex); |
864 | return; |
865 | } |
866 | |
867 | /* inform engine and rde about state change */ |
868 | main_imsg_compose_rde(IMSG_IFINFO, 0, iface, sizeof(struct iface)); |
869 | main_imsg_compose_ospfe(IMSG_IFINFO, 0, iface, sizeof(struct iface)); |
870 | |
871 | isvalid = (iface->flags & IFF_UP0x1) && |
872 | LINK_STATE_IS_UP(iface->linkstate)((iface->linkstate) >= 4 || (iface->linkstate) == 0); |
873 | |
874 | if (wasvalid == isvalid) |
875 | return; /* nothing changed wrt validity */ |
876 | |
877 | /* update redistribute list */ |
878 | RB_FOREACH(kr, kroute_tree, &krt)for ((kr) = kroute_tree_RB_MINMAX(&krt, -1); (kr) != ((void *)0); (kr) = kroute_tree_RB_NEXT(kr)) { |
879 | for (tkr = kr; tkr != NULL((void*)0); tkr = tkr->next) { |
880 | if (tkr->r.ifindex == ifindex) { |
881 | if (isvalid) |
882 | tkr->r.flags &= ~F_DOWN0x0010; |
883 | else |
884 | tkr->r.flags |= F_DOWN0x0010; |
885 | |
886 | } |
887 | } |
888 | kr_redistribute(kr); |
889 | } |
890 | } |
891 | |
892 | void |
893 | if_newaddr(u_short ifindex, struct sockaddr_in6 *ifa, struct sockaddr_in6 *mask, |
894 | struct sockaddr_in6 *brd) |
895 | { |
896 | struct iface *iface; |
897 | struct iface_addr *ia; |
898 | struct ifaddrchange ifc; |
899 | |
900 | if (ifa == NULL((void*)0) || ifa->sin6_family != AF_INET624) |
901 | return; |
902 | if ((iface = if_find(ifindex)) == NULL((void*)0)) { |
903 | log_warnx("if_newaddr: corresponding if %d not found", ifindex); |
904 | return; |
905 | } |
906 | |
907 | /* We only care about link-local and global-scope. */ |
908 | if (IN6_IS_ADDR_UNSPECIFIED(&ifa->sin6_addr)((*(const u_int32_t *)(const void *)(&(&ifa->sin6_addr )->__u6_addr.__u6_addr8[0]) == 0) && (*(const u_int32_t *)(const void *)(&(&ifa->sin6_addr)->__u6_addr .__u6_addr8[4]) == 0) && (*(const u_int32_t *)(const void *)(&(&ifa->sin6_addr)->__u6_addr.__u6_addr8[8] ) == 0) && (*(const u_int32_t *)(const void *)(&( &ifa->sin6_addr)->__u6_addr.__u6_addr8[12]) == 0)) || |
909 | IN6_IS_ADDR_LOOPBACK(&ifa->sin6_addr)((*(const u_int32_t *)(const void *)(&(&ifa->sin6_addr )->__u6_addr.__u6_addr8[0]) == 0) && (*(const u_int32_t *)(const void *)(&(&ifa->sin6_addr)->__u6_addr .__u6_addr8[4]) == 0) && (*(const u_int32_t *)(const void *)(&(&ifa->sin6_addr)->__u6_addr.__u6_addr8[8] ) == 0) && (*(const u_int32_t *)(const void *)(&( &ifa->sin6_addr)->__u6_addr.__u6_addr8[12]) == (__uint32_t )(__builtin_constant_p(1) ? (__uint32_t)(((__uint32_t)(1) & 0xff) << 24 | ((__uint32_t)(1) & 0xff00) << 8 | ((__uint32_t)(1) & 0xff0000) >> 8 | ((__uint32_t )(1) & 0xff000000) >> 24) : __swap32md(1)))) || |
910 | IN6_IS_ADDR_MULTICAST(&ifa->sin6_addr)((&ifa->sin6_addr)->__u6_addr.__u6_addr8[0] == 0xff ) || |
911 | IN6_IS_ADDR_SITELOCAL(&ifa->sin6_addr)(((&ifa->sin6_addr)->__u6_addr.__u6_addr8[0] == 0xfe ) && (((&ifa->sin6_addr)->__u6_addr.__u6_addr8 [1] & 0xc0) == 0xc0)) || |
912 | IN6_IS_ADDR_V4MAPPED(&ifa->sin6_addr)((*(const u_int32_t *)(const void *)(&(&ifa->sin6_addr )->__u6_addr.__u6_addr8[0]) == 0) && (*(const u_int32_t *)(const void *)(&(&ifa->sin6_addr)->__u6_addr .__u6_addr8[4]) == 0) && (*(const u_int32_t *)(const void *)(&(&ifa->sin6_addr)->__u6_addr.__u6_addr8[8] ) == (__uint32_t)(__builtin_constant_p(0x0000ffff) ? (__uint32_t )(((__uint32_t)(0x0000ffff) & 0xff) << 24 | ((__uint32_t )(0x0000ffff) & 0xff00) << 8 | ((__uint32_t)(0x0000ffff ) & 0xff0000) >> 8 | ((__uint32_t)(0x0000ffff) & 0xff000000) >> 24) : __swap32md(0x0000ffff)))) || |
913 | IN6_IS_ADDR_V4COMPAT(&ifa->sin6_addr)((*(const u_int32_t *)(const void *)(&(&ifa->sin6_addr )->__u6_addr.__u6_addr8[0]) == 0) && (*(const u_int32_t *)(const void *)(&(&ifa->sin6_addr)->__u6_addr .__u6_addr8[4]) == 0) && (*(const u_int32_t *)(const void *)(&(&ifa->sin6_addr)->__u6_addr.__u6_addr8[8] ) == 0) && (*(const u_int32_t *)(const void *)(&( &ifa->sin6_addr)->__u6_addr.__u6_addr8[12]) != 0) && (*(const u_int32_t *)(const void *)(&(&ifa->sin6_addr )->__u6_addr.__u6_addr8[12]) != (__uint32_t)(__builtin_constant_p (1) ? (__uint32_t)(((__uint32_t)(1) & 0xff) << 24 | ((__uint32_t)(1) & 0xff00) << 8 | ((__uint32_t)(1) & 0xff0000) >> 8 | ((__uint32_t)(1) & 0xff000000 ) >> 24) : __swap32md(1))))) |
914 | return; |
915 | |
916 | clearscope(&ifa->sin6_addr); |
917 | |
918 | if (IN6_IS_ADDR_LINKLOCAL(&ifa->sin6_addr)(((&ifa->sin6_addr)->__u6_addr.__u6_addr8[0] == 0xfe ) && (((&ifa->sin6_addr)->__u6_addr.__u6_addr8 [1] & 0xc0) == 0x80)) || |
919 | iface->flags & IFF_LOOPBACK0x8) |
920 | iface->addr = ifa->sin6_addr; |
921 | |
922 | if ((ia = calloc(1, sizeof(struct iface_addr))) == NULL((void*)0)) |
923 | fatal("if_newaddr"); |
924 | |
925 | ia->addr = ifa->sin6_addr; |
926 | |
927 | if (mask) |
928 | ia->prefixlen = mask2prefixlen(mask); |
929 | else |
930 | ia->prefixlen = 0; |
931 | if (brd && brd->sin6_family == AF_INET624) |
932 | ia->dstbrd = brd->sin6_addr; |
933 | else |
934 | bzero(&ia->dstbrd, sizeof(ia->dstbrd)); |
935 | |
936 | switch (iface->type) { |
937 | case IF_TYPE_BROADCAST: |
938 | case IF_TYPE_NBMA: |
939 | log_debug("if_newaddr: ifindex %u, addr %s/%d", |
940 | ifindex, log_in6addr(&ia->addr), ia->prefixlen); |
941 | break; |
942 | case IF_TYPE_VIRTUALLINK: /* FIXME */ |
943 | break; |
944 | case IF_TYPE_POINTOPOINT: |
945 | case IF_TYPE_POINTOMULTIPOINT: |
946 | log_debug("if_newaddr: ifindex %u, addr %s/%d, " |
947 | "dest %s", ifindex, log_in6addr(&ia->addr), |
948 | ia->prefixlen, log_in6addr(&ia->dstbrd)); |
949 | break; |
950 | default: |
951 | fatalx("if_newaddr: unknown interface type"); |
952 | } |
953 | |
954 | TAILQ_INSERT_TAIL(&iface->ifa_list, ia, entry)do { (ia)->entry.tqe_next = ((void*)0); (ia)->entry.tqe_prev = (&iface->ifa_list)->tqh_last; *(&iface->ifa_list )->tqh_last = (ia); (&iface->ifa_list)->tqh_last = &(ia)->entry.tqe_next; } while (0); |
955 | /* inform engine and rde if interface is used */ |
956 | if (iface->cflags & F_IFACE_CONFIGURED0x02) { |
957 | ifc.addr = ia->addr; |
958 | ifc.dstbrd = ia->dstbrd; |
959 | ifc.prefixlen = ia->prefixlen; |
960 | ifc.ifindex = ifindex; |
961 | main_imsg_compose_ospfe(IMSG_IFADDRNEW, 0, &ifc, sizeof(ifc)); |
962 | main_imsg_compose_rde(IMSG_IFADDRNEW, 0, &ifc, sizeof(ifc)); |
963 | } |
964 | } |
965 | |
966 | void |
967 | if_deladdr(u_short ifindex, struct sockaddr_in6 *ifa, struct sockaddr_in6 *mask, |
968 | struct sockaddr_in6 *brd) |
969 | { |
970 | struct iface *iface; |
971 | struct iface_addr *ia, *nia; |
972 | struct ifaddrchange ifc; |
973 | |
974 | if (ifa == NULL((void*)0) || ifa->sin6_family != AF_INET624) |
975 | return; |
976 | if ((iface = if_find(ifindex)) == NULL((void*)0)) { |
977 | log_warnx("if_deladdr: corresponding if %d not found", ifindex); |
978 | return; |
979 | } |
980 | |
981 | /* We only care about link-local and global-scope. */ |
982 | if (IN6_IS_ADDR_UNSPECIFIED(&ifa->sin6_addr)((*(const u_int32_t *)(const void *)(&(&ifa->sin6_addr )->__u6_addr.__u6_addr8[0]) == 0) && (*(const u_int32_t *)(const void *)(&(&ifa->sin6_addr)->__u6_addr .__u6_addr8[4]) == 0) && (*(const u_int32_t *)(const void *)(&(&ifa->sin6_addr)->__u6_addr.__u6_addr8[8] ) == 0) && (*(const u_int32_t *)(const void *)(&( &ifa->sin6_addr)->__u6_addr.__u6_addr8[12]) == 0)) || |
983 | IN6_IS_ADDR_LOOPBACK(&ifa->sin6_addr)((*(const u_int32_t *)(const void *)(&(&ifa->sin6_addr )->__u6_addr.__u6_addr8[0]) == 0) && (*(const u_int32_t *)(const void *)(&(&ifa->sin6_addr)->__u6_addr .__u6_addr8[4]) == 0) && (*(const u_int32_t *)(const void *)(&(&ifa->sin6_addr)->__u6_addr.__u6_addr8[8] ) == 0) && (*(const u_int32_t *)(const void *)(&( &ifa->sin6_addr)->__u6_addr.__u6_addr8[12]) == (__uint32_t )(__builtin_constant_p(1) ? (__uint32_t)(((__uint32_t)(1) & 0xff) << 24 | ((__uint32_t)(1) & 0xff00) << 8 | ((__uint32_t)(1) & 0xff0000) >> 8 | ((__uint32_t )(1) & 0xff000000) >> 24) : __swap32md(1)))) || |
984 | IN6_IS_ADDR_MULTICAST(&ifa->sin6_addr)((&ifa->sin6_addr)->__u6_addr.__u6_addr8[0] == 0xff ) || |
985 | IN6_IS_ADDR_SITELOCAL(&ifa->sin6_addr)(((&ifa->sin6_addr)->__u6_addr.__u6_addr8[0] == 0xfe ) && (((&ifa->sin6_addr)->__u6_addr.__u6_addr8 [1] & 0xc0) == 0xc0)) || |
986 | IN6_IS_ADDR_V4MAPPED(&ifa->sin6_addr)((*(const u_int32_t *)(const void *)(&(&ifa->sin6_addr )->__u6_addr.__u6_addr8[0]) == 0) && (*(const u_int32_t *)(const void *)(&(&ifa->sin6_addr)->__u6_addr .__u6_addr8[4]) == 0) && (*(const u_int32_t *)(const void *)(&(&ifa->sin6_addr)->__u6_addr.__u6_addr8[8] ) == (__uint32_t)(__builtin_constant_p(0x0000ffff) ? (__uint32_t )(((__uint32_t)(0x0000ffff) & 0xff) << 24 | ((__uint32_t )(0x0000ffff) & 0xff00) << 8 | ((__uint32_t)(0x0000ffff ) & 0xff0000) >> 8 | ((__uint32_t)(0x0000ffff) & 0xff000000) >> 24) : __swap32md(0x0000ffff)))) || |
987 | IN6_IS_ADDR_V4COMPAT(&ifa->sin6_addr)((*(const u_int32_t *)(const void *)(&(&ifa->sin6_addr )->__u6_addr.__u6_addr8[0]) == 0) && (*(const u_int32_t *)(const void *)(&(&ifa->sin6_addr)->__u6_addr .__u6_addr8[4]) == 0) && (*(const u_int32_t *)(const void *)(&(&ifa->sin6_addr)->__u6_addr.__u6_addr8[8] ) == 0) && (*(const u_int32_t *)(const void *)(&( &ifa->sin6_addr)->__u6_addr.__u6_addr8[12]) != 0) && (*(const u_int32_t *)(const void *)(&(&ifa->sin6_addr )->__u6_addr.__u6_addr8[12]) != (__uint32_t)(__builtin_constant_p (1) ? (__uint32_t)(((__uint32_t)(1) & 0xff) << 24 | ((__uint32_t)(1) & 0xff00) << 8 | ((__uint32_t)(1) & 0xff0000) >> 8 | ((__uint32_t)(1) & 0xff000000 ) >> 24) : __swap32md(1))))) |
988 | return; |
989 | |
990 | clearscope(&ifa->sin6_addr); |
991 | |
992 | for (ia = TAILQ_FIRST(&iface->ifa_list)((&iface->ifa_list)->tqh_first); ia != NULL((void*)0); ia = nia) { |
993 | nia = TAILQ_NEXT(ia, entry)((ia)->entry.tqe_next); |
994 | |
995 | if (IN6_ARE_ADDR_EQUAL(&ia->addr, &ifa->sin6_addr)(memcmp(&(&ia->addr)->__u6_addr.__u6_addr8[0], & (&ifa->sin6_addr)->__u6_addr.__u6_addr8[0], sizeof( struct in6_addr)) == 0)) { |
996 | log_debug("if_deladdr: ifindex %u, addr %s/%d", |
997 | ifindex, log_in6addr(&ia->addr), ia->prefixlen); |
998 | TAILQ_REMOVE(&iface->ifa_list, ia, entry)do { if (((ia)->entry.tqe_next) != ((void*)0)) (ia)->entry .tqe_next->entry.tqe_prev = (ia)->entry.tqe_prev; else ( &iface->ifa_list)->tqh_last = (ia)->entry.tqe_prev ; *(ia)->entry.tqe_prev = (ia)->entry.tqe_next; ; ; } while (0); |
999 | /* inform engine and rde if interface is used */ |
1000 | if (iface->cflags & F_IFACE_CONFIGURED0x02) { |
1001 | ifc.addr = ia->addr; |
1002 | ifc.dstbrd = ia->dstbrd; |
1003 | ifc.prefixlen = ia->prefixlen; |
1004 | ifc.ifindex = ifindex; |
1005 | main_imsg_compose_ospfe(IMSG_IFADDRDEL, 0, &ifc, |
1006 | sizeof(ifc)); |
1007 | main_imsg_compose_rde(IMSG_IFADDRDEL, 0, &ifc, |
1008 | sizeof(ifc)); |
1009 | } |
1010 | free(ia); |
1011 | return; |
1012 | } |
1013 | } |
1014 | } |
1015 | |
1016 | void |
1017 | if_announce(void *msg) |
1018 | { |
1019 | struct if_announcemsghdr *ifan; |
1020 | struct iface *iface; |
1021 | |
1022 | ifan = msg; |
1023 | |
1024 | switch (ifan->ifan_what) { |
1025 | case IFAN_ARRIVAL0: |
1026 | if ((iface = if_new(ifan->ifan_index, ifan->ifan_name)) == NULL((void*)0)) |
1027 | fatal("if_announce failed"); |
1028 | break; |
1029 | case IFAN_DEPARTURE1: |
1030 | iface = if_find(ifan->ifan_index); |
1031 | if_del(iface); |
1032 | break; |
1033 | } |
1034 | } |
1035 | |
1036 | /* rtsock */ |
1037 | int |
1038 | send_rtmsg(int fd, int action, struct kroute *kroute) |
1039 | { |
1040 | struct iovec iov[5]; |
1041 | struct rt_msghdr hdr; |
1042 | struct pad { |
1043 | struct sockaddr_in6 addr; |
1044 | char pad[sizeof(long)]; /* thank you IPv6 */ |
1045 | } prefix, nexthop, mask; |
1046 | struct { |
1047 | struct sockaddr_dl addr; |
1048 | char pad[sizeof(long)]; |
1049 | } ifp; |
1050 | struct sockaddr_rtlabel sa_rl; |
1051 | int iovcnt = 0; |
1052 | const char *label; |
1053 | |
1054 | if (kr_state.fib_sync == 0) |
1055 | return (0); |
1056 | |
1057 | /* initialize header */ |
1058 | bzero(&hdr, sizeof(hdr)); |
1059 | hdr.rtm_version = RTM_VERSION5; |
1060 | hdr.rtm_type = action; |
1061 | hdr.rtm_priority = kr_state.fib_prio; |
1062 | hdr.rtm_tableid = kr_state.rdomain; /* rtableid */ |
1063 | if (action == RTM_CHANGE0x3) |
1064 | hdr.rtm_fmask = RTF_REJECT0x8|RTF_BLACKHOLE0x1000; |
1065 | else |
1066 | hdr.rtm_flags = RTF_MPATH0x40000; |
1067 | hdr.rtm_seq = kr_state.rtseq++; /* overflow doesn't matter */ |
1068 | hdr.rtm_hdrlen = sizeof(hdr); |
1069 | hdr.rtm_msglen = sizeof(hdr); |
1070 | /* adjust iovec */ |
1071 | iov[iovcnt].iov_base = &hdr; |
1072 | iov[iovcnt++].iov_len = sizeof(hdr); |
1073 | |
1074 | bzero(&prefix, sizeof(prefix)); |
1075 | prefix.addr.sin6_len = sizeof(struct sockaddr_in6); |
1076 | prefix.addr.sin6_family = AF_INET624; |
1077 | prefix.addr.sin6_addr = kroute->prefix; |
1078 | /* adjust header */ |
1079 | hdr.rtm_addrs |= RTA_DST0x1; |
1080 | hdr.rtm_msglen += ROUNDUP(sizeof(struct sockaddr_in6))((sizeof(struct sockaddr_in6)) > 0 ? (1 + (((sizeof(struct sockaddr_in6)) - 1) | (sizeof(long) - 1))) : sizeof(long)); |
1081 | /* adjust iovec */ |
1082 | iov[iovcnt].iov_base = &prefix; |
1083 | iov[iovcnt++].iov_len = ROUNDUP(sizeof(struct sockaddr_in6))((sizeof(struct sockaddr_in6)) > 0 ? (1 + (((sizeof(struct sockaddr_in6)) - 1) | (sizeof(long) - 1))) : sizeof(long)); |
1084 | |
1085 | if (!IN6_IS_ADDR_UNSPECIFIED(&kroute->nexthop)((*(const u_int32_t *)(const void *)(&(&kroute->nexthop )->__u6_addr.__u6_addr8[0]) == 0) && (*(const u_int32_t *)(const void *)(&(&kroute->nexthop)->__u6_addr .__u6_addr8[4]) == 0) && (*(const u_int32_t *)(const void *)(&(&kroute->nexthop)->__u6_addr.__u6_addr8[8 ]) == 0) && (*(const u_int32_t *)(const void *)(& (&kroute->nexthop)->__u6_addr.__u6_addr8[12]) == 0) )) { |
1086 | bzero(&nexthop, sizeof(nexthop)); |
1087 | nexthop.addr.sin6_len = sizeof(struct sockaddr_in6); |
1088 | nexthop.addr.sin6_family = AF_INET624; |
1089 | nexthop.addr.sin6_addr = kroute->nexthop; |
1090 | nexthop.addr.sin6_scope_id = kroute->scope; |
1091 | /* |
1092 | * XXX we should set the sin6_scope_id but the kernel |
1093 | * XXX does not expect it that way. It must be fiddled |
1094 | * XXX into the sin6_addr. Welcome to the typical |
1095 | * XXX IPv6 insanity and all without wine bottles. |
1096 | */ |
1097 | embedscope(&nexthop.addr); |
1098 | |
1099 | /* adjust header */ |
1100 | hdr.rtm_flags |= RTF_GATEWAY0x2; |
1101 | hdr.rtm_addrs |= RTA_GATEWAY0x2; |
1102 | hdr.rtm_msglen += ROUNDUP(sizeof(struct sockaddr_in6))((sizeof(struct sockaddr_in6)) > 0 ? (1 + (((sizeof(struct sockaddr_in6)) - 1) | (sizeof(long) - 1))) : sizeof(long)); |
1103 | /* adjust iovec */ |
1104 | iov[iovcnt].iov_base = &nexthop; |
1105 | iov[iovcnt++].iov_len = ROUNDUP(sizeof(struct sockaddr_in6))((sizeof(struct sockaddr_in6)) > 0 ? (1 + (((sizeof(struct sockaddr_in6)) - 1) | (sizeof(long) - 1))) : sizeof(long)); |
1106 | } else if (kroute->ifindex) { |
1107 | /* |
1108 | * We don't have an interface address in that network, |
1109 | * so we install a cloning route. The kernel will then |
1110 | * do neighbor discovery. |
1111 | */ |
1112 | bzero(&ifp, sizeof(ifp)); |
1113 | ifp.addr.sdl_len = sizeof(struct sockaddr_dl); |
1114 | ifp.addr.sdl_family = AF_LINK18; |
1115 | |
1116 | ifp.addr.sdl_index = kroute->ifindex; |
1117 | /* adjust header */ |
1118 | hdr.rtm_flags |= RTF_CLONING0x100; |
1119 | hdr.rtm_addrs |= RTA_GATEWAY0x2; |
1120 | hdr.rtm_msglen += ROUNDUP(sizeof(struct sockaddr_dl))((sizeof(struct sockaddr_dl)) > 0 ? (1 + (((sizeof(struct sockaddr_dl )) - 1) | (sizeof(long) - 1))) : sizeof(long)); |
1121 | /* adjust iovec */ |
1122 | iov[iovcnt].iov_base = &ifp; |
1123 | iov[iovcnt++].iov_len = ROUNDUP(sizeof(struct sockaddr_dl))((sizeof(struct sockaddr_dl)) > 0 ? (1 + (((sizeof(struct sockaddr_dl )) - 1) | (sizeof(long) - 1))) : sizeof(long)); |
1124 | } |
1125 | |
1126 | bzero(&mask, sizeof(mask)); |
1127 | mask.addr.sin6_len = sizeof(struct sockaddr_in6); |
1128 | mask.addr.sin6_family = AF_INET624; |
1129 | mask.addr.sin6_addr = *prefixlen2mask(kroute->prefixlen); |
1130 | /* adjust header */ |
1131 | if (kroute->prefixlen == 128) |
1132 | hdr.rtm_flags |= RTF_HOST0x4; |
1133 | hdr.rtm_addrs |= RTA_NETMASK0x4; |
1134 | hdr.rtm_msglen += ROUNDUP(sizeof(struct sockaddr_in6))((sizeof(struct sockaddr_in6)) > 0 ? (1 + (((sizeof(struct sockaddr_in6)) - 1) | (sizeof(long) - 1))) : sizeof(long)); |
1135 | /* adjust iovec */ |
1136 | iov[iovcnt].iov_base = &mask; |
1137 | iov[iovcnt++].iov_len = ROUNDUP(sizeof(struct sockaddr_in6))((sizeof(struct sockaddr_in6)) > 0 ? (1 + (((sizeof(struct sockaddr_in6)) - 1) | (sizeof(long) - 1))) : sizeof(long)); |
1138 | |
1139 | if (kroute->rtlabel != 0) { |
1140 | sa_rl.sr_len = sizeof(sa_rl); |
1141 | sa_rl.sr_family = AF_UNSPEC0; |
1142 | label = rtlabel_id2name(kroute->rtlabel); |
1143 | if (strlcpy(sa_rl.sr_label, label, |
1144 | sizeof(sa_rl.sr_label)) >= sizeof(sa_rl.sr_label)) { |
1145 | log_warnx("send_rtmsg: invalid rtlabel"); |
1146 | return (-1); |
1147 | } |
1148 | /* adjust header */ |
1149 | hdr.rtm_addrs |= RTA_LABEL0x400; |
1150 | hdr.rtm_msglen += sizeof(sa_rl); |
1151 | /* adjust iovec */ |
1152 | iov[iovcnt].iov_base = &sa_rl; |
1153 | iov[iovcnt++].iov_len = sizeof(sa_rl); |
1154 | } |
1155 | |
1156 | retry: |
1157 | if (writev(fd, iov, iovcnt) == -1) { |
1158 | if (errno(*__errno()) == ESRCH3) { |
1159 | if (hdr.rtm_type == RTM_CHANGE0x3) { |
1160 | hdr.rtm_type = RTM_ADD0x1; |
1161 | goto retry; |
1162 | } else if (hdr.rtm_type == RTM_DELETE0x2) { |
1163 | log_info("route %s/%u vanished before delete", |
1164 | log_sockaddr(&prefix), kroute->prefixlen); |
1165 | return (0); |
1166 | } |
1167 | } |
1168 | log_warn("send_rtmsg: action %u, prefix %s/%u", hdr.rtm_type, |
1169 | log_sockaddr(&prefix), kroute->prefixlen); |
1170 | return (0); |
1171 | } |
1172 | |
1173 | return (0); |
1174 | } |
1175 | |
/*
 * Pull the full IPv6 routing table for our rdomain out of the kernel
 * via sysctl(2) and feed the dump to rtmsg_process().
 * Returns rtmsg_process()'s result, or -1 on sysctl/allocation failure.
 */
int
fetchtable(void)
{
	int	 mib[7];
	size_t	 needed;
	char	*rtbuf;
	int	 ret;

	mib[0] = CTL_NET;
	mib[1] = PF_ROUTE;
	mib[2] = 0;
	mib[3] = AF_INET6;
	mib[4] = NET_RT_DUMP;
	mib[5] = 0;
	mib[6] = kr_state.rdomain;	/* rtableid */

	/* first pass: ask the kernel how large the dump will be */
	if (sysctl(mib, 7, NULL, &needed, NULL, 0) == -1) {
		log_warn("sysctl");
		return (-1);
	}
	rtbuf = malloc(needed);
	if (rtbuf == NULL) {
		log_warn("fetchtable");
		return (-1);
	}
	/* second pass: fetch the actual routing table */
	if (sysctl(mib, 7, rtbuf, &needed, NULL, 0) == -1) {
		log_warn("sysctl");
		free(rtbuf);
		return (-1);
	}

	ret = rtmsg_process(rtbuf, needed);
	free(rtbuf);

	return (ret);
}
1211 | |
/*
 * Fetch the kernel interface list (optionally restricted to a single
 * interface index; 0 means all) via sysctl(2) and hand the resulting
 * messages to rtmsg_process().
 * Returns rtmsg_process()'s result, or -1 on sysctl/allocation failure.
 */
int
fetchifs(u_short ifindex)
{
	int	 mib[6];
	size_t	 needed;
	char	*ifbuf;
	int	 ret;

	mib[0] = CTL_NET;
	mib[1] = PF_ROUTE;
	mib[2] = 0;
	mib[3] = AF_INET6;
	mib[4] = NET_RT_IFLIST;
	mib[5] = ifindex;

	/* size probe first, then the real fetch */
	if (sysctl(mib, 6, NULL, &needed, NULL, 0) == -1) {
		log_warn("sysctl");
		return (-1);
	}
	ifbuf = malloc(needed);
	if (ifbuf == NULL) {
		log_warn("fetchifs");
		return (-1);
	}
	if (sysctl(mib, 6, ifbuf, &needed, NULL, 0) == -1) {
		log_warn("sysctl");
		free(ifbuf);
		return (-1);
	}

	ret = rtmsg_process(ifbuf, needed);
	free(ifbuf);

	return (ret);
}
1246 | |
1247 | int |
1248 | dispatch_rtmsg(void) |
1249 | { |
1250 | char buf[RT_BUF_SIZE16384]; |
1251 | ssize_t n; |
1252 | |
1253 | if ((n = read(kr_state.fd, &buf, sizeof(buf))) == -1) { |
1254 | if (errno(*__errno()) == EAGAIN35 || errno(*__errno()) == EINTR4) |
1255 | return (0); |
1256 | log_warn("dispatch_rtmsg: read error"); |
1257 | return (-1); |
1258 | } |
1259 | |
1260 | if (n == 0) { |
1261 | log_warnx("routing socket closed"); |
1262 | return (-1); |
1263 | } |
1264 | |
1265 | return (rtmsg_process(buf, n)); |
1266 | } |
1267 | |
/*
 * Walk a buffer of routing socket messages (either a sysctl dump from
 * fetchtable()/fetchifs() or a read from the routing socket) and update
 * the local kroute tree and interface state accordingly.
 *
 * Fix: the RTM_DELETE branch stored the kroute_find() result into 'okr'
 * ("okr = kr;") but never read it again — kroute_matchgw() and
 * kroute_remove() already work on 'kr' directly.  The dead store is
 * removed (flagged by the static analyzer); 'okr' is still used by the
 * RTM_ADD/RTM_GET/RTM_CHANGE branch.
 *
 * Returns the number of bytes consumed, or -1 on fatal error.
 */
int
rtmsg_process(char *buf, size_t len)
{
	struct rt_msghdr	*rtm;
	struct if_msghdr	 ifm;
	struct ifa_msghdr	*ifam;
	struct sockaddr		*sa, *rti_info[RTAX_MAX];
	struct sockaddr_in6	*sa_in6;
	struct sockaddr_rtlabel	*label;
	struct kroute_node	*kr, *okr;
	struct in6_addr		 prefix, nexthop;
	u_int8_t		 prefixlen, prio;
	int			 flags, mpath;
	unsigned int		 scope;
	u_short			 ifindex = 0;
	int			 rv;
	size_t			 offset;
	char			*next;

	for (offset = 0; offset < len; offset += rtm->rtm_msglen) {
		next = buf + offset;
		rtm = (struct rt_msghdr *)next;
		/* make sure the full message is in the buffer */
		if (len < offset + sizeof(u_short) ||
		    len < offset + rtm->rtm_msglen)
			fatalx("rtmsg_process: partial rtm in buffer");
		if (rtm->rtm_version != RTM_VERSION)
			continue;

		bzero(&prefix, sizeof(prefix));
		bzero(&nexthop, sizeof(nexthop));
		scope = 0;
		prefixlen = 0;
		flags = F_KERNEL;
		mpath = 0;
		prio = 0;

		/* extract the sockaddrs that follow the message header */
		sa = (struct sockaddr *)(next + rtm->rtm_hdrlen);
		get_rtaddrs(rtm->rtm_addrs, sa, rti_info);

		switch (rtm->rtm_type) {
		case RTM_ADD:
		case RTM_GET:
		case RTM_CHANGE:
		case RTM_DELETE:
			if (rtm->rtm_errno)		/* failed attempts... */
				continue;

			if (rtm->rtm_tableid != kr_state.rdomain)
				continue;

			if (rtm->rtm_type == RTM_GET &&
			    rtm->rtm_pid != kr_state.pid)	/* caused by us */
				continue;

			if ((sa = rti_info[RTAX_DST]) == NULL)
				continue;

			/* Skip ARP/ND cache and broadcast routes. */
			if (rtm->rtm_flags & (RTF_LLINFO|RTF_BROADCAST))
				continue;

			if (rtm->rtm_flags & RTF_MPATH)
				mpath = 1;
			prio = rtm->rtm_priority;
			flags = (prio == kr_state.fib_prio) ?
			    F_OSPFD_INSERTED : F_KERNEL;

			switch (sa->sa_family) {
			case AF_INET6:
				prefix =
				    ((struct sockaddr_in6 *)sa)->sin6_addr;
				sa_in6 = (struct sockaddr_in6 *)
				    rti_info[RTAX_NETMASK];
				if (sa_in6 != NULL) {
					if (sa_in6->sin6_len != 0)
						prefixlen = mask2prefixlen(
						    sa_in6);
				} else if (rtm->rtm_flags & RTF_HOST)
					prefixlen = 128;
				else
					fatalx("classful IPv6 address?!!");
				if (rtm->rtm_flags & RTF_STATIC)
					flags |= F_STATIC;
				if (rtm->rtm_flags & RTF_BLACKHOLE)
					flags |= F_BLACKHOLE;
				if (rtm->rtm_flags & RTF_REJECT)
					flags |= F_REJECT;
				if (rtm->rtm_flags & RTF_DYNAMIC)
					flags |= F_DYNAMIC;
				break;
			default:
				continue;
			}

			ifindex = rtm->rtm_index;
			if ((sa = rti_info[RTAX_GATEWAY]) != NULL) {
				switch (sa->sa_family) {
				case AF_INET6:
					if (rtm->rtm_flags & RTF_CONNECTED)
						flags |= F_CONNECTED;

					sa_in6 = (struct sockaddr_in6 *)sa;
					/*
					 * XXX The kernel provides the scope
					 * XXX via the kame hack instead of
					 * XXX the scope_id field.
					 */
					recoverscope(sa_in6);
					nexthop = sa_in6->sin6_addr;
					scope = sa_in6->sin6_scope_id;
					break;
				case AF_LINK:
					/* gateway is an interface: directly
					 * connected route */
					flags |= F_CONNECTED;
					break;
				}
			}
		}

		switch (rtm->rtm_type) {
		case RTM_ADD:
		case RTM_GET:
		case RTM_CHANGE:
			/* a non-connected route needs a usable nexthop */
			if (IN6_IS_ADDR_UNSPECIFIED(&nexthop) &&
			    !(flags & F_CONNECTED)) {
				log_warnx("rtmsg_process no nexthop for %s/%u",
				    log_in6addr(&prefix), prefixlen);
				continue;
			}

			if ((okr = kroute_find(&prefix, prefixlen, prio))
			    != NULL) {
				/* just add new multipath routes */
				if (mpath && rtm->rtm_type == RTM_ADD)
					goto add;
				/* get the correct route */
				kr = okr;
				if (mpath && (kr = kroute_matchgw(okr,
				    &nexthop, scope)) == NULL) {
					log_warnx("rtmsg_process: mpath route"
					    " not found");
					/* add routes we missed out earlier */
					goto add;
				}

				/* update the existing entry in place */
				if (kr->r.flags & F_REDISTRIBUTED)
					flags |= F_REDISTRIBUTED;
				kr->r.nexthop = nexthop;
				kr->r.scope = scope;
				kr->r.flags = flags;
				kr->r.ifindex = ifindex;

				rtlabel_unref(kr->r.rtlabel);
				kr->r.rtlabel = 0;
				kr->r.ext_tag = 0;
				if ((label = (struct sockaddr_rtlabel *)
				    rti_info[RTAX_LABEL]) != NULL) {
					kr->r.rtlabel =
					    rtlabel_name2id(label->sr_label);
					kr->r.ext_tag =
					    rtlabel_id2tag(kr->r.rtlabel);
				}

				if (kif_validate(kr->r.ifindex))
					kr->r.flags &= ~F_DOWN;
				else
					kr->r.flags |= F_DOWN;

				/* just readd, the RDE will care */
				kr_redistribute(okr);
			} else {
add:
				if ((kr = calloc(1,
				    sizeof(struct kroute_node))) == NULL) {
					log_warn("rtmsg_process calloc");
					return (-1);
				}
				kr->r.prefix = prefix;
				kr->r.prefixlen = prefixlen;
				kr->r.nexthop = nexthop;
				kr->r.scope = scope;
				kr->r.flags = flags;
				kr->r.ifindex = ifindex;
				kr->r.priority = prio;

				if (rtm->rtm_priority == kr_state.fib_prio) {
					/*
					 * A route at our priority we did not
					 * insert ourselves: remove it from
					 * the kernel.
					 */
					log_warnx("alien OSPF route %s/%d",
					    log_in6addr(&prefix), prefixlen);
					rv = send_rtmsg(kr_state.fd,
					    RTM_DELETE, &kr->r);
					free(kr);
					if (rv == -1)
						return (-1);
				} else {
					if ((label = (struct sockaddr_rtlabel *)
					    rti_info[RTAX_LABEL]) != NULL) {
						kr->r.rtlabel =
						    rtlabel_name2id(
						    label->sr_label);
						kr->r.ext_tag =
						    rtlabel_id2tag(
						    kr->r.rtlabel);
					}

					kroute_insert(kr);
				}
			}
			break;
		case RTM_DELETE:
			if ((kr = kroute_find(&prefix, prefixlen, prio)) ==
			    NULL)
				continue;
			if (!(kr->r.flags & F_KERNEL))
				continue;
			/* get the correct route */
			if (mpath && (kr = kroute_matchgw(kr, &nexthop,
			    scope)) == NULL) {
				log_warnx("rtmsg_process mpath route"
				    " not found");
				return (-1);
			}
			if (kroute_remove(kr) == -1)
				return (-1);
			break;
		case RTM_IFINFO:
			memcpy(&ifm, next, sizeof(ifm));
			if_change(ifm.ifm_index, ifm.ifm_flags, &ifm.ifm_data,
			    (struct sockaddr_dl *)rti_info[RTAX_IFP]);
			break;
		case RTM_NEWADDR:
			ifam = (struct ifa_msghdr *)rtm;
			if ((ifam->ifam_addrs & (RTA_NETMASK | RTA_IFA |
			    RTA_BRD)) == 0)
				break;

			if_newaddr(ifam->ifam_index,
			    (struct sockaddr_in6 *)rti_info[RTAX_IFA],
			    (struct sockaddr_in6 *)rti_info[RTAX_NETMASK],
			    (struct sockaddr_in6 *)rti_info[RTAX_BRD]);
			break;
		case RTM_DELADDR:
			ifam = (struct ifa_msghdr *)rtm;
			if ((ifam->ifam_addrs & (RTA_NETMASK | RTA_IFA |
			    RTA_BRD)) == 0)
				break;

			if_deladdr(ifam->ifam_index,
			    (struct sockaddr_in6 *)rti_info[RTAX_IFA],
			    (struct sockaddr_in6 *)rti_info[RTAX_NETMASK],
			    (struct sockaddr_in6 *)rti_info[RTAX_BRD]);
			break;
		case RTM_IFANNOUNCE:
			if_announce(next);
			break;
		default:
			/* ignore for now */
			break;
		}
	}
	return (offset);
}