/*
 * File: src/usr.sbin/ospf6d/rde_lsdb.c
 * Static-analyzer report: line 40, column 1 -- "Use of memory after it is
 * freed" (reported inside the RB_GENERATE expansion; see note at that site).
 */
1 | /* $OpenBSD: rde_lsdb.c,v 1.47 2020/10/04 07:24:46 denis Exp $ */ | |||
2 | ||||
3 | /* | |||
4 | * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org> | |||
5 | * | |||
6 | * Permission to use, copy, modify, and distribute this software for any | |||
7 | * purpose with or without fee is hereby granted, provided that the above | |||
8 | * copyright notice and this permission notice appear in all copies. | |||
9 | * | |||
10 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | |||
11 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | |||
12 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | |||
13 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | |||
14 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | |||
15 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | |||
16 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | |||
17 | */ | |||
18 | ||||
19 | #include <sys/types.h> | |||
20 | #include <sys/tree.h> | |||
21 | #include <stdlib.h> | |||
22 | #include <string.h> | |||
23 | #include <unistd.h> | |||
24 | ||||
25 | #include "ospf6.h" | |||
26 | #include "ospf6d.h" | |||
27 | #include "rde.h" | |||
28 | #include "log.h" | |||
29 | ||||
30 | struct vertex *vertex_get(struct lsa *, struct rde_nbr *, struct lsa_tree *); | |||
31 | ||||
32 | int lsa_link_check(struct lsa *, u_int16_t); | |||
33 | int lsa_intra_a_pref_check(struct lsa *, u_int16_t); | |||
34 | int lsa_asext_check(struct lsa *, u_int16_t); | |||
35 | void lsa_timeout(int, short, void *); | |||
36 | void lsa_refresh(struct vertex *); | |||
37 | int lsa_equal(struct lsa *, struct lsa *); | |||
38 | int lsa_get_prefix(void *, u_int16_t, struct rt_prefix *); | |||
39 | ||||
/*
 * Generate the red-black tree support functions for the LSA database,
 * keyed by lsa_compare() (type, adv_rtr, ls_id).
 *
 * NOTE(review): the analyzer's "use of memory after it is freed" warning
 * points into the RB_REMOVE node-swap path expanded from <sys/tree.h>.
 * This is a well-known static-analyzer false positive in the tree.h
 * macros rather than a defect in this file -- confirm against the
 * tree.h revision in use.
 */
RB_GENERATE(lsa_tree, vertex, entry, lsa_compare)
| ||||
41 | ||||
/* Initialize an LSA tree to the empty state. */
void
lsa_init(struct lsa_tree *t)
{
	RB_INIT(t);
}
47 | ||||
48 | int | |||
49 | lsa_compare(struct vertex *a, struct vertex *b) | |||
50 | { | |||
51 | if (a->type < b->type) | |||
52 | return (-1); | |||
53 | if (a->type > b->type) | |||
54 | return (1); | |||
55 | if (a->adv_rtr < b->adv_rtr) | |||
56 | return (-1); | |||
57 | if (a->adv_rtr > b->adv_rtr) | |||
58 | return (1); | |||
59 | if (a->ls_id < b->ls_id) | |||
60 | return (-1); | |||
61 | if (a->ls_id > b->ls_id) | |||
62 | return (1); | |||
63 | return (0); | |||
64 | } | |||
65 | ||||
66 | ||||
67 | struct vertex * | |||
68 | vertex_get(struct lsa *lsa, struct rde_nbr *nbr, struct lsa_tree *tree) | |||
69 | { | |||
70 | struct vertex *v; | |||
71 | struct timespec tp; | |||
72 | ||||
73 | if ((v = calloc(1, sizeof(struct vertex))) == NULL((void*)0)) | |||
74 | fatal(NULL((void*)0)); | |||
75 | TAILQ_INIT(&v->nexthop)do { (&v->nexthop)->tqh_first = ((void*)0); (&v ->nexthop)->tqh_last = &(&v->nexthop)->tqh_first ; } while (0); | |||
76 | v->area = nbr->area; | |||
77 | v->peerid = nbr->peerid; | |||
78 | v->lsa = lsa; | |||
79 | clock_gettime(CLOCK_MONOTONIC3, &tp); | |||
80 | v->changed = v->stamp = tp.tv_sec; | |||
81 | v->cost = LS_INFINITY0xffffff; | |||
82 | v->ls_id = ntohl(lsa->hdr.ls_id)(__uint32_t)(__builtin_constant_p(lsa->hdr.ls_id) ? (__uint32_t )(((__uint32_t)(lsa->hdr.ls_id) & 0xff) << 24 | ( (__uint32_t)(lsa->hdr.ls_id) & 0xff00) << 8 | (( __uint32_t)(lsa->hdr.ls_id) & 0xff0000) >> 8 | ( (__uint32_t)(lsa->hdr.ls_id) & 0xff000000) >> 24 ) : __swap32md(lsa->hdr.ls_id)); | |||
83 | v->adv_rtr = ntohl(lsa->hdr.adv_rtr)(__uint32_t)(__builtin_constant_p(lsa->hdr.adv_rtr) ? (__uint32_t )(((__uint32_t)(lsa->hdr.adv_rtr) & 0xff) << 24 | ((__uint32_t)(lsa->hdr.adv_rtr) & 0xff00) << 8 | ((__uint32_t)(lsa->hdr.adv_rtr) & 0xff0000) >> 8 | ((__uint32_t)(lsa->hdr.adv_rtr) & 0xff000000) >> 24) : __swap32md(lsa->hdr.adv_rtr)); | |||
84 | v->type = ntohs(lsa->hdr.type)(__uint16_t)(__builtin_constant_p(lsa->hdr.type) ? (__uint16_t )(((__uint16_t)(lsa->hdr.type) & 0xffU) << 8 | ( (__uint16_t)(lsa->hdr.type) & 0xff00U) >> 8) : __swap16md (lsa->hdr.type)); | |||
85 | v->lsa_tree = tree; | |||
86 | ||||
87 | if (!nbr->self) | |||
88 | v->flooded = 1; /* XXX fix me */ | |||
89 | v->self = nbr->self; | |||
90 | ||||
91 | evtimer_set(&v->ev, lsa_timeout, v)event_set(&v->ev, -1, 0, lsa_timeout, v); | |||
92 | ||||
93 | return (v); | |||
94 | } | |||
95 | ||||
/*
 * Remove a vertex from its tree and release everything it owns:
 * pending timer, nexthop list, the wrapped LSA, and the vertex itself.
 */
void
vertex_free(struct vertex *v)
{
	RB_REMOVE(lsa_tree, v->lsa_tree, v);

	(void)evtimer_del(&v->ev);
	vertex_nexthop_clear(v);
	free(v->lsa);
	free(v);
}
106 | ||||
107 | void | |||
108 | vertex_nexthop_clear(struct vertex *v) | |||
109 | { | |||
110 | struct v_nexthop *vn; | |||
111 | ||||
112 | while ((vn = TAILQ_FIRST(&v->nexthop)((&v->nexthop)->tqh_first))) { | |||
113 | TAILQ_REMOVE(&v->nexthop, vn, entry)do { if (((vn)->entry.tqe_next) != ((void*)0)) (vn)->entry .tqe_next->entry.tqe_prev = (vn)->entry.tqe_prev; else ( &v->nexthop)->tqh_last = (vn)->entry.tqe_prev; * (vn)->entry.tqe_prev = (vn)->entry.tqe_next; ; ; } while (0); | |||
114 | free(vn); | |||
115 | } | |||
116 | } | |||
117 | ||||
118 | void | |||
119 | vertex_nexthop_add(struct vertex *dst, struct vertex *parent, | |||
120 | const struct in6_addr *nexthop, u_int32_t ifindex) | |||
121 | { | |||
122 | struct v_nexthop *vn; | |||
123 | ||||
124 | if ((vn = calloc(1, sizeof(*vn))) == NULL((void*)0)) | |||
125 | fatal("vertex_nexthop_add"); | |||
126 | ||||
127 | vn->prev = parent; | |||
128 | if (nexthop) | |||
129 | vn->nexthop = *nexthop; | |||
130 | vn->ifindex = ifindex; | |||
131 | ||||
132 | TAILQ_INSERT_TAIL(&dst->nexthop, vn, entry)do { (vn)->entry.tqe_next = ((void*)0); (vn)->entry.tqe_prev = (&dst->nexthop)->tqh_last; *(&dst->nexthop )->tqh_last = (vn); (&dst->nexthop)->tqh_last = & (vn)->entry.tqe_next; } while (0); | |||
133 | } | |||
134 | ||||
135 | /* returns -1 if a is older, 1 if newer and 0 if equal to b */ | |||
136 | int | |||
137 | lsa_newer(struct lsa_hdr *a, struct lsa_hdr *b) | |||
138 | { | |||
139 | int32_t a32, b32; | |||
140 | u_int16_t a16, b16; | |||
141 | int i; | |||
142 | ||||
143 | if (a == NULL((void*)0)) | |||
144 | return (-1); | |||
145 | if (b == NULL((void*)0)) | |||
146 | return (1); | |||
147 | ||||
148 | /* | |||
149 | * The sequence number is defined as signed 32-bit integer, | |||
150 | * no idea how IETF came up with such a stupid idea. | |||
151 | */ | |||
152 | a32 = (int32_t)ntohl(a->seq_num)(__uint32_t)(__builtin_constant_p(a->seq_num) ? (__uint32_t )(((__uint32_t)(a->seq_num) & 0xff) << 24 | ((__uint32_t )(a->seq_num) & 0xff00) << 8 | ((__uint32_t)(a-> seq_num) & 0xff0000) >> 8 | ((__uint32_t)(a->seq_num ) & 0xff000000) >> 24) : __swap32md(a->seq_num)); | |||
153 | b32 = (int32_t)ntohl(b->seq_num)(__uint32_t)(__builtin_constant_p(b->seq_num) ? (__uint32_t )(((__uint32_t)(b->seq_num) & 0xff) << 24 | ((__uint32_t )(b->seq_num) & 0xff00) << 8 | ((__uint32_t)(b-> seq_num) & 0xff0000) >> 8 | ((__uint32_t)(b->seq_num ) & 0xff000000) >> 24) : __swap32md(b->seq_num)); | |||
154 | ||||
155 | if (a32 > b32) | |||
156 | return (1); | |||
157 | if (a32 < b32) | |||
158 | return (-1); | |||
159 | ||||
160 | a16 = ntohs(a->ls_chksum)(__uint16_t)(__builtin_constant_p(a->ls_chksum) ? (__uint16_t )(((__uint16_t)(a->ls_chksum) & 0xffU) << 8 | (( __uint16_t)(a->ls_chksum) & 0xff00U) >> 8) : __swap16md (a->ls_chksum)); | |||
161 | b16 = ntohs(b->ls_chksum)(__uint16_t)(__builtin_constant_p(b->ls_chksum) ? (__uint16_t )(((__uint16_t)(b->ls_chksum) & 0xffU) << 8 | (( __uint16_t)(b->ls_chksum) & 0xff00U) >> 8) : __swap16md (b->ls_chksum)); | |||
162 | ||||
163 | if (a16 > b16) | |||
164 | return (1); | |||
165 | if (a16 < b16) | |||
166 | return (-1); | |||
167 | ||||
168 | a16 = ntohs(a->age)(__uint16_t)(__builtin_constant_p(a->age) ? (__uint16_t)(( (__uint16_t)(a->age) & 0xffU) << 8 | ((__uint16_t )(a->age) & 0xff00U) >> 8) : __swap16md(a->age )); | |||
169 | b16 = ntohs(b->age)(__uint16_t)(__builtin_constant_p(b->age) ? (__uint16_t)(( (__uint16_t)(b->age) & 0xffU) << 8 | ((__uint16_t )(b->age) & 0xff00U) >> 8) : __swap16md(b->age )); | |||
170 | ||||
171 | if (a16 >= MAX_AGE3600 && b16 >= MAX_AGE3600) | |||
172 | return (0); | |||
173 | if (b16 >= MAX_AGE3600) | |||
174 | return (-1); | |||
175 | if (a16 >= MAX_AGE3600) | |||
176 | return (1); | |||
177 | ||||
178 | i = b16 - a16; | |||
179 | if (abs(i) > MAX_AGE_DIFF900) | |||
180 | return (i > 0 ? 1 : -1); | |||
181 | ||||
182 | return (0); | |||
183 | } | |||
184 | ||||
185 | int | |||
186 | lsa_check(struct rde_nbr *nbr, struct lsa *lsa, u_int16_t len) | |||
187 | { | |||
188 | u_int32_t metric; | |||
189 | ||||
190 | if (len < sizeof(lsa->hdr)) { | |||
191 | log_warnx("lsa_check: bad packet size"); | |||
192 | return (0); | |||
193 | } | |||
194 | if (ntohs(lsa->hdr.len)(__uint16_t)(__builtin_constant_p(lsa->hdr.len) ? (__uint16_t )(((__uint16_t)(lsa->hdr.len) & 0xffU) << 8 | (( __uint16_t)(lsa->hdr.len) & 0xff00U) >> 8) : __swap16md (lsa->hdr.len)) != len) { | |||
195 | log_warnx("lsa_check: bad packet length"); | |||
196 | return (0); | |||
197 | } | |||
198 | ||||
199 | if (iso_cksum(lsa, len, 0)) { | |||
200 | log_warnx("lsa_check: bad packet checksum"); | |||
201 | return (0); | |||
202 | } | |||
203 | ||||
204 | /* invalid ages */ | |||
205 | if ((ntohs(lsa->hdr.age)(__uint16_t)(__builtin_constant_p(lsa->hdr.age) ? (__uint16_t )(((__uint16_t)(lsa->hdr.age) & 0xffU) << 8 | (( __uint16_t)(lsa->hdr.age) & 0xff00U) >> 8) : __swap16md (lsa->hdr.age)) < 1 && !nbr->self) || | |||
206 | ntohs(lsa->hdr.age)(__uint16_t)(__builtin_constant_p(lsa->hdr.age) ? (__uint16_t )(((__uint16_t)(lsa->hdr.age) & 0xffU) << 8 | (( __uint16_t)(lsa->hdr.age) & 0xff00U) >> 8) : __swap16md (lsa->hdr.age)) > MAX_AGE3600) { | |||
207 | log_warnx("lsa_check: bad age"); | |||
208 | return (0); | |||
209 | } | |||
210 | ||||
211 | /* invalid sequence number */ | |||
212 | if (ntohl(lsa->hdr.seq_num)(__uint32_t)(__builtin_constant_p(lsa->hdr.seq_num) ? (__uint32_t )(((__uint32_t)(lsa->hdr.seq_num) & 0xff) << 24 | ((__uint32_t)(lsa->hdr.seq_num) & 0xff00) << 8 | ((__uint32_t)(lsa->hdr.seq_num) & 0xff0000) >> 8 | ((__uint32_t)(lsa->hdr.seq_num) & 0xff000000) >> 24) : __swap32md(lsa->hdr.seq_num)) == RESV_SEQ_NUM0x80000000U) { | |||
213 | log_warnx("lsa_check: bad seq num"); | |||
214 | return (0); | |||
215 | } | |||
216 | ||||
217 | switch (ntohs(lsa->hdr.type)(__uint16_t)(__builtin_constant_p(lsa->hdr.type) ? (__uint16_t )(((__uint16_t)(lsa->hdr.type) & 0xffU) << 8 | ( (__uint16_t)(lsa->hdr.type) & 0xff00U) >> 8) : __swap16md (lsa->hdr.type))) { | |||
218 | case LSA_TYPE_LINK0x0008: | |||
219 | if (!lsa_link_check(lsa, len)) | |||
220 | return (0); | |||
221 | break; | |||
222 | case LSA_TYPE_ROUTER0x2001: | |||
223 | if (len < sizeof(lsa->hdr) + sizeof(struct lsa_rtr)) { | |||
224 | log_warnx("lsa_check: bad LSA rtr packet"); | |||
225 | return (0); | |||
226 | } | |||
227 | len -= sizeof(lsa->hdr) + sizeof(struct lsa_rtr); | |||
228 | if (len % sizeof(struct lsa_rtr_link)) { | |||
229 | log_warnx("lsa_check: bad LSA rtr packet"); | |||
230 | return (0); | |||
231 | } | |||
232 | break; | |||
233 | case LSA_TYPE_NETWORK0x2002: | |||
234 | if ((len % sizeof(u_int32_t)) || | |||
235 | len < sizeof(lsa->hdr) + sizeof(u_int32_t)) { | |||
236 | log_warnx("lsa_check: bad LSA network packet"); | |||
237 | return (0); | |||
238 | } | |||
239 | break; | |||
240 | case LSA_TYPE_INTER_A_PREFIX0x2003: | |||
241 | if (len < sizeof(lsa->hdr) + sizeof(lsa->data.pref_sum)) { | |||
242 | log_warnx("lsa_check: bad LSA prefix summary packet"); | |||
243 | return (0); | |||
244 | } | |||
245 | metric = ntohl(lsa->data.pref_sum.metric)(__uint32_t)(__builtin_constant_p(lsa->data.pref_sum.metric ) ? (__uint32_t)(((__uint32_t)(lsa->data.pref_sum.metric) & 0xff) << 24 | ((__uint32_t)(lsa->data.pref_sum.metric ) & 0xff00) << 8 | ((__uint32_t)(lsa->data.pref_sum .metric) & 0xff0000) >> 8 | ((__uint32_t)(lsa->data .pref_sum.metric) & 0xff000000) >> 24) : __swap32md (lsa->data.pref_sum.metric)); | |||
246 | if (metric & ~LSA_METRIC_MASK0x00ffffff) { | |||
247 | log_warnx("lsa_check: bad LSA prefix summary metric"); | |||
248 | return (0); | |||
249 | } | |||
250 | if (lsa_get_prefix(((char *)lsa) + sizeof(lsa->hdr) + | |||
251 | sizeof(lsa->data.pref_sum), | |||
252 | len - sizeof(lsa->hdr) + sizeof(lsa->data.pref_sum), | |||
253 | NULL((void*)0)) == -1) { | |||
254 | log_warnx("lsa_check: " | |||
255 | "invalid LSA prefix summary packet"); | |||
256 | return (0); | |||
257 | } | |||
258 | break; | |||
259 | case LSA_TYPE_INTER_A_ROUTER0x2004: | |||
260 | if (len < sizeof(lsa->hdr) + sizeof(lsa->data.rtr_sum)) { | |||
261 | log_warnx("lsa_check: bad LSA router summary packet"); | |||
262 | return (0); | |||
263 | } | |||
264 | metric = ntohl(lsa->data.rtr_sum.metric)(__uint32_t)(__builtin_constant_p(lsa->data.rtr_sum.metric ) ? (__uint32_t)(((__uint32_t)(lsa->data.rtr_sum.metric) & 0xff) << 24 | ((__uint32_t)(lsa->data.rtr_sum.metric ) & 0xff00) << 8 | ((__uint32_t)(lsa->data.rtr_sum .metric) & 0xff0000) >> 8 | ((__uint32_t)(lsa->data .rtr_sum.metric) & 0xff000000) >> 24) : __swap32md( lsa->data.rtr_sum.metric)); | |||
265 | if (metric & ~LSA_METRIC_MASK0x00ffffff) { | |||
266 | log_warnx("lsa_check: bad LSA router summary metric"); | |||
267 | return (0); | |||
268 | } | |||
269 | break; | |||
270 | case LSA_TYPE_INTRA_A_PREFIX0x2009: | |||
271 | if (!lsa_intra_a_pref_check(lsa, len)) | |||
272 | return (0); | |||
273 | break; | |||
274 | case LSA_TYPE_EXTERNAL0x4005: | |||
275 | /* AS-external-LSA are silently discarded in stub areas */ | |||
276 | if (nbr->area->stub) | |||
277 | return (0); | |||
278 | if (!lsa_asext_check(lsa, len)) | |||
279 | return (0); | |||
280 | break; | |||
281 | default: | |||
282 | log_warnx("lsa_check: unknown type %x", ntohs(lsa->hdr.type)(__uint16_t)(__builtin_constant_p(lsa->hdr.type) ? (__uint16_t )(((__uint16_t)(lsa->hdr.type) & 0xffU) << 8 | ( (__uint16_t)(lsa->hdr.type) & 0xff00U) >> 8) : __swap16md (lsa->hdr.type))); | |||
283 | return (0); | |||
284 | } | |||
285 | ||||
286 | /* MaxAge handling */ | |||
287 | if (lsa->hdr.age == htons(MAX_AGE)(__uint16_t)(__builtin_constant_p(3600) ? (__uint16_t)(((__uint16_t )(3600) & 0xffU) << 8 | ((__uint16_t)(3600) & 0xff00U ) >> 8) : __swap16md(3600)) && !nbr->self && lsa_find(nbr->iface, | |||
288 | lsa->hdr.type, lsa->hdr.ls_id, lsa->hdr.adv_rtr) == NULL((void*)0) && | |||
289 | !rde_nbr_loading(nbr->area)) { | |||
290 | /* | |||
291 | * if no neighbor in state Exchange or Loading | |||
292 | * ack LSA but don't add it. Needs to be a direct ack. | |||
293 | */ | |||
294 | rde_imsg_compose_ospfe(IMSG_LS_ACK, nbr->peerid, 0, &lsa->hdr, | |||
295 | sizeof(struct lsa_hdr)); | |||
296 | return (0); | |||
297 | } | |||
298 | ||||
299 | return (1); | |||
300 | } | |||
301 | ||||
302 | int | |||
303 | lsa_link_check(struct lsa *lsa, u_int16_t len) | |||
304 | { | |||
305 | char *buf = (char *)lsa; | |||
306 | struct lsa_link *llink; | |||
307 | u_int32_t i, off, npref; | |||
308 | int rv; | |||
309 | ||||
310 | llink = (struct lsa_link *)(buf + sizeof(lsa->hdr)); | |||
311 | off = sizeof(lsa->hdr) + sizeof(struct lsa_link); | |||
312 | if (off > len) { | |||
313 | log_warnx("lsa_link_check: invalid LSA link packet, " | |||
314 | "short header"); | |||
315 | return (0); | |||
316 | } | |||
317 | ||||
318 | len -= off; | |||
319 | npref = ntohl(llink->numprefix)(__uint32_t)(__builtin_constant_p(llink->numprefix) ? (__uint32_t )(((__uint32_t)(llink->numprefix) & 0xff) << 24 | ((__uint32_t)(llink->numprefix) & 0xff00) << 8 | ((__uint32_t)(llink->numprefix) & 0xff0000) >> 8 | ((__uint32_t)(llink->numprefix) & 0xff000000) >> 24) : __swap32md(llink->numprefix)); | |||
320 | ||||
321 | for (i = 0; i < npref; i++) { | |||
322 | rv = lsa_get_prefix(buf + off, len, NULL((void*)0)); | |||
323 | if (rv == -1) { | |||
324 | log_warnx("lsa_link_check: invalid LSA link packet"); | |||
325 | return (0); | |||
326 | } | |||
327 | off += rv; | |||
328 | len -= rv; | |||
329 | } | |||
330 | ||||
331 | return (1); | |||
332 | } | |||
333 | ||||
334 | int | |||
335 | lsa_intra_a_pref_check(struct lsa *lsa, u_int16_t len) | |||
336 | { | |||
337 | char *buf = (char *)lsa; | |||
338 | struct lsa_intra_prefix *iap; | |||
339 | u_int32_t i, off, npref; | |||
340 | int rv; | |||
341 | ||||
342 | iap = (struct lsa_intra_prefix *)(buf + sizeof(lsa->hdr)); | |||
343 | off = sizeof(lsa->hdr) + sizeof(struct lsa_intra_prefix); | |||
344 | if (off > len) { | |||
345 | log_warnx("lsa_intra_a_pref_check: " | |||
346 | "invalid LSA intra area prefix packet, short header"); | |||
347 | return (0); | |||
348 | } | |||
349 | ||||
350 | len -= off; | |||
351 | npref = ntohs(iap->numprefix)(__uint16_t)(__builtin_constant_p(iap->numprefix) ? (__uint16_t )(((__uint16_t)(iap->numprefix) & 0xffU) << 8 | ( (__uint16_t)(iap->numprefix) & 0xff00U) >> 8) : __swap16md (iap->numprefix)); | |||
352 | ||||
353 | for (i = 0; i < npref; i++) { | |||
354 | rv = lsa_get_prefix(buf + off, len, NULL((void*)0)); | |||
355 | if (rv == -1) { | |||
356 | log_warnx("lsa_intra_a_pref_check: " | |||
357 | "invalid LSA intra area prefix packet"); | |||
358 | return (0); | |||
359 | } | |||
360 | off += rv; | |||
361 | len -= rv; | |||
362 | } | |||
363 | ||||
364 | return (1); | |||
365 | } | |||
366 | ||||
/*
 * Validate an AS-external (type 0x4005) LSA: the advertised length must
 * exactly account for the fixed header, the embedded prefix, and every
 * optional trailing field selected by the metric flag bits.
 * Returns 1 if the LSA is well-formed, 0 otherwise (with a warning logged).
 */
int
lsa_asext_check(struct lsa *lsa, u_int16_t len)
{
	char			*buf = (char *)lsa;
	struct lsa_asext	*asext;
	struct in6_addr		 fw_addr;
	u_int32_t		 metric;
	u_int16_t		 ref_ls_type;
	int			 rv;
	u_int16_t		 total_len;

	asext = (struct lsa_asext *)(buf + sizeof(lsa->hdr));

	/* LSA bodies are 32-bit aligned; reject short or misaligned packets */
	if ((len % sizeof(u_int32_t)) ||
	    len < sizeof(lsa->hdr) + sizeof(*asext)) {
		log_warnx("lsa_asext_check: bad LSA as-external packet size");
		return (0);
	}

	total_len = sizeof(lsa->hdr) + sizeof(*asext);
	/* walk the (variable-length) address prefix; p == NULL: validate only */
	rv = lsa_get_prefix(&asext->prefix, len, NULL);
	if (rv == -1) {
		log_warnx("lsa_asext_check: bad LSA as-external packet");
		return (0);
	}
	/* rv counts the struct lsa_prefix already in *asext; add only the tail */
	total_len += rv - sizeof(struct lsa_prefix);

	metric = ntohl(asext->metric);
	if (metric & LSA_ASEXT_F_FLAG) {
		/* optional forwarding address follows the prefix */
		if (total_len + sizeof(fw_addr) < len) {
			bcopy(buf + total_len, &fw_addr, sizeof(fw_addr));
			/* unspecified/link-local forwarding addrs are invalid */
			if (IN6_IS_ADDR_UNSPECIFIED(&fw_addr) ||
			    IN6_IS_ADDR_LINKLOCAL(&fw_addr)) {
				log_warnx("lsa_asext_check: bad LSA "
				    "as-external forwarding address");
				return (0);
			}
		}
		total_len += sizeof(fw_addr);
	}

	/* optional external route tag */
	if (metric & LSA_ASEXT_T_FLAG)
		total_len += sizeof(u_int32_t);

	/* nonzero referenced LS type implies a referenced link-state ID */
	ref_ls_type = asext->prefix.metric;
	if (ref_ls_type != 0)
		total_len += sizeof(u_int32_t);

	/* every optional field accounted for: length must match exactly */
	if (len != total_len) {
		log_warnx("lsa_asext_check: bad LSA as-external length");
		return (0);
	}

	return (1);
}
422 | ||||
/*
 * Handle reception of an LSA that claims to be self-originated (adv_rtr
 * equals our router ID) but arrived from a remote neighbor.
 * Returns 1 when the LSA was handled here (caller must not install it),
 * 0 when it is not a stale self-originated LSA and normal processing
 * should continue.
 */
int
lsa_self(struct rde_nbr *nbr, struct lsa *lsa, struct vertex *v)
{
	struct lsa	*dummy;

	/* LSAs from the internal self-neighbor are genuinely ours */
	if (nbr->self)
		return (0);

	if (rde_router_id() != lsa->hdr.adv_rtr)
		return (0);

	if (v == NULL) {
		/* LSA is no longer announced, remove by premature aging.
		 * The LSA may not be altered because the caller may still
		 * use it, so a copy needs to be added to the LSDB.
		 * The copy will be reflooded via the default timeout handler.
		 */
		if ((dummy = malloc(ntohs(lsa->hdr.len))) == NULL)
			fatal("lsa_self");
		memcpy(dummy, lsa, ntohs(lsa->hdr.len));
		dummy->hdr.age = htons(MAX_AGE);
		/*
		 * The clue is that by using the remote nbr as originator
		 * the dummy LSA will be reflooded via the default timeout
		 * handler.
		 */
		/* ownership of dummy passes to the LSDB via lsa_add() */
		(void)lsa_add(rde_nbr_self(nbr->area), dummy);
		return (1);
	}

	/*
	 * LSA is still originated, just reflood it. But we need to create
	 * a new instance by setting the LSA sequence number equal to the
	 * one of new and calling lsa_refresh(). Flooding will be done by the
	 * caller.
	 */
	v->lsa->hdr.seq_num = lsa->hdr.seq_num;
	lsa_refresh(v);
	return (1);
}
463 | ||||
464 | int | |||
465 | lsa_add(struct rde_nbr *nbr, struct lsa *lsa) | |||
466 | { | |||
467 | struct lsa_tree *tree; | |||
468 | struct vertex *new, *old; | |||
469 | struct timeval tv, now, res; | |||
470 | int update = 1; | |||
471 | ||||
472 | if (LSA_IS_SCOPE_AS(ntohs(lsa->hdr.type))((((__uint16_t)(__builtin_constant_p(lsa->hdr.type) ? (__uint16_t )(((__uint16_t)(lsa->hdr.type) & 0xffU) << 8 | ( (__uint16_t)(lsa->hdr.type) & 0xff00U) >> 8) : __swap16md (lsa->hdr.type))) & 0x6000) == 0x4000)) | |||
473 | tree = &asext_tree; | |||
474 | else if (LSA_IS_SCOPE_AREA(ntohs(lsa->hdr.type))((((__uint16_t)(__builtin_constant_p(lsa->hdr.type) ? (__uint16_t )(((__uint16_t)(lsa->hdr.type) & 0xffU) << 8 | ( (__uint16_t)(lsa->hdr.type) & 0xff00U) >> 8) : __swap16md (lsa->hdr.type))) & 0x6000) == 0x2000)) | |||
475 | tree = &nbr->area->lsa_tree; | |||
476 | else if (LSA_IS_SCOPE_LLOCAL(ntohs(lsa->hdr.type))((((__uint16_t)(__builtin_constant_p(lsa->hdr.type) ? (__uint16_t )(((__uint16_t)(lsa->hdr.type) & 0xffU) << 8 | ( (__uint16_t)(lsa->hdr.type) & 0xff00U) >> 8) : __swap16md (lsa->hdr.type))) & 0x6000) == 0)) | |||
477 | tree = &nbr->iface->lsa_tree; | |||
478 | else | |||
479 | fatalx("%s: unknown scope type", __func__); | |||
480 | ||||
481 | new = vertex_get(lsa, nbr, tree); | |||
482 | old = RB_INSERT(lsa_tree, tree, new)lsa_tree_RB_INSERT(tree, new); | |||
483 | ||||
484 | if (old
| |||
485 | if (old->deleted && evtimer_pending(&old->ev, &tv)event_pending(&old->ev, 0x01, &tv)) { | |||
486 | /* new update added before hold time expired */ | |||
487 | gettimeofday(&now, NULL((void*)0)); | |||
488 | timersub(&tv, &now, &res)do { (&res)->tv_sec = (&tv)->tv_sec - (&now )->tv_sec; (&res)->tv_usec = (&tv)->tv_usec - (&now)->tv_usec; if ((&res)->tv_usec < 0) { (&res)->tv_sec--; (&res)->tv_usec += 1000000; } } while (0); | |||
489 | ||||
490 | /* remove old LSA and insert new LSA with delay */ | |||
491 | vertex_free(old); | |||
492 | RB_INSERT(lsa_tree, tree, new)lsa_tree_RB_INSERT(tree, new); | |||
493 | new->deleted = 1; | |||
494 | ||||
495 | if (evtimer_add(&new->ev, &res)event_add(&new->ev, &res) != 0) | |||
496 | fatal("lsa_add"); | |||
497 | return (1); | |||
498 | } | |||
499 | if (lsa_equal(new->lsa, old->lsa)) | |||
500 | update = 0; | |||
501 | vertex_free(old); | |||
502 | RB_INSERT(lsa_tree, tree, new)lsa_tree_RB_INSERT(tree, new); | |||
503 | } | |||
504 | ||||
505 | if (update) { | |||
506 | if (ntohs(lsa->hdr.type)(__uint16_t)(__builtin_constant_p(lsa->hdr.type) ? (__uint16_t )(((__uint16_t)(lsa->hdr.type) & 0xffU) << 8 | ( (__uint16_t)(lsa->hdr.type) & 0xff00U) >> 8) : __swap16md (lsa->hdr.type)) == LSA_TYPE_LINK0x0008) | |||
507 | orig_intra_area_prefix_lsas(nbr->area); | |||
508 | if (ntohs(lsa->hdr.type)(__uint16_t)(__builtin_constant_p(lsa->hdr.type) ? (__uint16_t )(((__uint16_t)(lsa->hdr.type) & 0xffU) << 8 | ( (__uint16_t)(lsa->hdr.type) & 0xff00U) >> 8) : __swap16md (lsa->hdr.type)) != LSA_TYPE_EXTERNAL0x4005) | |||
509 | nbr->area->dirty = 1; | |||
510 | start_spf_timer(); | |||
511 | } | |||
512 | ||||
513 | /* timeout handling either MAX_AGE or LS_REFRESH_TIME */ | |||
514 | timerclear(&tv)(&tv)->tv_sec = (&tv)->tv_usec = 0; | |||
515 | ||||
516 | if (nbr->self && ntohs(new->lsa->hdr.age)(__uint16_t)(__builtin_constant_p(new->lsa->hdr.age) ? ( __uint16_t)(((__uint16_t)(new->lsa->hdr.age) & 0xffU ) << 8 | ((__uint16_t)(new->lsa->hdr.age) & 0xff00U ) >> 8) : __swap16md(new->lsa->hdr.age)) == DEFAULT_AGE0) | |||
517 | tv.tv_sec = LS_REFRESH_TIME1800; | |||
518 | else | |||
519 | tv.tv_sec = MAX_AGE3600 - ntohs(new->lsa->hdr.age)(__uint16_t)(__builtin_constant_p(new->lsa->hdr.age) ? ( __uint16_t)(((__uint16_t)(new->lsa->hdr.age) & 0xffU ) << 8 | ((__uint16_t)(new->lsa->hdr.age) & 0xff00U ) >> 8) : __swap16md(new->lsa->hdr.age)); | |||
520 | ||||
521 | if (evtimer_add(&new->ev, &tv)event_add(&new->ev, &tv) != 0) | |||
522 | fatal("lsa_add: evtimer_add()"); | |||
523 | return (0); | |||
524 | } | |||
525 | ||||
526 | void | |||
527 | lsa_del(struct rde_nbr *nbr, struct lsa_hdr *lsa) | |||
528 | { | |||
529 | struct vertex *v; | |||
530 | struct timeval tv; | |||
531 | ||||
532 | v = lsa_find(nbr->iface, lsa->type, lsa->ls_id, lsa->adv_rtr); | |||
533 | if (v == NULL((void*)0)) | |||
534 | return; | |||
535 | ||||
536 | v->deleted = 1; | |||
537 | /* hold time to make sure that a new lsa is not added premature */ | |||
538 | timerclear(&tv)(&tv)->tv_sec = (&tv)->tv_usec = 0; | |||
539 | tv.tv_sec = MIN_LS_INTERVAL5; | |||
540 | if (evtimer_add(&v->ev, &tv)event_add(&v->ev, &tv) == -1) | |||
541 | fatal("lsa_del"); | |||
542 | } | |||
543 | ||||
544 | void | |||
545 | lsa_age(struct vertex *v) | |||
546 | { | |||
547 | struct timespec tp; | |||
548 | time_t now; | |||
549 | int d; | |||
550 | u_int16_t age; | |||
551 | ||||
552 | clock_gettime(CLOCK_MONOTONIC3, &tp); | |||
553 | now = tp.tv_sec; | |||
554 | ||||
555 | d = now - v->stamp; | |||
556 | /* set stamp so that at least new calls work */ | |||
557 | v->stamp = now; | |||
558 | ||||
559 | if (d < 0) { | |||
560 | log_warnx("lsa_age: time went backwards"); | |||
561 | return; | |||
562 | } | |||
563 | ||||
564 | age = ntohs(v->lsa->hdr.age)(__uint16_t)(__builtin_constant_p(v->lsa->hdr.age) ? (__uint16_t )(((__uint16_t)(v->lsa->hdr.age) & 0xffU) << 8 | ((__uint16_t)(v->lsa->hdr.age) & 0xff00U) >> 8) : __swap16md(v->lsa->hdr.age)); | |||
565 | if (age + d > MAX_AGE3600) | |||
566 | age = MAX_AGE3600; | |||
567 | else | |||
568 | age += d; | |||
569 | ||||
570 | v->lsa->hdr.age = htons(age)(__uint16_t)(__builtin_constant_p(age) ? (__uint16_t)(((__uint16_t )(age) & 0xffU) << 8 | ((__uint16_t)(age) & 0xff00U ) >> 8) : __swap16md(age)); | |||
571 | } | |||
572 | ||||
573 | struct vertex * | |||
574 | lsa_find(struct iface *iface, u_int16_t type, u_int32_t ls_id, | |||
575 | u_int32_t adv_rtr) | |||
576 | { | |||
577 | struct lsa_tree *tree; | |||
578 | ||||
579 | if (LSA_IS_SCOPE_AS(ntohs(type))((((__uint16_t)(__builtin_constant_p(type) ? (__uint16_t)(((__uint16_t )(type) & 0xffU) << 8 | ((__uint16_t)(type) & 0xff00U ) >> 8) : __swap16md(type))) & 0x6000) == 0x4000)) | |||
580 | tree = &asext_tree; | |||
581 | else if (LSA_IS_SCOPE_AREA(ntohs(type))((((__uint16_t)(__builtin_constant_p(type) ? (__uint16_t)(((__uint16_t )(type) & 0xffU) << 8 | ((__uint16_t)(type) & 0xff00U ) >> 8) : __swap16md(type))) & 0x6000) == 0x2000)) | |||
582 | tree = &iface->area->lsa_tree; | |||
583 | else if (LSA_IS_SCOPE_LLOCAL(ntohs(type))((((__uint16_t)(__builtin_constant_p(type) ? (__uint16_t)(((__uint16_t )(type) & 0xffU) << 8 | ((__uint16_t)(type) & 0xff00U ) >> 8) : __swap16md(type))) & 0x6000) == 0)) | |||
584 | tree = &iface->lsa_tree; | |||
585 | else | |||
586 | fatalx("unknown scope type"); | |||
587 | ||||
588 | return lsa_find_tree(tree, type, ls_id, adv_rtr); | |||
589 | ||||
590 | } | |||
591 | ||||
592 | struct vertex * | |||
593 | lsa_find_tree(struct lsa_tree *tree, u_int16_t type, u_int32_t ls_id, | |||
594 | u_int32_t adv_rtr) | |||
595 | { | |||
596 | struct vertex key; | |||
597 | struct vertex *v; | |||
598 | ||||
599 | key.ls_id = ntohl(ls_id)(__uint32_t)(__builtin_constant_p(ls_id) ? (__uint32_t)(((__uint32_t )(ls_id) & 0xff) << 24 | ((__uint32_t)(ls_id) & 0xff00) << 8 | ((__uint32_t)(ls_id) & 0xff0000) >> 8 | ((__uint32_t)(ls_id) & 0xff000000) >> 24) : __swap32md (ls_id)); | |||
600 | key.adv_rtr = ntohl(adv_rtr)(__uint32_t)(__builtin_constant_p(adv_rtr) ? (__uint32_t)(((__uint32_t )(adv_rtr) & 0xff) << 24 | ((__uint32_t)(adv_rtr) & 0xff00) << 8 | ((__uint32_t)(adv_rtr) & 0xff0000) >> 8 | ((__uint32_t)(adv_rtr) & 0xff000000) >> 24) : __swap32md (adv_rtr)); | |||
601 | key.type = ntohs(type)(__uint16_t)(__builtin_constant_p(type) ? (__uint16_t)(((__uint16_t )(type) & 0xffU) << 8 | ((__uint16_t)(type) & 0xff00U ) >> 8) : __swap16md(type)); | |||
602 | ||||
603 | v = RB_FIND(lsa_tree, tree, &key)lsa_tree_RB_FIND(tree, &key); | |||
604 | ||||
605 | /* LSA that are deleted are not findable */ | |||
606 | if (v && v->deleted) | |||
607 | return (NULL((void*)0)); | |||
608 | ||||
609 | if (v) | |||
610 | lsa_age(v); | |||
611 | ||||
612 | return (v); | |||
613 | } | |||
614 | ||||
615 | struct vertex * | |||
616 | lsa_find_rtr(struct area *area, u_int32_t rtr_id) | |||
617 | { | |||
618 | return lsa_find_rtr_frag(area, rtr_id, 0); | |||
619 | } | |||
620 | ||||
/*
 * Find the n-th non-deleted router-LSA fragment originated by rtr_id in
 * the given area (a router may split its router-LSA across multiple
 * ls_ids). Returns NULL when fewer than n+1 live fragments exist.
 */
struct vertex *
lsa_find_rtr_frag(struct area *area, u_int32_t rtr_id, unsigned int n)
{
	struct vertex	*v;
	struct vertex	 key;
	unsigned int	 i;

	/* ls_id 0 sorts before all fragments of this (type, adv_rtr) */
	key.ls_id = 0;
	key.adv_rtr = ntohl(rtr_id);
	key.type = LSA_TYPE_ROUTER;

	i = 0;
	/* NFIND: smallest element >= key, then walk in order */
	v = RB_NFIND(lsa_tree, &area->lsa_tree, &key);
	while (v) {
		if (v->type != LSA_TYPE_ROUTER ||
		    v->adv_rtr != ntohl(rtr_id)) {
			/* no more interesting LSAs */
			v = NULL;
			break;
		}
		/* deleted fragments do not count towards n */
		if (!v->deleted) {
			if (i >= n)
				break;
			i++;
		}
		v = RB_NEXT(lsa_tree, &area->lsa_tree, v);
	}

	if (v) {
		/* i == n means the loop stopped on a live n-th fragment */
		if (i == n)
			lsa_age(v);
		else
			v = NULL;
	}

	return (v);
}
658 | ||||
/*
 * Pick a link-state ID for lsa: if an existing LSA of the same type and
 * advertising router matches according to cmp(), reuse its ls_id;
 * otherwise return the smallest ls_id not yet in use. The tree walk is
 * in ascending ls_id order, so gaps are detected as they are passed.
 * Result is returned in network byte order.
 */
u_int32_t
lsa_find_lsid(struct lsa_tree *tree, int (*cmp)(struct lsa *, struct lsa *),
    struct lsa *lsa)
{
#define MIN(x, y)	((x) < (y) ? (x) : (y))
	struct vertex	*v;
	struct vertex	 key;
	u_int32_t	 min, cur;

	key.ls_id = 0;
	key.adv_rtr = ntohl(lsa->hdr.adv_rtr);
	key.type = ntohs(lsa->hdr.type);

	/* cur tracks the last ls_id seen; min the smallest unused one */
	cur = 0;
	min = 0xffffffffU;
	v = RB_NFIND(lsa_tree, tree, &key);
	while (v) {
		if (v->type != key.type ||
		    v->adv_rtr != key.adv_rtr) {
			/* no more interesting LSAs */
			min = MIN(min, cur + 1);
			return (htonl(min));
		}
		if (cmp(lsa, v->lsa) == 0) {
			/* match, return this ls_id */
			return (htonl(v->ls_id));
		}
		/* a gap before v->ls_id: cur + 1 is free */
		if (v->ls_id > cur + 1)
			min = cur + 1;
		cur = v->ls_id;
		/* cur + 1 wrapping would mean the full 2^32 id space is used */
		if (cur + 1 < cur)
			fatalx("King Bula sez: somebody got to many LSA");
		v = RB_NEXT(lsa_tree, tree, v);
	}
	min = MIN(min, cur + 1);
	return (htonl(min));
#undef MIN
}
697 | ||||
/*
 * Count the links described by a router- or network-LSA. Router-LSAs may
 * be fragmented, so all fragments of the originating router are summed;
 * the loop reassigns v to each successive fragment until none remain.
 */
u_int16_t
lsa_num_links(struct vertex *v)
{
	unsigned int	 n = 1;		/* next fragment index to fetch */
	u_int16_t	 nlinks = 0;

	switch (v->type) {
	case LSA_TYPE_ROUTER:
		do {
			/* fixed header + lsa_rtr, remainder is link entries */
			nlinks += ((ntohs(v->lsa->hdr.len) -
			    sizeof(struct lsa_hdr) - sizeof(struct lsa_rtr)) /
			    sizeof(struct lsa_rtr_link));
			v = lsa_find_rtr_frag(v->area, htonl(v->adv_rtr), n++);
		} while (v);
		return nlinks;
	case LSA_TYPE_NETWORK:
		return ((ntohs(v->lsa->hdr.len) - sizeof(struct lsa_hdr) -
		    sizeof(struct lsa_net)) / sizeof(struct lsa_net_link));
	default:
		fatalx("lsa_num_links: invalid LSA type");
	}

	/* not reached: fatalx() does not return */
	return (0);
}
722 | ||||
/*
 * Send a database snapshot to a neighbor entering exchange: headers only
 * for live LSAs (IMSG_DB_SNAPSHOT), full LSAs for those already at
 * MAX_AGE (IMSG_LS_SNAP). Walks the area tree, then the neighbor's
 * interface (link-local) tree, then the AS-external tree — the tail of
 * the loop advances the tree pointer through exactly that sequence.
 */
void
lsa_snap(struct rde_nbr *nbr)
{
	struct lsa_tree	*tree = &nbr->area->lsa_tree;
	struct vertex	*v;

	do {
		RB_FOREACH(v, lsa_tree, tree) {
			if (v->deleted)
				continue;
			lsa_age(v);
			if (ntohs(v->lsa->hdr.age) >= MAX_AGE) {
				/* aged-out LSA: ship the whole thing */
				rde_imsg_compose_ospfe(IMSG_LS_SNAP,
				    nbr->peerid, 0, &v->lsa->hdr,
				    ntohs(v->lsa->hdr.len));
			} else {
				/* live LSA: header is enough for the DD list */
				rde_imsg_compose_ospfe(IMSG_DB_SNAPSHOT,
				    nbr->peerid, 0, &v->lsa->hdr,
				    sizeof(struct lsa_hdr));
			}
		}
		if (tree == &asext_tree)
			break;
		if (tree == &nbr->area->lsa_tree)
			tree = &nbr->iface->lsa_tree;
		else
			tree = &asext_tree;
	} while (1);
}
752 | ||||
/*
 * Dump LSAs from a tree to the control socket for "ospf6ctl show
 * database" variants. The switch acts as a filter: "break" falls through
 * to the compose call below (LSA matches the requested view), "continue"
 * skips the LSA entirely.
 */
void
lsa_dump(struct lsa_tree *tree, int imsg_type, pid_t pid)
{
	struct vertex	*v;

	RB_FOREACH(v, lsa_tree, tree) {
		if (v->deleted)
			continue;
		lsa_age(v);
		switch (imsg_type) {
		case IMSG_CTL_SHOW_DATABASE:
			/* unfiltered: every live LSA */
			break;
		case IMSG_CTL_SHOW_DB_SELF:
			if (v->lsa->hdr.adv_rtr == rde_router_id())
				break;
			continue;
		case IMSG_CTL_SHOW_DB_EXT:
			if (v->type == LSA_TYPE_EXTERNAL)
				break;
			continue;
		case IMSG_CTL_SHOW_DB_LINK:
			if (v->type == LSA_TYPE_LINK)
				break;
			continue;
		case IMSG_CTL_SHOW_DB_NET:
			if (v->type == LSA_TYPE_NETWORK)
				break;
			continue;
		case IMSG_CTL_SHOW_DB_RTR:
			if (v->type == LSA_TYPE_ROUTER)
				break;
			continue;
		case IMSG_CTL_SHOW_DB_INTRA:
			if (v->type == LSA_TYPE_INTRA_A_PREFIX)
				break;
			continue;
		case IMSG_CTL_SHOW_DB_SUM:
			if (v->type == LSA_TYPE_INTER_A_PREFIX)
				break;
			continue;
		case IMSG_CTL_SHOW_DB_ASBR:
			if (v->type == LSA_TYPE_INTER_A_ROUTER)
				break;
			continue;
		default:
			log_warnx("lsa_dump: unknown imsg type");
			return;
		}
		rde_imsg_compose_ospfe(imsg_type, 0, pid, &v->lsa->hdr,
		    ntohs(v->lsa->hdr.len));
	}
}
805 | ||||
/* ARGSUSED */
/*
 * Event callback for a vertex's timer. Three cases:
 *   - deleted and fully aged: free the vertex (final removal),
 *   - deleted but not yet MAX_AGE: resurrect it and reflood (this is how
 *     the premature-aging dummies from lsa_self() get flooded),
 *   - live: refresh if self-originated, then reflood.
 */
void
lsa_timeout(int fd, short event, void *bula)
{
	struct vertex	*v = bula;
	struct timeval	 tv;

	lsa_age(v);

	if (v->deleted) {
		if (ntohs(v->lsa->hdr.age) >= MAX_AGE) {
			vertex_free(v);
		} else {
			v->deleted = 0;

			/* schedule recalculation of the RIB */
			if (ntohs(v->lsa->hdr.type) == LSA_TYPE_LINK)
				orig_intra_area_prefix_lsas(v->area);
			if (ntohs(v->lsa->hdr.type) != LSA_TYPE_EXTERNAL)
				v->area->dirty = 1;
			start_spf_timer();

			rde_imsg_compose_ospfe(IMSG_LS_FLOOD, v->peerid, 0,
			    v->lsa, ntohs(v->lsa->hdr.len));

			/* timeout handling either MAX_AGE or LS_REFRESH_TIME */
			timerclear(&tv);
			if (v->self)
				tv.tv_sec = LS_REFRESH_TIME;
			else
				tv.tv_sec = MAX_AGE - ntohs(v->lsa->hdr.age);

			if (evtimer_add(&v->ev, &tv) != 0)
				fatal("lsa_timeout");
		}
		return;
	}

	/* live self-originated LSA hit LS_REFRESH_TIME: bump seq_num */
	if (v->self && ntohs(v->lsa->hdr.age) < MAX_AGE)
		lsa_refresh(v);

	rde_imsg_compose_ospfe(IMSG_LS_FLOOD, v->peerid, 0,
	    v->lsa, ntohs(v->lsa->hdr.len));
}
850 | ||||
/*
 * Create a new instance of a self-originated LSA: reset the age (unless
 * it is being prematurely aged out), bump the sequence number, redo the
 * checksum, and rearm the refresh timer. Flooding is the caller's job.
 */
void
lsa_refresh(struct vertex *v)
{
	struct timeval	 tv;
	struct timespec	 tp;
	u_int32_t	 seqnum;
	u_int16_t	 len;

	/* refresh LSA by increasing sequence number by one */
	if (v->self && ntohs(v->lsa->hdr.age) >= MAX_AGE)
		/* self originated network that is currently being removed */
		v->lsa->hdr.age = htons(MAX_AGE);
	else
		v->lsa->hdr.age = htons(DEFAULT_AGE);
	seqnum = ntohl(v->lsa->hdr.seq_num);
	if (seqnum++ == MAX_SEQ_NUM)
		/* XXX fix me */
		fatalx("sequence number wrapping");
	v->lsa->hdr.seq_num = htonl(seqnum);

	/* recalculate checksum (field must be zero while summing) */
	len = ntohs(v->lsa->hdr.len);
	v->lsa->hdr.ls_chksum = 0;
	v->lsa->hdr.ls_chksum = htons(iso_cksum(v->lsa, len, LS_CKSUM_OFFSET));

	clock_gettime(CLOCK_MONOTONIC, &tp);
	v->changed = v->stamp = tp.tv_sec;

	timerclear(&tv);
	tv.tv_sec = LS_REFRESH_TIME;
	if (evtimer_add(&v->ev, &tv) == -1)
		fatal("lsa_refresh");
}
884 | ||||
/*
 * Merge a freshly built self-originated LSA into the LSDB.
 * Ownership of lsa always transfers out of the caller: it is either
 * handed to lsa_add() (v == NULL), freed here (content unchanged), or
 * installed as v->lsa (old copy freed).
 */
void
lsa_merge(struct rde_nbr *nbr, struct lsa *lsa, struct vertex *v)
{
	struct timeval	 tv;
	struct timespec	 tp;
	time_t		 now;
	u_int16_t	 len;

	if (v == NULL) {
		if (lsa_add(nbr, lsa))
			/* delayed update */
			return;
		/*
		 * lsa is now owned by the LSDB (stored by vertex_get() inside
		 * lsa_add()) but still valid, so reading hdr.len here is safe.
		 */
		rde_imsg_compose_ospfe(IMSG_LS_FLOOD, nbr->peerid, 0,
		    lsa, ntohs(lsa->hdr.len));
		return;
	}

	/* set the seq_num to the current one. lsa_refresh() will do the ++ */
	lsa->hdr.seq_num = v->lsa->hdr.seq_num;
	/* recalculate checksum */
	len = ntohs(lsa->hdr.len);
	lsa->hdr.ls_chksum = 0;
	lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	/* compare LSA most header fields are equal so don't check them */
	if (lsa_equal(lsa, v->lsa)) {
		/* content unchanged: drop the new copy, keep the old */
		free(lsa);
		return;
	}

	/* overwrite the lsa all other fields are unaffected */
	free(v->lsa);
	v->lsa = lsa;
	if (v->type == LSA_TYPE_LINK)
		orig_intra_area_prefix_lsas(nbr->area);
	if (v->type != LSA_TYPE_EXTERNAL)
		nbr->area->dirty = 1;
	start_spf_timer();

	/* set correct timeout for reflooding the LSA */
	clock_gettime(CLOCK_MONOTONIC, &tp);
	now = tp.tv_sec;
	timerclear(&tv);
	/* rate-limit reflooding to one new instance per MIN_LS_INTERVAL */
	if (v->changed + MIN_LS_INTERVAL >= now)
		tv.tv_sec = MIN_LS_INTERVAL;
	if (evtimer_add(&v->ev, &tv) == -1)
		fatal("lsa_merge");
}
933 | ||||
934 | void | |||
935 | lsa_remove_invalid_sums(struct area *area) | |||
936 | { | |||
937 | struct lsa_tree *tree = &area->lsa_tree; | |||
938 | struct vertex *v, *nv; | |||
939 | ||||
940 | /* XXX speed me up */ | |||
941 | for (v = RB_MIN(lsa_tree, tree)lsa_tree_RB_MINMAX(tree, -1); v != NULL((void*)0); v = nv) { | |||
942 | nv = RB_NEXT(lsa_tree, tree, v)lsa_tree_RB_NEXT(v); | |||
943 | if ((v->type == LSA_TYPE_INTER_A_PREFIX0x2003 || | |||
944 | v->type == LSA_TYPE_INTER_A_ROUTER0x2004) && | |||
945 | v->self && v->cost == LS_INFINITY0xffffff && | |||
946 | v->deleted == 0) { | |||
947 | /* | |||
948 | * age the lsa and call lsa_timeout() which will | |||
949 | * actually remove it from the database. | |||
950 | */ | |||
951 | v->lsa->hdr.age = htons(MAX_AGE)(__uint16_t)(__builtin_constant_p(3600) ? (__uint16_t)(((__uint16_t )(3600) & 0xffU) << 8 | ((__uint16_t)(3600) & 0xff00U ) >> 8) : __swap16md(3600)); | |||
952 | lsa_timeout(0, 0, v); | |||
953 | } | |||
954 | } | |||
955 | } | |||
956 | ||||
957 | int | |||
958 | lsa_equal(struct lsa *a, struct lsa *b) | |||
959 | { | |||
960 | /* | |||
961 | * compare LSA that already have same type, adv_rtr and ls_id | |||
962 | * so not all header need to be compared | |||
963 | */ | |||
964 | if (a == NULL((void*)0) || b == NULL((void*)0)) | |||
965 | return (0); | |||
966 | if (a->hdr.len != b->hdr.len) | |||
967 | return (0); | |||
968 | /* LSAs with age MAX_AGE are never equal */ | |||
969 | if (a->hdr.age == htons(MAX_AGE)(__uint16_t)(__builtin_constant_p(3600) ? (__uint16_t)(((__uint16_t )(3600) & 0xffU) << 8 | ((__uint16_t)(3600) & 0xff00U ) >> 8) : __swap16md(3600)) || b->hdr.age == htons(MAX_AGE)(__uint16_t)(__builtin_constant_p(3600) ? (__uint16_t)(((__uint16_t )(3600) & 0xffU) << 8 | ((__uint16_t)(3600) & 0xff00U ) >> 8) : __swap16md(3600))) | |||
970 | return (0); | |||
971 | if (memcmp(&a->data, &b->data, ntohs(a->hdr.len)(__uint16_t)(__builtin_constant_p(a->hdr.len) ? (__uint16_t )(((__uint16_t)(a->hdr.len) & 0xffU) << 8 | ((__uint16_t )(a->hdr.len) & 0xff00U) >> 8) : __swap16md(a-> hdr.len)) - | |||
972 | sizeof(struct lsa_hdr))) | |||
973 | return (0); | |||
974 | ||||
975 | return (1); | |||
976 | } | |||
977 | ||||
/*
 * Parse one variable-length LSA prefix at buf (at most len bytes).
 * When p is non-NULL the prefix is also copied into *p; with p == NULL
 * this is a pure validation/length pass. Returns the number of bytes
 * consumed, or -1 if the prefix would overrun len.
 */
int
lsa_get_prefix(void *buf, u_int16_t len, struct rt_prefix *p)
{
	struct lsa_prefix	*lp = buf;
	u_int32_t		*buf32, *addr = NULL;
	u_int8_t		 prefixlen;
	u_int16_t		 consumed;

	if (len < sizeof(*lp))
		return (-1);

	prefixlen = lp->prefixlen;

	if (p) {
		bzero(p, sizeof(*p));
		p->prefixlen = lp->prefixlen;
		p->options = lp->options;
		p->metric = ntohs(lp->metric);
		/* destination for the 32-bit address words copied below */
		addr = (u_int32_t *)&p->prefix;
	}

	buf32 = (u_int32_t *)(lp + 1);
	consumed = sizeof(*lp);

	/*
	 * Note: prefixlen is reused here as a countdown of the 32-bit
	 * words encoding the (bit-length) prefix, per LSA_PREFIXSIZE.
	 */
	for (prefixlen = LSA_PREFIXSIZE(prefixlen) / sizeof(u_int32_t);
	    prefixlen > 0; prefixlen--) {
		/* bounds-check each word before consuming it */
		if (len < consumed + sizeof(u_int32_t))
			return (-1);
		if (addr)
			*addr++ = *buf32++;
		consumed += sizeof(u_int32_t);
	}

	return (consumed);
}