Bug Summary

File: src/usr.sbin/ospfd/rde_lsdb.c
Warning: line 39, column 1
Use of memory after it is freed
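
The path below (analyzer steps 1 through 26) runs lsa_generate_stub_sums() -> lsa_merge() -> lsa_add(): the first RB_INSERT() at rde_lsdb.c:397 finds an existing vertex and returns it without linking the new one, vertex_free() then removes and frees that old vertex, and the analyzer reports the use of freed memory inside the tree walk of the second RB_INSERT() at rde_lsdb.c:417 (the RB_GENERATE expansion at source line 39). The stand-alone sketch below only mirrors that insert/duplicate/free/re-insert shape with <sys/tree.h>; struct node, node_cmp() and the key value are hypothetical stand-ins for ospfd's struct vertex and lsa_compare(), and it is neither ospfd code nor a confirmed reproducer of the warning.

/*
 * Stand-alone sketch of the insert/duplicate/free/re-insert shape that the
 * analyzer walks through in lsa_add().  struct node, node_cmp() and main()
 * are hypothetical stand-ins; this is not ospfd code and not a confirmed
 * reproducer of the warning.
 */
#include <sys/tree.h>
#include <stdlib.h>

struct node {
    RB_ENTRY(node)  entry;
    int             key;
};

static int
node_cmp(struct node *a, struct node *b)
{
    return (a->key < b->key ? -1 : a->key > b->key);
}

RB_HEAD(node_tree, node);
RB_GENERATE(node_tree, node, entry, node_cmp)

int
main(void)
{
    struct node_tree head = RB_INITIALIZER(&head);
    struct node *first, *new, *old;

    if ((first = calloc(1, sizeof(*first))) == NULL ||
        (new = calloc(1, sizeof(*new))) == NULL)
        return (1);
    first->key = new->key = 42;         /* same key forces the duplicate case */
    RB_INSERT(node_tree, &head, first);

    /* Duplicate key: the existing node is returned and "new" is NOT linked
     * into the tree, as with the first RB_INSERT() at rde_lsdb.c:397. */
    old = RB_INSERT(node_tree, &head, new);
    if (old != NULL) {
        /* What vertex_free(old) does at rde_lsdb.c:416: unlink, then free. */
        RB_REMOVE(node_tree, &head, old);
        free(old);
        /* The re-insert at rde_lsdb.c:417.  Inside this tree walk (the
         * RB_GENERATE expansion at source line 39) the analyzer reports
         * "Use of memory after it is freed". */
        RB_INSERT(node_tree, &head, new);
    }
    return (0);
}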

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name rde_lsdb.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 1 -pic-is-pie -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/usr.sbin/ospfd/obj -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/usr.sbin/ospfd -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -fdebug-compilation-dir=/usr/src/usr.sbin/ospfd/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c /usr/src/usr.sbin/ospfd/rde_lsdb.c
1/* $OpenBSD: rde_lsdb.c,v 1.51 2020/10/05 09:19:05 jan Exp $ */
2
3/*
4 * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#include <sys/types.h>
20#include <sys/tree.h>
21#include <stdlib.h>
22#include <string.h>
23#include <unistd.h>
24
25#include "ospf.h"
26#include "ospfd.h"
27#include "rde.h"
28#include "log.h"
29
30struct vertex *vertex_get(struct lsa *, struct rde_nbr *, struct lsa_tree *);
31
32int lsa_router_check(struct lsa *, u_int16_t);
33struct vertex *lsa_find_tree(struct lsa_tree *, u_int16_t, u_int32_t,
34 u_int32_t);
35void lsa_timeout(int, short, void *);
36void lsa_refresh(struct vertex *);
37int lsa_equal(struct lsa *, struct lsa *);
38
39RB_GENERATE(lsa_tree, vertex, entry, lsa_compare)void lsa_tree_RB_INSERT_COLOR(struct lsa_tree *head, struct vertex
*elm) { struct vertex *parent, *gparent, *tmp; while ((parent
= (elm)->entry.rbe_parent) && (parent)->entry.
rbe_color == 1) { gparent = (parent)->entry.rbe_parent; if
(parent == (gparent)->entry.rbe_left) { tmp = (gparent)->
entry.rbe_right; if (tmp && (tmp)->entry.rbe_color
== 1) { (tmp)->entry.rbe_color = 0; do { (parent)->entry
.rbe_color = 0; (gparent)->entry.rbe_color = 1; } while (0
); elm = gparent; continue; } if ((parent)->entry.rbe_right
== elm) { do { (tmp) = (parent)->entry.rbe_right; if (((parent
)->entry.rbe_right = (tmp)->entry.rbe_left)) { ((tmp)->
entry.rbe_left)->entry.rbe_parent = (parent); } do {} while
(0); if (((tmp)->entry.rbe_parent = (parent)->entry.rbe_parent
)) { if ((parent) == ((parent)->entry.rbe_parent)->entry
.rbe_left) ((parent)->entry.rbe_parent)->entry.rbe_left
= (tmp); else ((parent)->entry.rbe_parent)->entry.rbe_right
= (tmp); } else (head)->rbh_root = (tmp); (tmp)->entry
.rbe_left = (parent); (parent)->entry.rbe_parent = (tmp); do
{} while (0); if (((tmp)->entry.rbe_parent)) do {} while (
0); } while (0); tmp = parent; parent = elm; elm = tmp; } do {
(parent)->entry.rbe_color = 0; (gparent)->entry.rbe_color
= 1; } while (0); do { (tmp) = (gparent)->entry.rbe_left;
if (((gparent)->entry.rbe_left = (tmp)->entry.rbe_right
)) { ((tmp)->entry.rbe_right)->entry.rbe_parent = (gparent
); } do {} while (0); if (((tmp)->entry.rbe_parent = (gparent
)->entry.rbe_parent)) { if ((gparent) == ((gparent)->entry
.rbe_parent)->entry.rbe_left) ((gparent)->entry.rbe_parent
)->entry.rbe_left = (tmp); else ((gparent)->entry.rbe_parent
)->entry.rbe_right = (tmp); } else (head)->rbh_root = (
tmp); (tmp)->entry.rbe_right = (gparent); (gparent)->entry
.rbe_parent = (tmp); do {} while (0); if (((tmp)->entry.rbe_parent
)) do {} while (0); } while (0); } else { tmp = (gparent)->
entry.rbe_left; if (tmp && (tmp)->entry.rbe_color ==
1) { (tmp)->entry.rbe_color = 0; do { (parent)->entry.
rbe_color = 0; (gparent)->entry.rbe_color = 1; } while (0)
; elm = gparent; continue; } if ((parent)->entry.rbe_left ==
elm) { do { (tmp) = (parent)->entry.rbe_left; if (((parent
)->entry.rbe_left = (tmp)->entry.rbe_right)) { ((tmp)->
entry.rbe_right)->entry.rbe_parent = (parent); } do {} while
(0); if (((tmp)->entry.rbe_parent = (parent)->entry.rbe_parent
)) { if ((parent) == ((parent)->entry.rbe_parent)->entry
.rbe_left) ((parent)->entry.rbe_parent)->entry.rbe_left
= (tmp); else ((parent)->entry.rbe_parent)->entry.rbe_right
= (tmp); } else (head)->rbh_root = (tmp); (tmp)->entry
.rbe_right = (parent); (parent)->entry.rbe_parent = (tmp);
do {} while (0); if (((tmp)->entry.rbe_parent)) do {} while
(0); } while (0); tmp = parent; parent = elm; elm = tmp; } do
{ (parent)->entry.rbe_color = 0; (gparent)->entry.rbe_color
= 1; } while (0); do { (tmp) = (gparent)->entry.rbe_right
; if (((gparent)->entry.rbe_right = (tmp)->entry.rbe_left
)) { ((tmp)->entry.rbe_left)->entry.rbe_parent = (gparent
); } do {} while (0); if (((tmp)->entry.rbe_parent = (gparent
)->entry.rbe_parent)) { if ((gparent) == ((gparent)->entry
.rbe_parent)->entry.rbe_left) ((gparent)->entry.rbe_parent
)->entry.rbe_left = (tmp); else ((gparent)->entry.rbe_parent
)->entry.rbe_right = (tmp); } else (head)->rbh_root = (
tmp); (tmp)->entry.rbe_left = (gparent); (gparent)->entry
.rbe_parent = (tmp); do {} while (0); if (((tmp)->entry.rbe_parent
)) do {} while (0); } while (0); } } (head->rbh_root)->
entry.rbe_color = 0; } void lsa_tree_RB_REMOVE_COLOR(struct lsa_tree
*head, struct vertex *parent, struct vertex *elm) { struct vertex
*tmp; while ((elm == ((void*)0) || (elm)->entry.rbe_color
== 0) && elm != (head)->rbh_root) { if ((parent)->
entry.rbe_left == elm) { tmp = (parent)->entry.rbe_right; if
((tmp)->entry.rbe_color == 1) { do { (tmp)->entry.rbe_color
= 0; (parent)->entry.rbe_color = 1; } while (0); do { (tmp
) = (parent)->entry.rbe_right; if (((parent)->entry.rbe_right
= (tmp)->entry.rbe_left)) { ((tmp)->entry.rbe_left)->
entry.rbe_parent = (parent); } do {} while (0); if (((tmp)->
entry.rbe_parent = (parent)->entry.rbe_parent)) { if ((parent
) == ((parent)->entry.rbe_parent)->entry.rbe_left) ((parent
)->entry.rbe_parent)->entry.rbe_left = (tmp); else ((parent
)->entry.rbe_parent)->entry.rbe_right = (tmp); } else (
head)->rbh_root = (tmp); (tmp)->entry.rbe_left = (parent
); (parent)->entry.rbe_parent = (tmp); do {} while (0); if
(((tmp)->entry.rbe_parent)) do {} while (0); } while (0);
tmp = (parent)->entry.rbe_right; } if (((tmp)->entry.rbe_left
== ((void*)0) || ((tmp)->entry.rbe_left)->entry.rbe_color
== 0) && ((tmp)->entry.rbe_right == ((void*)0) ||
((tmp)->entry.rbe_right)->entry.rbe_color == 0)) { (tmp
)->entry.rbe_color = 1; elm = parent; parent = (elm)->entry
.rbe_parent; } else { if ((tmp)->entry.rbe_right == ((void
*)0) || ((tmp)->entry.rbe_right)->entry.rbe_color == 0)
{ struct vertex *oleft; if ((oleft = (tmp)->entry.rbe_left
)) (oleft)->entry.rbe_color = 0; (tmp)->entry.rbe_color
= 1; do { (oleft) = (tmp)->entry.rbe_left; if (((tmp)->
entry.rbe_left = (oleft)->entry.rbe_right)) { ((oleft)->
entry.rbe_right)->entry.rbe_parent = (tmp); } do {} while (
0); if (((oleft)->entry.rbe_parent = (tmp)->entry.rbe_parent
)) { if ((tmp) == ((tmp)->entry.rbe_parent)->entry.rbe_left
) ((tmp)->entry.rbe_parent)->entry.rbe_left = (oleft); else
((tmp)->entry.rbe_parent)->entry.rbe_right = (oleft); }
else (head)->rbh_root = (oleft); (oleft)->entry.rbe_right
= (tmp); (tmp)->entry.rbe_parent = (oleft); do {} while (
0); if (((oleft)->entry.rbe_parent)) do {} while (0); } while
(0); tmp = (parent)->entry.rbe_right; } (tmp)->entry.rbe_color
= (parent)->entry.rbe_color; (parent)->entry.rbe_color
= 0; if ((tmp)->entry.rbe_right) ((tmp)->entry.rbe_right
)->entry.rbe_color = 0; do { (tmp) = (parent)->entry.rbe_right
; if (((parent)->entry.rbe_right = (tmp)->entry.rbe_left
)) { ((tmp)->entry.rbe_left)->entry.rbe_parent = (parent
); } do {} while (0); if (((tmp)->entry.rbe_parent = (parent
)->entry.rbe_parent)) { if ((parent) == ((parent)->entry
.rbe_parent)->entry.rbe_left) ((parent)->entry.rbe_parent
)->entry.rbe_left = (tmp); else ((parent)->entry.rbe_parent
)->entry.rbe_right = (tmp); } else (head)->rbh_root = (
tmp); (tmp)->entry.rbe_left = (parent); (parent)->entry
.rbe_parent = (tmp); do {} while (0); if (((tmp)->entry.rbe_parent
)) do {} while (0); } while (0); elm = (head)->rbh_root; break
; } } else { tmp = (parent)->entry.rbe_left; if ((tmp)->
entry.rbe_color == 1) { do { (tmp)->entry.rbe_color = 0; (
parent)->entry.rbe_color = 1; } while (0); do { (tmp) = (parent
)->entry.rbe_left; if (((parent)->entry.rbe_left = (tmp
)->entry.rbe_right)) { ((tmp)->entry.rbe_right)->entry
.rbe_parent = (parent); } do {} while (0); if (((tmp)->entry
.rbe_parent = (parent)->entry.rbe_parent)) { if ((parent) ==
((parent)->entry.rbe_parent)->entry.rbe_left) ((parent
)->entry.rbe_parent)->entry.rbe_left = (tmp); else ((parent
)->entry.rbe_parent)->entry.rbe_right = (tmp); } else (
head)->rbh_root = (tmp); (tmp)->entry.rbe_right = (parent
); (parent)->entry.rbe_parent = (tmp); do {} while (0); if
(((tmp)->entry.rbe_parent)) do {} while (0); } while (0);
tmp = (parent)->entry.rbe_left; } if (((tmp)->entry.rbe_left
== ((void*)0) || ((tmp)->entry.rbe_left)->entry.rbe_color
== 0) && ((tmp)->entry.rbe_right == ((void*)0) ||
((tmp)->entry.rbe_right)->entry.rbe_color == 0)) { (tmp
)->entry.rbe_color = 1; elm = parent; parent = (elm)->entry
.rbe_parent; } else { if ((tmp)->entry.rbe_left == ((void*
)0) || ((tmp)->entry.rbe_left)->entry.rbe_color == 0) {
struct vertex *oright; if ((oright = (tmp)->entry.rbe_right
)) (oright)->entry.rbe_color = 0; (tmp)->entry.rbe_color
= 1; do { (oright) = (tmp)->entry.rbe_right; if (((tmp)->
entry.rbe_right = (oright)->entry.rbe_left)) { ((oright)->
entry.rbe_left)->entry.rbe_parent = (tmp); } do {} while (
0); if (((oright)->entry.rbe_parent = (tmp)->entry.rbe_parent
)) { if ((tmp) == ((tmp)->entry.rbe_parent)->entry.rbe_left
) ((tmp)->entry.rbe_parent)->entry.rbe_left = (oright);
else ((tmp)->entry.rbe_parent)->entry.rbe_right = (oright
); } else (head)->rbh_root = (oright); (oright)->entry.
rbe_left = (tmp); (tmp)->entry.rbe_parent = (oright); do {
} while (0); if (((oright)->entry.rbe_parent)) do {} while
(0); } while (0); tmp = (parent)->entry.rbe_left; } (tmp)
->entry.rbe_color = (parent)->entry.rbe_color; (parent)
->entry.rbe_color = 0; if ((tmp)->entry.rbe_left) ((tmp
)->entry.rbe_left)->entry.rbe_color = 0; do { (tmp) = (
parent)->entry.rbe_left; if (((parent)->entry.rbe_left =
(tmp)->entry.rbe_right)) { ((tmp)->entry.rbe_right)->
entry.rbe_parent = (parent); } do {} while (0); if (((tmp)->
entry.rbe_parent = (parent)->entry.rbe_parent)) { if ((parent
) == ((parent)->entry.rbe_parent)->entry.rbe_left) ((parent
)->entry.rbe_parent)->entry.rbe_left = (tmp); else ((parent
)->entry.rbe_parent)->entry.rbe_right = (tmp); } else (
head)->rbh_root = (tmp); (tmp)->entry.rbe_right = (parent
); (parent)->entry.rbe_parent = (tmp); do {} while (0); if
(((tmp)->entry.rbe_parent)) do {} while (0); } while (0);
elm = (head)->rbh_root; break; } } } if (elm) (elm)->entry
.rbe_color = 0; } struct vertex * lsa_tree_RB_REMOVE(struct lsa_tree
*head, struct vertex *elm) { struct vertex *child, *parent, *
old = elm; int color; if ((elm)->entry.rbe_left == ((void*
)0)) child = (elm)->entry.rbe_right; else if ((elm)->entry
.rbe_right == ((void*)0)) child = (elm)->entry.rbe_left; else
{ struct vertex *left; elm = (elm)->entry.rbe_right; while
((left = (elm)->entry.rbe_left)) elm = left; child = (elm
)->entry.rbe_right; parent = (elm)->entry.rbe_parent; color
= (elm)->entry.rbe_color; if (child) (child)->entry.rbe_parent
= parent; if (parent) { if ((parent)->entry.rbe_left == elm
) (parent)->entry.rbe_left = child; else (parent)->entry
.rbe_right = child; do {} while (0); } else (head)->rbh_root
= child; if ((elm)->entry.rbe_parent == old) parent = elm
; (elm)->entry = (old)->entry; if ((old)->entry.rbe_parent
) { if (((old)->entry.rbe_parent)->entry.rbe_left == old
) ((old)->entry.rbe_parent)->entry.rbe_left = elm; else
((old)->entry.rbe_parent)->entry.rbe_right = elm; do {
} while (0); } else (head)->rbh_root = elm; ((old)->entry
.rbe_left)->entry.rbe_parent = elm; if ((old)->entry.rbe_right
) ((old)->entry.rbe_right)->entry.rbe_parent = elm; if (
parent) { left = parent; do { do {} while (0); } while ((left
= (left)->entry.rbe_parent)); } goto color; } parent = (elm
)->entry.rbe_parent; color = (elm)->entry.rbe_color; if
(child) (child)->entry.rbe_parent = parent; if (parent) {
if ((parent)->entry.rbe_left == elm) (parent)->entry.rbe_left
= child; else (parent)->entry.rbe_right = child; do {} while
(0); } else (head)->rbh_root = child; color: if (color ==
0) lsa_tree_RB_REMOVE_COLOR(head, parent, child); return (old
); } struct vertex * lsa_tree_RB_INSERT(struct lsa_tree *head
, struct vertex *elm) { struct vertex *tmp; struct vertex *parent
= ((void*)0); int comp = 0; tmp = (head)->rbh_root; while
(tmp) { parent = tmp; comp = (lsa_compare)(elm, parent); if (
comp < 0) tmp = (tmp)->entry.rbe_left; else if (comp >
0) tmp = (tmp)->entry.rbe_right; else return (tmp); } do {
(elm)->entry.rbe_parent = parent; (elm)->entry.rbe_left
= (elm)->entry.rbe_right = ((void*)0); (elm)->entry.rbe_color
= 1; } while (0); if (parent != ((void*)0)) { if (comp < 0
) (parent)->entry.rbe_left = elm; else (parent)->entry.
rbe_right = elm; do {} while (0); } else (head)->rbh_root =
elm; lsa_tree_RB_INSERT_COLOR(head, elm); return (((void*)0)
); } struct vertex * lsa_tree_RB_FIND(struct lsa_tree *head, struct
vertex *elm) { struct vertex *tmp = (head)->rbh_root; int
comp; while (tmp) { comp = lsa_compare(elm, tmp); if (comp <
0) tmp = (tmp)->entry.rbe_left; else if (comp > 0) tmp
= (tmp)->entry.rbe_right; else return (tmp); } return (((
void*)0)); } struct vertex * lsa_tree_RB_NFIND(struct lsa_tree
*head, struct vertex *elm) { struct vertex *tmp = (head)->
rbh_root; struct vertex *res = ((void*)0); int comp; while (tmp
) { comp = lsa_compare(elm, tmp); if (comp < 0) { res = tmp
; tmp = (tmp)->entry.rbe_left; } else if (comp > 0) tmp
= (tmp)->entry.rbe_right; else return (tmp); } return (res
); } struct vertex * lsa_tree_RB_NEXT(struct vertex *elm) { if
((elm)->entry.rbe_right) { elm = (elm)->entry.rbe_right
; while ((elm)->entry.rbe_left) elm = (elm)->entry.rbe_left
; } else { if ((elm)->entry.rbe_parent && (elm == (
(elm)->entry.rbe_parent)->entry.rbe_left)) elm = (elm)->
entry.rbe_parent; else { while ((elm)->entry.rbe_parent &&
(elm == ((elm)->entry.rbe_parent)->entry.rbe_right)) elm
= (elm)->entry.rbe_parent; elm = (elm)->entry.rbe_parent
; } } return (elm); } struct vertex * lsa_tree_RB_PREV(struct
vertex *elm) { if ((elm)->entry.rbe_left) { elm = (elm)->
entry.rbe_left; while ((elm)->entry.rbe_right) elm = (elm)
->entry.rbe_right; } else { if ((elm)->entry.rbe_parent
&& (elm == ((elm)->entry.rbe_parent)->entry.rbe_right
)) elm = (elm)->entry.rbe_parent; else { while ((elm)->
entry.rbe_parent && (elm == ((elm)->entry.rbe_parent
)->entry.rbe_left)) elm = (elm)->entry.rbe_parent; elm =
(elm)->entry.rbe_parent; } } return (elm); } struct vertex
* lsa_tree_RB_MINMAX(struct lsa_tree *head, int val) { struct
vertex *tmp = (head)->rbh_root; struct vertex *parent = (
(void*)0); while (tmp) { parent = tmp; if (val < 0) tmp = (
tmp)->entry.rbe_left; else tmp = (tmp)->entry.rbe_right
; } return (parent); }
25
Loop condition is true. Entering loop body
26
Use of memory after it is freed
40
41void
42lsa_init(struct lsa_tree *t)
43{
44 RB_INIT(t)do { (t)->rbh_root = ((void*)0); } while (0);
45}
46
47int
48lsa_compare(struct vertex *a, struct vertex *b)
49{
50 if (a->type < b->type)
51 return (-1);
52 if (a->type > b->type)
53 return (1);
54 if (a->adv_rtr < b->adv_rtr)
55 return (-1);
56 if (a->adv_rtr > b->adv_rtr)
57 return (1);
58 if (a->ls_id < b->ls_id)
59 return (-1);
60 if (a->ls_id > b->ls_id)
61 return (1);
62 return (0);
63}
64
65
66struct vertex *
67vertex_get(struct lsa *lsa, struct rde_nbr *nbr, struct lsa_tree *tree)
68{
69 struct vertex *v;
70 struct timespec tp;
71
72 if ((v = calloc(1, sizeof(struct vertex))) == NULL((void*)0))
73 fatal(NULL((void*)0));
74 TAILQ_INIT(&v->nexthop)do { (&v->nexthop)->tqh_first = ((void*)0); (&v
->nexthop)->tqh_last = &(&v->nexthop)->tqh_first
; } while (0)
;
75 v->area = nbr->area;
76 v->peerid = nbr->peerid;
77 v->lsa = lsa;
78 clock_gettime(CLOCK_MONOTONIC3, &tp);
79 v->changed = v->stamp = tp.tv_sec;
80 v->cost = LS_INFINITY0xffffff;
81 v->ls_id = ntohl(lsa->hdr.ls_id)(__uint32_t)(__builtin_constant_p(lsa->hdr.ls_id) ? (__uint32_t
)(((__uint32_t)(lsa->hdr.ls_id) & 0xff) << 24 | (
(__uint32_t)(lsa->hdr.ls_id) & 0xff00) << 8 | ((
__uint32_t)(lsa->hdr.ls_id) & 0xff0000) >> 8 | (
(__uint32_t)(lsa->hdr.ls_id) & 0xff000000) >> 24
) : __swap32md(lsa->hdr.ls_id))
;
82 v->adv_rtr = ntohl(lsa->hdr.adv_rtr)(__uint32_t)(__builtin_constant_p(lsa->hdr.adv_rtr) ? (__uint32_t
)(((__uint32_t)(lsa->hdr.adv_rtr) & 0xff) << 24 |
((__uint32_t)(lsa->hdr.adv_rtr) & 0xff00) << 8 |
((__uint32_t)(lsa->hdr.adv_rtr) & 0xff0000) >> 8
| ((__uint32_t)(lsa->hdr.adv_rtr) & 0xff000000) >>
24) : __swap32md(lsa->hdr.adv_rtr))
;
83 v->type = lsa->hdr.type;
84 v->lsa_tree = tree;
85
86 if (!nbr->self)
87 v->flooded = 1; /* XXX fix me */
88 v->self = nbr->self;
89
90 evtimer_set(&v->ev, lsa_timeout, v)event_set(&v->ev, -1, 0, lsa_timeout, v);
91
92 return (v);
93}
94
95void
96vertex_free(struct vertex *v)
97{
98 RB_REMOVE(lsa_tree, v->lsa_tree, v)lsa_tree_RB_REMOVE(v->lsa_tree, v);
99
100 (void)evtimer_del(&v->ev)event_del(&v->ev);
101 vertex_nexthop_clear(v);
102 free(v->lsa);
103 free(v);
22
Memory is released
104}
105
106void
107vertex_nexthop_clear(struct vertex *v)
108{
109 struct v_nexthop *vn;
110
111 while ((vn = TAILQ_FIRST(&v->nexthop)((&v->nexthop)->tqh_first))) {
112 TAILQ_REMOVE(&v->nexthop, vn, entry)do { if (((vn)->entry.tqe_next) != ((void*)0)) (vn)->entry
.tqe_next->entry.tqe_prev = (vn)->entry.tqe_prev; else (
&v->nexthop)->tqh_last = (vn)->entry.tqe_prev; *
(vn)->entry.tqe_prev = (vn)->entry.tqe_next; ; ; } while
(0)
;
113 free(vn);
114 }
115}
116
117void
118vertex_nexthop_add(struct vertex *dst, struct vertex *parent, u_int32_t nexthop)
119{
120 struct v_nexthop *vn;
121
122 if ((vn = calloc(1, sizeof(*vn))) == NULL((void*)0))
123 fatal("vertex_nexthop_add");
124
125 vn->prev = parent;
126 vn->nexthop.s_addr = nexthop;
127
128 TAILQ_INSERT_TAIL(&dst->nexthop, vn, entry)do { (vn)->entry.tqe_next = ((void*)0); (vn)->entry.tqe_prev
= (&dst->nexthop)->tqh_last; *(&dst->nexthop
)->tqh_last = (vn); (&dst->nexthop)->tqh_last = &
(vn)->entry.tqe_next; } while (0)
;
129}
130
131/* returns -1 if a is older, 1 if newer and 0 if equal to b */
132int
133lsa_newer(struct lsa_hdr *a, struct lsa_hdr *b)
134{
135 int32_t a32, b32;
136 u_int16_t a16, b16;
137 int i;
138
139 if (a == NULL((void*)0))
140 return (-1);
141 if (b == NULL((void*)0))
142 return (1);
143
144 /*
145 * The sequence number is defined as signed 32-bit integer,
146 * no idea how IETF came up with such a stupid idea.
147 */
148 a32 = (int32_t)ntohl(a->seq_num)(__uint32_t)(__builtin_constant_p(a->seq_num) ? (__uint32_t
)(((__uint32_t)(a->seq_num) & 0xff) << 24 | ((__uint32_t
)(a->seq_num) & 0xff00) << 8 | ((__uint32_t)(a->
seq_num) & 0xff0000) >> 8 | ((__uint32_t)(a->seq_num
) & 0xff000000) >> 24) : __swap32md(a->seq_num))
;
149 b32 = (int32_t)ntohl(b->seq_num)(__uint32_t)(__builtin_constant_p(b->seq_num) ? (__uint32_t
)(((__uint32_t)(b->seq_num) & 0xff) << 24 | ((__uint32_t
)(b->seq_num) & 0xff00) << 8 | ((__uint32_t)(b->
seq_num) & 0xff0000) >> 8 | ((__uint32_t)(b->seq_num
) & 0xff000000) >> 24) : __swap32md(b->seq_num))
;
150
151 if (a32 > b32)
152 return (1);
153 if (a32 < b32)
154 return (-1);
155
156 a16 = ntohs(a->ls_chksum)(__uint16_t)(__builtin_constant_p(a->ls_chksum) ? (__uint16_t
)(((__uint16_t)(a->ls_chksum) & 0xffU) << 8 | ((
__uint16_t)(a->ls_chksum) & 0xff00U) >> 8) : __swap16md
(a->ls_chksum))
;
157 b16 = ntohs(b->ls_chksum)(__uint16_t)(__builtin_constant_p(b->ls_chksum) ? (__uint16_t
)(((__uint16_t)(b->ls_chksum) & 0xffU) << 8 | ((
__uint16_t)(b->ls_chksum) & 0xff00U) >> 8) : __swap16md
(b->ls_chksum))
;
158
159 if (a16 > b16)
160 return (1);
161 if (a16 < b16)
162 return (-1);
163
164 a16 = ntohs(a->age)(__uint16_t)(__builtin_constant_p(a->age) ? (__uint16_t)((
(__uint16_t)(a->age) & 0xffU) << 8 | ((__uint16_t
)(a->age) & 0xff00U) >> 8) : __swap16md(a->age
))
;
165 b16 = ntohs(b->age)(__uint16_t)(__builtin_constant_p(b->age) ? (__uint16_t)((
(__uint16_t)(b->age) & 0xffU) << 8 | ((__uint16_t
)(b->age) & 0xff00U) >> 8) : __swap16md(b->age
))
;
166
167 if (a16 >= MAX_AGE3600 && b16 >= MAX_AGE3600)
168 return (0);
169 if (b16 >= MAX_AGE3600)
170 return (-1);
171 if (a16 >= MAX_AGE3600)
172 return (1);
173
174 i = b16 - a16;
175 if (abs(i) > MAX_AGE_DIFF900)
176 return (i > 0 ? 1 : -1);
177
178 return (0);
179}
180
181int
182lsa_check(struct rde_nbr *nbr, struct lsa *lsa, u_int16_t len)
183{
184 struct area *area = nbr->area;
185 u_int32_t metric;
186
187 if (len < sizeof(lsa->hdr)) {
188 log_warnx("lsa_check: bad packet size");
189 return (0);
190 }
191 if (ntohs(lsa->hdr.len)(__uint16_t)(__builtin_constant_p(lsa->hdr.len) ? (__uint16_t
)(((__uint16_t)(lsa->hdr.len) & 0xffU) << 8 | ((
__uint16_t)(lsa->hdr.len) & 0xff00U) >> 8) : __swap16md
(lsa->hdr.len))
!= len) {
192 log_warnx("lsa_check: bad packet size");
193 return (0);
194 }
195
196 if (iso_cksum(lsa, len, 0)) {
197 log_warnx("lsa_check: bad packet checksum");
198 return (0);
199 }
200
201 /* invalid ages */
202 if ((ntohs(lsa->hdr.age)(__uint16_t)(__builtin_constant_p(lsa->hdr.age) ? (__uint16_t
)(((__uint16_t)(lsa->hdr.age) & 0xffU) << 8 | ((
__uint16_t)(lsa->hdr.age) & 0xff00U) >> 8) : __swap16md
(lsa->hdr.age))
< 1 && !nbr->self) ||
203 ntohs(lsa->hdr.age)(__uint16_t)(__builtin_constant_p(lsa->hdr.age) ? (__uint16_t
)(((__uint16_t)(lsa->hdr.age) & 0xffU) << 8 | ((
__uint16_t)(lsa->hdr.age) & 0xff00U) >> 8) : __swap16md
(lsa->hdr.age))
> MAX_AGE3600) {
204 log_warnx("lsa_check: bad age");
205 return (0);
206 }
207
208 /* invalid sequence number */
209 if (ntohl(lsa->hdr.seq_num)(__uint32_t)(__builtin_constant_p(lsa->hdr.seq_num) ? (__uint32_t
)(((__uint32_t)(lsa->hdr.seq_num) & 0xff) << 24 |
((__uint32_t)(lsa->hdr.seq_num) & 0xff00) << 8 |
((__uint32_t)(lsa->hdr.seq_num) & 0xff0000) >> 8
| ((__uint32_t)(lsa->hdr.seq_num) & 0xff000000) >>
24) : __swap32md(lsa->hdr.seq_num))
== RESV_SEQ_NUM0x80000000) {
210 log_warnx("ls_check: bad seq num");
211 return (0);
212 }
213
214 switch (lsa->hdr.type) {
215 case LSA_TYPE_ROUTER1:
216 if (!lsa_router_check(lsa, len))
217 return (0);
218 break;
219 case LSA_TYPE_NETWORK2:
220 if ((len % sizeof(u_int32_t)) ||
221 len < sizeof(lsa->hdr) + sizeof(u_int32_t)) {
222 log_warnx("lsa_check: bad LSA network packet");
223 return (0);
224 }
225 break;
226 case LSA_TYPE_SUM_NETWORK3:
227 case LSA_TYPE_SUM_ROUTER4:
228 if ((len % sizeof(u_int32_t)) ||
229 len < sizeof(lsa->hdr) + sizeof(lsa->data.sum)) {
230 log_warnx("lsa_check: bad LSA summary packet");
231 return (0);
232 }
233 metric = ntohl(lsa->data.sum.metric)(__uint32_t)(__builtin_constant_p(lsa->data.sum.metric) ? (
__uint32_t)(((__uint32_t)(lsa->data.sum.metric) & 0xff
) << 24 | ((__uint32_t)(lsa->data.sum.metric) & 0xff00
) << 8 | ((__uint32_t)(lsa->data.sum.metric) & 0xff0000
) >> 8 | ((__uint32_t)(lsa->data.sum.metric) & 0xff000000
) >> 24) : __swap32md(lsa->data.sum.metric))
;
234 if (metric & ~LSA_METRIC_MASK0x00ffffff) {
235 log_warnx("lsa_check: bad LSA summary metric");
236 return (0);
237 }
238 break;
239 case LSA_TYPE_EXTERNAL5:
240 if ((len % (3 * sizeof(u_int32_t))) ||
241 len < sizeof(lsa->hdr) + sizeof(lsa->data.asext)) {
242 log_warnx("lsa_check: bad LSA as-external packet");
243 return (0);
244 }
245 metric = ntohl(lsa->data.asext.metric)(__uint32_t)(__builtin_constant_p(lsa->data.asext.metric) ?
(__uint32_t)(((__uint32_t)(lsa->data.asext.metric) & 0xff
) << 24 | ((__uint32_t)(lsa->data.asext.metric) &
0xff00) << 8 | ((__uint32_t)(lsa->data.asext.metric
) & 0xff0000) >> 8 | ((__uint32_t)(lsa->data.asext
.metric) & 0xff000000) >> 24) : __swap32md(lsa->
data.asext.metric))
;
246 if (metric & ~(LSA_METRIC_MASK0x00ffffff | LSA_ASEXT_E_FLAG0x80000000)) {
247 log_warnx("lsa_check: bad LSA as-external metric");
248 return (0);
249 }
250 /* AS-external-LSA are silently discarded in stub areas */
251 if (area->stub)
252 return (0);
253 break;
254 case LSA_TYPE_LINK_OPAQ9:
255 case LSA_TYPE_AREA_OPAQ10:
256 case LSA_TYPE_AS_OPAQ11:
257 if (len % sizeof(u_int32_t)) {
258 log_warnx("lsa_check: bad opaque LSA packet");
259 return (0);
260 }
261 /* Type-11 Opaque-LSA are silently discarded in stub areas */
262 if (lsa->hdr.type == LSA_TYPE_AS_OPAQ11 && area->stub)
263 return (0);
264 break;
265 default:
266 log_warnx("lsa_check: unknown type %u", lsa->hdr.type);
267 return (0);
268 }
269
270 /* MaxAge handling */
271 if (lsa->hdr.age == htons(MAX_AGE)(__uint16_t)(__builtin_constant_p(3600) ? (__uint16_t)(((__uint16_t
)(3600) & 0xffU) << 8 | ((__uint16_t)(3600) & 0xff00U
) >> 8) : __swap16md(3600))
&& !nbr->self && lsa_find(nbr->iface,
272 lsa->hdr.type, lsa->hdr.ls_id, lsa->hdr.adv_rtr) == NULL((void*)0) &&
273 !rde_nbr_loading(area)) {
274 /*
275 * if no neighbor in state Exchange or Loading
276 * ack LSA but don't add it. Needs to be a direct ack.
277 */
278 rde_imsg_compose_ospfe(IMSG_LS_ACK, nbr->peerid, 0, &lsa->hdr,
279 sizeof(struct lsa_hdr));
280 return (0);
281 }
282
283 return (1);
284}
285
286int
287lsa_router_check(struct lsa *lsa, u_int16_t len)
288{
289 struct lsa_rtr_link *rtr_link;
290 char *buf = (char *)lsa;
291 u_int16_t i, off, nlinks;
292
293 off = sizeof(lsa->hdr) + sizeof(struct lsa_rtr);
294 if (off > len) {
295 log_warnx("lsa_check: invalid LSA router packet");
296 return (0);
297 }
298
299 if (lsa->hdr.ls_id != lsa->hdr.adv_rtr) {
300 log_warnx("lsa_check: invalid LSA router packet, bad adv_rtr");
301 return (0);
302 }
303
304 nlinks = ntohs(lsa->data.rtr.nlinks)(__uint16_t)(__builtin_constant_p(lsa->data.rtr.nlinks) ? (
__uint16_t)(((__uint16_t)(lsa->data.rtr.nlinks) & 0xffU
) << 8 | ((__uint16_t)(lsa->data.rtr.nlinks) & 0xff00U
) >> 8) : __swap16md(lsa->data.rtr.nlinks))
;
305 if (nlinks == 0) {
306 log_warnx("lsa_check: invalid LSA router packet");
307 return (0);
308 }
309 for (i = 0; i < nlinks; i++) {
310 rtr_link = (struct lsa_rtr_link *)(buf + off);
311 off += sizeof(struct lsa_rtr_link);
312 if (off > len) {
313 log_warnx("lsa_check: invalid LSA router packet");
314 return (0);
315 }
316 off += rtr_link->num_tos * sizeof(u_int32_t);
317 if (off > len) {
318 log_warnx("lsa_check: invalid LSA router packet");
319 return (0);
320 }
321 }
322
323 if (i != nlinks) {
324 log_warnx("lsa_check: invalid LSA router packet");
325 return (0);
326 }
327 return (1);
328}
329
330int
331lsa_self(struct rde_nbr *nbr, struct lsa *new, struct vertex *v)
332{
333 struct iface *iface;
334 struct lsa *dummy;
335
336 if (nbr->self)
337 return (0);
338
339 if (rde_router_id() == new->hdr.adv_rtr)
340 goto self;
341
342 if (new->hdr.type == LSA_TYPE_NETWORK2)
343 LIST_FOREACH(iface, &nbr->area->iface_list, entry)for((iface) = ((&nbr->area->iface_list)->lh_first
); (iface)!= ((void*)0); (iface) = ((iface)->entry.le_next
))
344 if (iface->addr.s_addr == new->hdr.ls_id)
345 goto self;
346
347 return (0);
348
349self:
350 if (v == NULL((void*)0)) {
351 /*
352 * LSA is no longer announced, remove by premature aging.
353 * The problem is that new may not be altered so a copy
354 * needs to be added to the LSA DB first.
355 */
356 if ((dummy = malloc(ntohs(new->hdr.len)(__uint16_t)(__builtin_constant_p(new->hdr.len) ? (__uint16_t
)(((__uint16_t)(new->hdr.len) & 0xffU) << 8 | ((
__uint16_t)(new->hdr.len) & 0xff00U) >> 8) : __swap16md
(new->hdr.len))
)) == NULL((void*)0))
357 fatal("lsa_self");
358 memcpy(dummy, new, ntohs(new->hdr.len)(__uint16_t)(__builtin_constant_p(new->hdr.len) ? (__uint16_t
)(((__uint16_t)(new->hdr.len) & 0xffU) << 8 | ((
__uint16_t)(new->hdr.len) & 0xff00U) >> 8) : __swap16md
(new->hdr.len))
);
359 dummy->hdr.age = htons(MAX_AGE)(__uint16_t)(__builtin_constant_p(3600) ? (__uint16_t)(((__uint16_t
)(3600) & 0xffU) << 8 | ((__uint16_t)(3600) & 0xff00U
) >> 8) : __swap16md(3600))
;
360 /*
361 * The clue is that by using the remote nbr as originator
362 * the dummy LSA will be reflooded via the default timeout
363 * handler.
364 */
365 (void)lsa_add(rde_nbr_self(nbr->area), dummy);
366 return (1);
367 }
368
369 /*
370 * LSA is still originated, just reflood it. But we need to create
371 * a new instance by setting the LSA sequence number equal to the
372 * one of new and calling lsa_refresh(). Flooding will be done by the
373 * caller.
374 */
375 v->lsa->hdr.seq_num = new->hdr.seq_num;
376 lsa_refresh(v);
377 return (1);
378}
379
380int
381lsa_add(struct rde_nbr *nbr, struct lsa *lsa)
382{
383 struct lsa_tree *tree;
384 struct vertex *new, *old;
385 struct timeval tv, now, res;
386 int update = 1;
387
388 if (lsa->hdr.type == LSA_TYPE_EXTERNAL5 ||
13
Assuming field 'type' is not equal to LSA_TYPE_EXTERNAL
15
Taking false branch
389 lsa->hdr.type == LSA_TYPE_AS_OPAQ11)
14
Assuming field 'type' is not equal to LSA_TYPE_AS_OPAQ
390 tree = &asext_tree;
391 else if (lsa->hdr.type == LSA_TYPE_LINK_OPAQ9)
16
Assuming field 'type' is not equal to LSA_TYPE_LINK_OPAQ
17
Taking false branch
392 tree = &nbr->iface->lsa_tree;
393 else
394 tree = &nbr->area->lsa_tree;
395
396 new = vertex_get(lsa, nbr, tree);
397 old = RB_INSERT(lsa_tree, tree, new)lsa_tree_RB_INSERT(tree, new);
398
399 if (old
17.1
'old' is not equal to NULL
!= NULL((void*)0)) {
18
Taking true branch
400 if (old->deleted && evtimer_pending(&old->ev, &tv)event_pending(&old->ev, 0x01, &tv)) {
19
Assuming field 'deleted' is 0
401 /* new update added before hold time expired */
402 gettimeofday(&now, NULL((void*)0));
403 timersub(&tv, &now, &res)do { (&res)->tv_sec = (&tv)->tv_sec - (&now
)->tv_sec; (&res)->tv_usec = (&tv)->tv_usec -
(&now)->tv_usec; if ((&res)->tv_usec < 0) {
(&res)->tv_sec--; (&res)->tv_usec += 1000000; }
} while (0)
;
404
405 /* remove old LSA and insert new LSA with delay */
406 vertex_free(old);
407 RB_INSERT(lsa_tree, tree, new)lsa_tree_RB_INSERT(tree, new);
408 new->deleted = 1;
409
410 if (evtimer_add(&new->ev, &res)event_add(&new->ev, &res) != 0)
411 fatal("lsa_add");
412 return (1);
413 }
414 if (lsa_equal(new->lsa, old->lsa))
20
Taking false branch
415 update = 0;
416 vertex_free(old);
21
Calling 'vertex_free'
23
Returning; memory was released via 1st parameter
417 RB_INSERT(lsa_tree, tree, new)lsa_tree_RB_INSERT(tree, new);
24
Calling 'lsa_tree_RB_INSERT'
418 }
419
420 if (update) {
421 if (lsa->hdr.type != LSA_TYPE_EXTERNAL5 &&
422 lsa->hdr.type != LSA_TYPE_AS_OPAQ11)
423 nbr->area->dirty = 1;
424 start_spf_timer();
425 }
426
427 /* timeout handling either MAX_AGE or LS_REFRESH_TIME */
428 timerclear(&tv)(&tv)->tv_sec = (&tv)->tv_usec = 0;
429
430 if (nbr->self && ntohs(new->lsa->hdr.age)(__uint16_t)(__builtin_constant_p(new->lsa->hdr.age) ? (
__uint16_t)(((__uint16_t)(new->lsa->hdr.age) & 0xffU
) << 8 | ((__uint16_t)(new->lsa->hdr.age) & 0xff00U
) >> 8) : __swap16md(new->lsa->hdr.age))
== DEFAULT_AGE0)
431 tv.tv_sec = LS_REFRESH_TIME1800;
432 else
433 tv.tv_sec = MAX_AGE3600 - ntohs(new->lsa->hdr.age)(__uint16_t)(__builtin_constant_p(new->lsa->hdr.age) ? (
__uint16_t)(((__uint16_t)(new->lsa->hdr.age) & 0xffU
) << 8 | ((__uint16_t)(new->lsa->hdr.age) & 0xff00U
) >> 8) : __swap16md(new->lsa->hdr.age))
;
434
435 if (evtimer_add(&new->ev, &tv)event_add(&new->ev, &tv) != 0)
436 fatal("lsa_add");
437 return (0);
438}
439
440void
441lsa_del(struct rde_nbr *nbr, struct lsa_hdr *lsa)
442{
443 struct vertex *v;
444 struct timeval tv;
445
446 v = lsa_find(nbr->iface, lsa->type, lsa->ls_id, lsa->adv_rtr);
447 if (v == NULL((void*)0))
448 return;
449
450 v->deleted = 1;
451 /* hold time to make sure that a new lsa is not added premature */
452 timerclear(&tv)(&tv)->tv_sec = (&tv)->tv_usec = 0;
453 tv.tv_sec = MIN_LS_INTERVAL5;
454 if (evtimer_add(&v->ev, &tv)event_add(&v->ev, &tv) == -1)
455 fatal("lsa_del");
456}
457
458void
459lsa_age(struct vertex *v)
460{
461 struct timespec tp;
462 time_t now;
463 int d;
464 u_int16_t age;
465
466 clock_gettime(CLOCK_MONOTONIC3, &tp);
467 now = tp.tv_sec;
468
469 d = now - v->stamp;
470 /* set stamp so that at least new calls work */
471 v->stamp = now;
472
473 if (d < 0) {
474 log_warnx("lsa_age: time went backwards");
475 return;
476 }
477
478 age = ntohs(v->lsa->hdr.age)(__uint16_t)(__builtin_constant_p(v->lsa->hdr.age) ? (__uint16_t
)(((__uint16_t)(v->lsa->hdr.age) & 0xffU) << 8
| ((__uint16_t)(v->lsa->hdr.age) & 0xff00U) >>
8) : __swap16md(v->lsa->hdr.age))
;
479 if (age + d > MAX_AGE3600)
480 age = MAX_AGE3600;
481 else
482 age += d;
483
484 v->lsa->hdr.age = htons(age)(__uint16_t)(__builtin_constant_p(age) ? (__uint16_t)(((__uint16_t
)(age) & 0xffU) << 8 | ((__uint16_t)(age) & 0xff00U
) >> 8) : __swap16md(age))
;
485}
486
487struct vertex *
488lsa_find(struct iface *iface, u_int8_t type, u_int32_t ls_id, u_int32_t adv_rtr)
489{
490 struct lsa_tree *tree;
491
492 if (type == LSA_TYPE_EXTERNAL5 ||
493 type == LSA_TYPE_AS_OPAQ11)
494 tree = &asext_tree;
495 else if (type == LSA_TYPE_LINK_OPAQ9)
496 tree = &iface->lsa_tree;
497 else
498 tree = &iface->area->lsa_tree;
499
500 return lsa_find_tree(tree, type, ls_id, adv_rtr);
501}
502
503struct vertex *
504lsa_find_area(struct area *area, u_int8_t type, u_int32_t ls_id,
505 u_int32_t adv_rtr)
506{
507 return lsa_find_tree(&area->lsa_tree, type, ls_id, adv_rtr);
508}
509
510struct vertex *
511lsa_find_tree(struct lsa_tree *tree, u_int16_t type, u_int32_t ls_id,
512 u_int32_t adv_rtr)
513{
514 struct vertex key;
515 struct vertex *v;
516
517 key.ls_id = ntohl(ls_id)(__uint32_t)(__builtin_constant_p(ls_id) ? (__uint32_t)(((__uint32_t
)(ls_id) & 0xff) << 24 | ((__uint32_t)(ls_id) &
0xff00) << 8 | ((__uint32_t)(ls_id) & 0xff0000) >>
8 | ((__uint32_t)(ls_id) & 0xff000000) >> 24) : __swap32md
(ls_id))
;
518 key.adv_rtr = ntohl(adv_rtr)(__uint32_t)(__builtin_constant_p(adv_rtr) ? (__uint32_t)(((__uint32_t
)(adv_rtr) & 0xff) << 24 | ((__uint32_t)(adv_rtr) &
0xff00) << 8 | ((__uint32_t)(adv_rtr) & 0xff0000) >>
8 | ((__uint32_t)(adv_rtr) & 0xff000000) >> 24) : __swap32md
(adv_rtr))
;
519 key.type = type;
520
521 v = RB_FIND(lsa_tree, tree, &key)lsa_tree_RB_FIND(tree, &key);
522
523 /* LSA that are deleted are not findable */
524 if (v && v->deleted)
525 return (NULL((void*)0));
526
527 if (v)
528 lsa_age(v);
529
530 return (v);
531}
532
533struct vertex *
534lsa_find_net(struct area *area, u_int32_t ls_id)
535{
536 struct lsa_tree *tree = &area->lsa_tree;
537 struct vertex *v;
538
539 /* XXX speed me up */
540 RB_FOREACH(v, lsa_tree, tree)for ((v) = lsa_tree_RB_MINMAX(tree, -1); (v) != ((void*)0); (
v) = lsa_tree_RB_NEXT(v))
{
541 if (v->lsa->hdr.type == LSA_TYPE_NETWORK2 &&
542 v->lsa->hdr.ls_id == ls_id) {
543 /* LSA that are deleted are not findable */
544 if (v->deleted)
545 return (NULL((void*)0));
546 lsa_age(v);
547 return (v);
548 }
549 }
550
551 return (NULL((void*)0));
552}
553
554u_int16_t
555lsa_num_links(struct vertex *v)
556{
557 switch (v->type) {
558 case LSA_TYPE_ROUTER1:
559 return (ntohs(v->lsa->data.rtr.nlinks)(__uint16_t)(__builtin_constant_p(v->lsa->data.rtr.nlinks
) ? (__uint16_t)(((__uint16_t)(v->lsa->data.rtr.nlinks)
& 0xffU) << 8 | ((__uint16_t)(v->lsa->data.rtr
.nlinks) & 0xff00U) >> 8) : __swap16md(v->lsa->
data.rtr.nlinks))
);
560 case LSA_TYPE_NETWORK2:
561 return ((ntohs(v->lsa->hdr.len)(__uint16_t)(__builtin_constant_p(v->lsa->hdr.len) ? (__uint16_t
)(((__uint16_t)(v->lsa->hdr.len) & 0xffU) << 8
| ((__uint16_t)(v->lsa->hdr.len) & 0xff00U) >>
8) : __swap16md(v->lsa->hdr.len))
- sizeof(struct lsa_hdr)
562 - sizeof(u_int32_t)) / sizeof(struct lsa_net_link));
563 default:
564 fatalx("lsa_num_links: invalid LSA type");
565 }
566}
567
568void
569lsa_snap(struct rde_nbr *nbr)
570{
571 struct lsa_tree *tree = &nbr->area->lsa_tree;
572 struct vertex *v;
573
574 do {
575 RB_FOREACH(v, lsa_tree, tree)for ((v) = lsa_tree_RB_MINMAX(tree, -1); (v) != ((void*)0); (
v) = lsa_tree_RB_NEXT(v))
{
576 if (v->deleted)
577 continue;
578 switch (v->type) {
579 case LSA_TYPE_LINK_OPAQ9:
580 case LSA_TYPE_AREA_OPAQ10:
581 case LSA_TYPE_AS_OPAQ11:
582 if (nbr->capa_options & OSPF_OPTION_O0x40)
583 break;
584 continue;
585 }
586 lsa_age(v);
587 if (ntohs(v->lsa->hdr.age)(__uint16_t)(__builtin_constant_p(v->lsa->hdr.age) ? (__uint16_t
)(((__uint16_t)(v->lsa->hdr.age) & 0xffU) << 8
| ((__uint16_t)(v->lsa->hdr.age) & 0xff00U) >>
8) : __swap16md(v->lsa->hdr.age))
>= MAX_AGE3600)
588 rde_imsg_compose_ospfe(IMSG_LS_SNAP, nbr->peerid,
589 0, &v->lsa->hdr, ntohs(v->lsa->hdr.len)(__uint16_t)(__builtin_constant_p(v->lsa->hdr.len) ? (__uint16_t
)(((__uint16_t)(v->lsa->hdr.len) & 0xffU) << 8
| ((__uint16_t)(v->lsa->hdr.len) & 0xff00U) >>
8) : __swap16md(v->lsa->hdr.len))
);
590 else
591 rde_imsg_compose_ospfe(IMSG_DB_SNAPSHOT,
592 nbr->peerid, 0, &v->lsa->hdr,
593 sizeof(struct lsa_hdr));
594 }
595 if (tree == &asext_tree)
596 break;
597 if (tree == &nbr->area->lsa_tree)
598 tree = &nbr->iface->lsa_tree;
599 else if (nbr->area->stub)
600 break;
601 else
602 tree = &asext_tree;
603 } while (1);
604}
605
606void
607lsa_dump(struct lsa_tree *tree, int imsg_type, pid_t pid)
608{
609 struct vertex *v;
610
611 RB_FOREACH(v, lsa_tree, tree)for ((v) = lsa_tree_RB_MINMAX(tree, -1); (v) != ((void*)0); (
v) = lsa_tree_RB_NEXT(v))
{
612 if (v->deleted)
613 continue;
614 lsa_age(v);
615 switch (imsg_type) {
616 case IMSG_CTL_SHOW_DATABASE:
617 break;
618 case IMSG_CTL_SHOW_DB_SELF:
619 if (v->lsa->hdr.adv_rtr == rde_router_id())
620 break;
621 continue;
622 case IMSG_CTL_SHOW_DB_EXT:
623 if (v->type == LSA_TYPE_EXTERNAL5)
624 break;
625 continue;
626 case IMSG_CTL_SHOW_DB_NET:
627 if (v->type == LSA_TYPE_NETWORK2)
628 break;
629 continue;
630 case IMSG_CTL_SHOW_DB_RTR:
631 if (v->type == LSA_TYPE_ROUTER1)
632 break;
633 continue;
634 case IMSG_CTL_SHOW_DB_SUM:
635 if (v->type == LSA_TYPE_SUM_NETWORK3)
636 break;
637 continue;
638 case IMSG_CTL_SHOW_DB_ASBR:
639 if (v->type == LSA_TYPE_SUM_ROUTER4)
640 break;
641 continue;
642 case IMSG_CTL_SHOW_DB_OPAQ:
643 if (v->type == LSA_TYPE_LINK_OPAQ9 ||
644 v->type == LSA_TYPE_AREA_OPAQ10 ||
645 v->type == LSA_TYPE_AS_OPAQ11)
646 break;
647 continue;
648 default:
649 log_warnx("lsa_dump: unknown imsg type");
650 return;
651 }
652 rde_imsg_compose_ospfe(imsg_type, 0, pid, &v->lsa->hdr,
653 ntohs(v->lsa->hdr.len)(__uint16_t)(__builtin_constant_p(v->lsa->hdr.len) ? (__uint16_t
)(((__uint16_t)(v->lsa->hdr.len) & 0xffU) << 8
| ((__uint16_t)(v->lsa->hdr.len) & 0xff00U) >>
8) : __swap16md(v->lsa->hdr.len))
);
654 }
655}
656
657/* ARGSUSED */
658void
659lsa_timeout(int fd, short event, void *bula)
660{
661 struct vertex *v = bula;
662 struct timeval tv;
663
664 lsa_age(v);
665
666 if (v->deleted) {
667 if (ntohs(v->lsa->hdr.age)(__uint16_t)(__builtin_constant_p(v->lsa->hdr.age) ? (__uint16_t
)(((__uint16_t)(v->lsa->hdr.age) & 0xffU) << 8
| ((__uint16_t)(v->lsa->hdr.age) & 0xff00U) >>
8) : __swap16md(v->lsa->hdr.age))
>= MAX_AGE3600) {
668 vertex_free(v);
669 } else {
670 v->deleted = 0;
671
672 /* schedule recalculation of the RIB */
673 if (v->type != LSA_TYPE_EXTERNAL5 &&
674 v->type != LSA_TYPE_AS_OPAQ11)
675 v->area->dirty = 1;
676 start_spf_timer();
677
678 rde_imsg_compose_ospfe(IMSG_LS_FLOOD, v->peerid, 0,
679 v->lsa, ntohs(v->lsa->hdr.len)(__uint16_t)(__builtin_constant_p(v->lsa->hdr.len) ? (__uint16_t
)(((__uint16_t)(v->lsa->hdr.len) & 0xffU) << 8
| ((__uint16_t)(v->lsa->hdr.len) & 0xff00U) >>
8) : __swap16md(v->lsa->hdr.len))
);
680
681 /* timeout handling either MAX_AGE or LS_REFRESH_TIME */
682 timerclear(&tv)(&tv)->tv_sec = (&tv)->tv_usec = 0;
683 if (v->self)
684 tv.tv_sec = LS_REFRESH_TIME1800;
685 else
686 tv.tv_sec = MAX_AGE3600 - ntohs(v->lsa->hdr.age)(__uint16_t)(__builtin_constant_p(v->lsa->hdr.age) ? (__uint16_t
)(((__uint16_t)(v->lsa->hdr.age) & 0xffU) << 8
| ((__uint16_t)(v->lsa->hdr.age) & 0xff00U) >>
8) : __swap16md(v->lsa->hdr.age))
;
687
688 if (evtimer_add(&v->ev, &tv)event_add(&v->ev, &tv) != 0)
689 fatal("lsa_timeout");
690 }
691 return;
692 }
693
694 if (v->self && ntohs(v->lsa->hdr.age)(__uint16_t)(__builtin_constant_p(v->lsa->hdr.age) ? (__uint16_t
)(((__uint16_t)(v->lsa->hdr.age) & 0xffU) << 8
| ((__uint16_t)(v->lsa->hdr.age) & 0xff00U) >>
8) : __swap16md(v->lsa->hdr.age))
< MAX_AGE3600)
695 lsa_refresh(v);
696
697 rde_imsg_compose_ospfe(IMSG_LS_FLOOD, v->peerid, 0,
698 v->lsa, ntohs(v->lsa->hdr.len)(__uint16_t)(__builtin_constant_p(v->lsa->hdr.len) ? (__uint16_t
)(((__uint16_t)(v->lsa->hdr.len) & 0xffU) << 8
| ((__uint16_t)(v->lsa->hdr.len) & 0xff00U) >>
8) : __swap16md(v->lsa->hdr.len))
);
699}
700
701void
702lsa_refresh(struct vertex *v)
703{
704 struct timeval tv;
705 struct timespec tp;
706 u_int32_t seqnum;
707 u_int16_t len;
708
709 /* refresh LSA by increasing sequence number by one */
710 if (v->self && ntohs(v->lsa->hdr.age)(__uint16_t)(__builtin_constant_p(v->lsa->hdr.age) ? (__uint16_t
)(((__uint16_t)(v->lsa->hdr.age) & 0xffU) << 8
| ((__uint16_t)(v->lsa->hdr.age) & 0xff00U) >>
8) : __swap16md(v->lsa->hdr.age))
>= MAX_AGE3600)
711 /* self originated network that is currently beeing removed */
712 v->lsa->hdr.age = htons(MAX_AGE)(__uint16_t)(__builtin_constant_p(3600) ? (__uint16_t)(((__uint16_t
)(3600) & 0xffU) << 8 | ((__uint16_t)(3600) & 0xff00U
) >> 8) : __swap16md(3600))
;
713 else
714 v->lsa->hdr.age = htons(DEFAULT_AGE)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t
)(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U
) >> 8) : __swap16md(0))
;
715 seqnum = ntohl(v->lsa->hdr.seq_num)(__uint32_t)(__builtin_constant_p(v->lsa->hdr.seq_num) ?
(__uint32_t)(((__uint32_t)(v->lsa->hdr.seq_num) & 0xff
) << 24 | ((__uint32_t)(v->lsa->hdr.seq_num) &
0xff00) << 8 | ((__uint32_t)(v->lsa->hdr.seq_num
) & 0xff0000) >> 8 | ((__uint32_t)(v->lsa->hdr
.seq_num) & 0xff000000) >> 24) : __swap32md(v->lsa
->hdr.seq_num))
;
716 if (seqnum++ == MAX_SEQ_NUM0x7fffffff)
717 /* XXX fix me */
718 fatalx("sequence number wrapping");
719 v->lsa->hdr.seq_num = htonl(seqnum)(__uint32_t)(__builtin_constant_p(seqnum) ? (__uint32_t)(((__uint32_t
)(seqnum) & 0xff) << 24 | ((__uint32_t)(seqnum) &
0xff00) << 8 | ((__uint32_t)(seqnum) & 0xff0000) >>
8 | ((__uint32_t)(seqnum) & 0xff000000) >> 24) : __swap32md
(seqnum))
;
720
721 /* recalculate checksum */
722 len = ntohs(v->lsa->hdr.len)(__uint16_t)(__builtin_constant_p(v->lsa->hdr.len) ? (__uint16_t
)(((__uint16_t)(v->lsa->hdr.len) & 0xffU) << 8
| ((__uint16_t)(v->lsa->hdr.len) & 0xff00U) >>
8) : __swap16md(v->lsa->hdr.len))
;
723 v->lsa->hdr.ls_chksum = 0;
724 v->lsa->hdr.ls_chksum = htons(iso_cksum(v->lsa, len, LS_CKSUM_OFFSET))(__uint16_t)(__builtin_constant_p(iso_cksum(v->lsa, len, __builtin_offsetof
(struct lsa_hdr, ls_chksum))) ? (__uint16_t)(((__uint16_t)(iso_cksum
(v->lsa, len, __builtin_offsetof(struct lsa_hdr, ls_chksum
))) & 0xffU) << 8 | ((__uint16_t)(iso_cksum(v->lsa
, len, __builtin_offsetof(struct lsa_hdr, ls_chksum))) & 0xff00U
) >> 8) : __swap16md(iso_cksum(v->lsa, len, __builtin_offsetof
(struct lsa_hdr, ls_chksum))))
;
725
726 clock_gettime(CLOCK_MONOTONIC3, &tp);
727 v->changed = v->stamp = tp.tv_sec;
728
729 timerclear(&tv)(&tv)->tv_sec = (&tv)->tv_usec = 0;
730 tv.tv_sec = LS_REFRESH_TIME1800;
731 if (evtimer_add(&v->ev, &tv)event_add(&v->ev, &tv) == -1)
732 fatal("lsa_refresh");
733}
734
735void
736lsa_merge(struct rde_nbr *nbr, struct lsa *lsa, struct vertex *v)
737{
738 struct timeval tv;
739 struct timespec tp;
740 time_t now;
741 u_int16_t len;
742
743 if (v
10.1
'v' is equal to NULL
== NULL((void*)0)) {
11
Taking true branch
744 if (lsa_add(nbr, lsa))
12
Calling 'lsa_add'
745 /* delayed update */
746 return;
747 rde_imsg_compose_ospfe(IMSG_LS_FLOOD, nbr->peerid, 0,
748 lsa, ntohs(lsa->hdr.len)(__uint16_t)(__builtin_constant_p(lsa->hdr.len) ? (__uint16_t
)(((__uint16_t)(lsa->hdr.len) & 0xffU) << 8 | ((
__uint16_t)(lsa->hdr.len) & 0xff00U) >> 8) : __swap16md
(lsa->hdr.len))
);
749 return;
750 }
751
752 /* set the seq_num to the current one. lsa_refresh() will do the ++ */
753 lsa->hdr.seq_num = v->lsa->hdr.seq_num;
754 /* recalculate checksum */
755 len = ntohs(lsa->hdr.len)(__uint16_t)(__builtin_constant_p(lsa->hdr.len) ? (__uint16_t
)(((__uint16_t)(lsa->hdr.len) & 0xffU) << 8 | ((
__uint16_t)(lsa->hdr.len) & 0xff00U) >> 8) : __swap16md
(lsa->hdr.len))
;
756 lsa->hdr.ls_chksum = 0;
757 lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET))(__uint16_t)(__builtin_constant_p(iso_cksum(lsa, len, __builtin_offsetof
(struct lsa_hdr, ls_chksum))) ? (__uint16_t)(((__uint16_t)(iso_cksum
(lsa, len, __builtin_offsetof(struct lsa_hdr, ls_chksum))) &
0xffU) << 8 | ((__uint16_t)(iso_cksum(lsa, len, __builtin_offsetof
(struct lsa_hdr, ls_chksum))) & 0xff00U) >> 8) : __swap16md
(iso_cksum(lsa, len, __builtin_offsetof(struct lsa_hdr, ls_chksum
))))
;
758
759 /* compare LSA most header fields are equal so don't check them */
760 if (lsa_equal(lsa, v->lsa)) {
761 free(lsa);
762 return;
763 }
764
765 /* overwrite the lsa all other fields are unaffected */
766 free(v->lsa);
767 v->lsa = lsa;
768 start_spf_timer();
769 if (v->type != LSA_TYPE_EXTERNAL5 &&
770 v->type != LSA_TYPE_AS_OPAQ11)
771 nbr->area->dirty = 1;
772
773 /* set correct timeout for reflooding the LSA */
774 clock_gettime(CLOCK_MONOTONIC3, &tp);
775 now = tp.tv_sec;
776 timerclear(&tv)(&tv)->tv_sec = (&tv)->tv_usec = 0;
777 if (v->changed + MIN_LS_INTERVAL5 >= now)
778 tv.tv_sec = MIN_LS_INTERVAL5;
779 if (evtimer_add(&v->ev, &tv)event_add(&v->ev, &tv) == -1)
780 fatal("lsa_merge");
781}
782
783void
784lsa_remove_invalid_sums(struct area *area)
785{
786 struct lsa_tree *tree = &area->lsa_tree;
787 struct vertex *v, *nv;
788
789 /* XXX speed me up */
790 for (v = RB_MIN(lsa_tree, tree)lsa_tree_RB_MINMAX(tree, -1); v != NULL((void*)0); v = nv) {
791 nv = RB_NEXT(lsa_tree, tree, v)lsa_tree_RB_NEXT(v);
792 if ((v->type == LSA_TYPE_SUM_NETWORK3 ||
793 v->type == LSA_TYPE_SUM_ROUTER4) &&
794 v->self && v->cost == LS_INFINITY0xffffff &&
795 v->deleted == 0) {
796 /*
797 * age the lsa and call lsa_timeout() which will
798 * actually remove it from the database.
799 */
800 v->lsa->hdr.age = htons(MAX_AGE)(__uint16_t)(__builtin_constant_p(3600) ? (__uint16_t)(((__uint16_t
)(3600) & 0xffU) << 8 | ((__uint16_t)(3600) & 0xff00U
) >> 8) : __swap16md(3600))
;
801 lsa_timeout(0, 0, v);
802 }
803 }
804}
805
806void
807lsa_generate_stub_sums(struct area *area)
808{
809 struct rt_node rn;
810 struct redistribute *r;
811 struct vertex *v;
812 struct lsa *lsa;
813 struct area *back;
814
815 if (!area->stub)
1
Assuming field 'stub' is not equal to 0
2
Taking false branch
816 return;
817
818 back = rde_backbone_area();
819 if (!back || !back->active)
3
Assuming 'back' is non-null
4
Assuming field 'active' is not equal to 0
5
Taking false branch
820 return;
821
822 SIMPLEQ_FOREACH(r, &area->redist_list, entry)for((r) = ((&area->redist_list)->sqh_first); (r) !=
((void*)0); (r) = ((r)->entry.sqe_next))
{
6
Assuming 'r' is not equal to null
7
Loop condition is true. Entering loop body
823 bzero(&rn, sizeof(rn));
824 if (r->type == REDIST_DEFAULT0x20) {
8
Assuming field 'type' is equal to REDIST_DEFAULT
9
Taking true branch
825 /* setup fake rt_node */
826 rn.prefixlen = 0;
827 rn.prefix.s_addr = INADDR_ANY((u_int32_t)(0x00000000));
828 rn.cost = r->metric & LSA_METRIC_MASK0x00ffffff;
829
830 /* update lsa but only if it was changed */
831 v = lsa_find_area(area, LSA_TYPE_SUM_NETWORK3,
832 rn.prefix.s_addr, rde_router_id());
833 lsa = orig_sum_lsa(&rn, area, LSA_TYPE_SUM_NETWORK3, 0);
834 lsa_merge(rde_nbr_self(area), lsa, v);
10
Calling 'lsa_merge'
835
836 if (v == NULL((void*)0))
837 v = lsa_find_area(area, LSA_TYPE_SUM_NETWORK3,
838 rn.prefix.s_addr, rde_router_id());
839
840 /*
841 * suppressed/deleted routes are not found in the
842 * second lsa_find
843 */
844 if (v)
845 v->cost = rn.cost;
846 return;
847 } else if (r->type == (REDIST_DEFAULT0x20 | REDIST_NO0x10))
848 return;
849 }
850}
851
852int
853lsa_equal(struct lsa *a, struct lsa *b)
854{
855 /*
856 * compare LSA that already have same type, adv_rtr and ls_id
857 * so not all header need to be compared
858 */
859 if (a == NULL((void*)0) || b == NULL((void*)0))
860 return (0);
861 if (a->hdr.len != b->hdr.len)
862 return (0);
863 if (a->hdr.opts != b->hdr.opts)
864 return (0);
865 /* LSAs with age MAX_AGE are never equal */
866 if (a->hdr.age == htons(MAX_AGE)(__uint16_t)(__builtin_constant_p(3600) ? (__uint16_t)(((__uint16_t
)(3600) & 0xffU) << 8 | ((__uint16_t)(3600) & 0xff00U
) >> 8) : __swap16md(3600))
|| b->hdr.age == htons(MAX_AGE)(__uint16_t)(__builtin_constant_p(3600) ? (__uint16_t)(((__uint16_t
)(3600) & 0xffU) << 8 | ((__uint16_t)(3600) & 0xff00U
) >> 8) : __swap16md(3600))
)
867 return (0);
868 if (memcmp(&a->data, &b->data, ntohs(a->hdr.len)(__uint16_t)(__builtin_constant_p(a->hdr.len) ? (__uint16_t
)(((__uint16_t)(a->hdr.len) & 0xffU) << 8 | ((__uint16_t
)(a->hdr.len) & 0xff00U) >> 8) : __swap16md(a->
hdr.len))
-
869 sizeof(struct lsa_hdr)))
870 return (0);
871
872 return (1);
873}