Bug Summary

File: src/usr.sbin/ospfd/rde_spf.c
Warning: line 398, column 12
Access to field 'type' results in a dereference of a null pointer (loaded from variable 'rtr_link')
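
The path the analyzer reports reduces to a common pattern: a pointer is initialized to NULL, assigned only on some arms of a switch in the caller, and then dereferenced unconditionally by a callee on the arm where it was never assigned. Below is a minimal, self-contained sketch of that pattern only; the names (demo_link, use_link, and so on) are illustrative and are not taken from the ospfd sources.

/*
 * Sketch of the reported pattern (illustrative only, simplified names,
 * not the ospfd code): "lnk" is initialized to NULL, is assigned only
 * on the ROUTER-like arm of the caller's switch, and is dereferenced
 * by the helper when the NETWORK-like arm was taken instead.
 * Running this would crash with a NULL dereference.
 */
#include <stddef.h>
#include <stdio.h>

struct demo_link { int type; };

static void
use_link(int dst_type, struct demo_link *lnk)
{
	if (dst_type == 2)				/* NETWORK-like case */
		printf("link type %d\n", lnk->type);	/* NULL dereference if lnk == NULL */
}

int
main(void)
{
	struct demo_link stub = { 1 };
	struct demo_link *lnk = NULL;	/* cf. step 7: initialized to NULL */
	int vtype = 2;			/* analyzer assumes the NETWORK-like arm */

	switch (vtype) {
	case 1:				/* ROUTER-like arm: lnk is assigned */
		lnk = &stub;
		break;
	case 2:				/* NETWORK-like arm: lnk stays NULL */
		break;
	default:
		break;
	}

	use_link(vtype, lnk);		/* cf. step 35: NULL passed as the link pointer */
	return (0);
}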

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name rde_spf.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 1 -pic-is-pie -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/usr.sbin/ospfd/obj -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/usr.sbin/ospfd -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -fdebug-compilation-dir=/usr/src/usr.sbin/ospfd/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c /usr/src/usr.sbin/ospfd/rde_spf.c
1/* $OpenBSD: rde_spf.c,v 1.78 2019/11/19 09:55:55 remi Exp $ */
2
3/*
4 * Copyright (c) 2005 Esben Norby <norby@openbsd.org>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#include <sys/types.h>
20#include <sys/socket.h>
21#include <netinet/in.h>
22#include <arpa/inet.h>
23#include <err.h>
24#include <stdlib.h>
25#include <string.h>
26
27#include "ospfd.h"
28#include "ospf.h"
29#include "log.h"
30#include "rde.h"
31
32extern struct ospfd_conf *rdeconf;
33TAILQ_HEAD(, vertex) cand_list;
34RB_HEAD(rt_tree, rt_node) rt;
35RB_PROTOTYPE(rt_tree, rt_node, entry, rt_compare)
36RB_GENERATE(rt_tree, rt_node, entry, rt_compare)
37struct vertex *spf_root = NULL((void*)0);
38
39void calc_nexthop(struct vertex *, struct vertex *,
40 struct area *, struct lsa_rtr_link *);
41void rt_nexthop_clear(struct rt_node *);
42void rt_nexthop_add(struct rt_node *, struct v_nexthead *, u_int8_t,
43 struct in_addr);
44void rt_update(struct in_addr, u_int8_t, struct v_nexthead *, u_int8_t,
45 u_int32_t, u_int32_t, struct in_addr, struct in_addr,
46 enum path_type, enum dst_type, u_int8_t, u_int32_t);
47void rt_invalidate(struct area *);
48struct lsa_rtr_link *get_rtr_link(struct vertex *, int);
49struct lsa_net_link *get_net_link(struct vertex *, int);
50int linked(struct vertex *, struct vertex *);
51
52void
53spf_calc(struct area *area)
54{
55 struct vertex *v, *w;
56 struct lsa_rtr_link *rtr_link = NULL((void*)0);
7
'rtr_link' initialized to a null pointer value
57 struct lsa_net_link *net_link;
58 u_int32_t d;
59 int i;
60 struct in_addr addr;
61
62 /* clear SPF tree */
63 spf_tree_clr(area);
64 cand_list_clr();
65
66 /* initialize SPF tree */
67 if ((v = spf_root = lsa_find_area(area, LSA_TYPE_ROUTER1,
8
Assuming the condition is false
9
Taking false branch
68 rde_router_id(), rde_router_id())) == NULL((void*)0)) {
69 /* empty area because no interface is active */
70 return;
71 }
72
73 area->transit = 0;
74 spf_root->cost = 0;
75 w = NULL((void*)0);
76
77 /* make sure the spf root has a nexthop */
78 vertex_nexthop_clear(spf_root);
79 vertex_nexthop_add(spf_root, spf_root, 0);
80
81 /* calculate SPF tree */
82 do {
83 /* loop links */
84 for (i = 0; i < lsa_num_links(v); i++) {
10
Assuming the condition is true
11
Loop condition is true. Entering loop body
85 switch (v->type) {
12
Control jumps to 'case 2:' at line 106
86 case LSA_TYPE_ROUTER1:
87 rtr_link = get_rtr_link(v, i);
88 switch (rtr_link->type) {
89 case LINK_TYPE_STUB_NET3:
90 /* skip */
91 continue;
92 case LINK_TYPE_POINTTOPOINT1:
93 case LINK_TYPE_VIRTUAL4:
94 /* find router LSA */
95 w = lsa_find_area(area, LSA_TYPE_ROUTER1,
96 rtr_link->id, rtr_link->id);
97 break;
98 case LINK_TYPE_TRANSIT_NET2:
99 /* find network LSA */
100 w = lsa_find_net(area, rtr_link->id);
101 break;
102 default:
103 fatalx("spf_calc: invalid link type");
104 }
105 break;
106 case LSA_TYPE_NETWORK2:
107 net_link = get_net_link(v, i);
108 /* find router LSA */
109 w = lsa_find_area(area, LSA_TYPE_ROUTER1,
110 net_link->att_rtr, net_link->att_rtr);
111 break;
13
Execution continues on line 116
112 default:
113 fatalx("spf_calc: invalid LSA type");
114 }
115
116 if (w == NULL((void*)0))
14
Assuming 'w' is not equal to NULL
15
Taking false branch
117 continue;
118
119 if (w->lsa->hdr.age == MAX_AGE3600)
16
Assuming field 'age' is not equal to MAX_AGE
17
Taking false branch
120 continue;
121
122 if (!linked(w, v)) {
18
Calling 'linked'
27
Returning from 'linked'
28
Taking false branch
123 addr.s_addr = htonl(w->ls_id);
124 log_debug("spf_calc: w id %s type %d has ",
125 inet_ntoa(addr), w->type);
126 addr.s_addr = htonl(v->ls_id);
127 log_debug(" no link to v id %s type %d",
128 inet_ntoa(addr), v->type);
129 continue;
130 }
131
132 if (v->type
28.1
Field 'type' is not equal to LSA_TYPE_ROUTER
== LSA_TYPE_ROUTER1)
29
Taking false branch
133 d = v->cost + ntohs(rtr_link->metric);
134 else
135 d = v->cost;
136
137 if (cand_list_present(w)) {
30
Assuming the condition is false
31
Taking false branch
138 if (d > w->cost)
139 continue;
140 if (d < w->cost) {
141 w->cost = d;
142 vertex_nexthop_clear(w);
143 calc_nexthop(w, v, area, rtr_link);
144 /*
145 * need to readd to candidate list
146 * because the list is sorted
147 */
148 TAILQ_REMOVE(&cand_list, w, cand);
149 cand_list_add(w);
150 } else
151 /* equal cost path */
152 calc_nexthop(w, v, area, rtr_link);
153 } else if (w->cost == LS_INFINITY0xffffff && d < LS_INFINITY0xffffff) {
32
Assuming field 'cost' is equal to LS_INFINITY
33
Assuming 'd' is < LS_INFINITY
34
Taking true branch
154 w->cost = d;
155
156 vertex_nexthop_clear(w);
157 calc_nexthop(w, v, area, rtr_link);
35
Passing null pointer value via 4th parameter 'rtr_link'
36
Calling 'calc_nexthop'
158 cand_list_add(w);
159 }
160 }
161
162 /* get next vertex */
163 v = cand_list_pop();
164 w = NULL((void*)0);
165 } while (v != NULL((void*)0));
166
167 /* spf_dump(area); */
168 log_debug("spf_calc: area %s calculated", inet_ntoa(area->id));
169
170 area->num_spf_calc++;
171 start_spf_timer();
172}
173
174void
175rt_calc(struct vertex *v, struct area *area, struct ospfd_conf *conf)
176{
177 struct vertex *w;
178 struct v_nexthop *vn;
179 struct lsa_rtr_link *rtr_link = NULL((void*)0);
180 int i;
181 struct in_addr addr, adv_rtr;
182
183 lsa_age(v);
184 if (ntohs(v->lsa->hdr.age)(__uint16_t)(__builtin_constant_p(v->lsa->hdr.age) ? (__uint16_t
)(((__uint16_t)(v->lsa->hdr.age) & 0xffU) << 8
| ((__uint16_t)(v->lsa->hdr.age) & 0xff00U) >>
8) : __swap16md(v->lsa->hdr.age))
== MAX_AGE3600)
185 return;
186
187 switch (v->type) {
188 case LSA_TYPE_ROUTER1:
189 /* stub networks */
190 if (v->cost >= LS_INFINITY0xffffff)
191 return;
192
193 for (i = 0; i < lsa_num_links(v); i++) {
194 rtr_link = get_rtr_link(v, i);
195 if (rtr_link->type != LINK_TYPE_STUB_NET3)
196 continue;
197
198 addr.s_addr = rtr_link->id & rtr_link->data;
199 adv_rtr.s_addr = htonl(v->adv_rtr)(__uint32_t)(__builtin_constant_p(v->adv_rtr) ? (__uint32_t
)(((__uint32_t)(v->adv_rtr) & 0xff) << 24 | ((__uint32_t
)(v->adv_rtr) & 0xff00) << 8 | ((__uint32_t)(v->
adv_rtr) & 0xff0000) >> 8 | ((__uint32_t)(v->adv_rtr
) & 0xff000000) >> 24) : __swap32md(v->adv_rtr))
;
200
201 rt_update(addr, mask2prefixlen(rtr_link->data),
202 &v->nexthop, v->type,
203 v->cost + ntohs(rtr_link->metric)(__uint16_t)(__builtin_constant_p(rtr_link->metric) ? (__uint16_t
)(((__uint16_t)(rtr_link->metric) & 0xffU) << 8 |
((__uint16_t)(rtr_link->metric) & 0xff00U) >> 8
) : __swap16md(rtr_link->metric))
, 0,
204 area->id, adv_rtr, PT_INTRA_AREA, DT_NET,
205 v->lsa->data.rtr.flags, 0);
206 }
207
208 /* router, only add border and as-external routers */
209 if ((v->lsa->data.rtr.flags & (OSPF_RTR_B0x01 | OSPF_RTR_E0x02)) == 0)
210 return;
211
212 addr.s_addr = htonl(v->ls_id)(__uint32_t)(__builtin_constant_p(v->ls_id) ? (__uint32_t)
(((__uint32_t)(v->ls_id) & 0xff) << 24 | ((__uint32_t
)(v->ls_id) & 0xff00) << 8 | ((__uint32_t)(v->
ls_id) & 0xff0000) >> 8 | ((__uint32_t)(v->ls_id
) & 0xff000000) >> 24) : __swap32md(v->ls_id))
;
213 adv_rtr.s_addr = htonl(v->adv_rtr)(__uint32_t)(__builtin_constant_p(v->adv_rtr) ? (__uint32_t
)(((__uint32_t)(v->adv_rtr) & 0xff) << 24 | ((__uint32_t
)(v->adv_rtr) & 0xff00) << 8 | ((__uint32_t)(v->
adv_rtr) & 0xff0000) >> 8 | ((__uint32_t)(v->adv_rtr
) & 0xff000000) >> 24) : __swap32md(v->adv_rtr))
;
214
215 rt_update(addr, 32, &v->nexthop, v->type, v->cost, 0, area->id,
216 adv_rtr, PT_INTRA_AREA, DT_RTR, v->lsa->data.rtr.flags, 0);
217 break;
218 case LSA_TYPE_NETWORK2:
219 if (v->cost >= LS_INFINITY0xffffff)
220 return;
221
222 addr.s_addr = htonl(v->ls_id)(__uint32_t)(__builtin_constant_p(v->ls_id) ? (__uint32_t)
(((__uint32_t)(v->ls_id) & 0xff) << 24 | ((__uint32_t
)(v->ls_id) & 0xff00) << 8 | ((__uint32_t)(v->
ls_id) & 0xff0000) >> 8 | ((__uint32_t)(v->ls_id
) & 0xff000000) >> 24) : __swap32md(v->ls_id))
& v->lsa->data.net.mask;
223 adv_rtr.s_addr = htonl(v->adv_rtr)(__uint32_t)(__builtin_constant_p(v->adv_rtr) ? (__uint32_t
)(((__uint32_t)(v->adv_rtr) & 0xff) << 24 | ((__uint32_t
)(v->adv_rtr) & 0xff00) << 8 | ((__uint32_t)(v->
adv_rtr) & 0xff0000) >> 8 | ((__uint32_t)(v->adv_rtr
) & 0xff000000) >> 24) : __swap32md(v->adv_rtr))
;
224 rt_update(addr, mask2prefixlen(v->lsa->data.net.mask),
225 &v->nexthop, v->type, v->cost, 0, area->id, adv_rtr,
226 PT_INTRA_AREA, DT_NET, 0, 0);
227 break;
228 case LSA_TYPE_SUM_NETWORK3:
229 case LSA_TYPE_SUM_ROUTER4:
230 /* if ABR only look at area 0.0.0.0 LSA */
231 if (area_border_router(conf) && area->id.s_addr != INADDR_ANY((u_int32_t)(0x00000000)))
232 return;
233
234 /* ignore self-originated stuff */
235 if (v->self)
236 return;
237
238 /* TODO type 3 area address range check */
239
240 if ((w = lsa_find_area(area, LSA_TYPE_ROUTER1,
241 htonl(v->adv_rtr)(__uint32_t)(__builtin_constant_p(v->adv_rtr) ? (__uint32_t
)(((__uint32_t)(v->adv_rtr) & 0xff) << 24 | ((__uint32_t
)(v->adv_rtr) & 0xff00) << 8 | ((__uint32_t)(v->
adv_rtr) & 0xff0000) >> 8 | ((__uint32_t)(v->adv_rtr
) & 0xff000000) >> 24) : __swap32md(v->adv_rtr))
,
242 htonl(v->adv_rtr)(__uint32_t)(__builtin_constant_p(v->adv_rtr) ? (__uint32_t
)(((__uint32_t)(v->adv_rtr) & 0xff) << 24 | ((__uint32_t
)(v->adv_rtr) & 0xff00) << 8 | ((__uint32_t)(v->
adv_rtr) & 0xff0000) >> 8 | ((__uint32_t)(v->adv_rtr
) & 0xff000000) >> 24) : __swap32md(v->adv_rtr))
)) == NULL((void*)0))
243 return;
244
245 /* copy nexthops */
246 vertex_nexthop_clear(v); /* XXX needed ??? */
247 TAILQ_FOREACH(vn, &w->nexthop, entry)for((vn) = ((&w->nexthop)->tqh_first); (vn) != ((void
*)0); (vn) = ((vn)->entry.tqe_next))
248 vertex_nexthop_add(v, w, vn->nexthop.s_addr);
249
250 v->cost = w->cost +
251 (ntohl(v->lsa->data.sum.metric)(__uint32_t)(__builtin_constant_p(v->lsa->data.sum.metric
) ? (__uint32_t)(((__uint32_t)(v->lsa->data.sum.metric)
& 0xff) << 24 | ((__uint32_t)(v->lsa->data.sum
.metric) & 0xff00) << 8 | ((__uint32_t)(v->lsa->
data.sum.metric) & 0xff0000) >> 8 | ((__uint32_t)(v
->lsa->data.sum.metric) & 0xff000000) >> 24) :
__swap32md(v->lsa->data.sum.metric))
& LSA_METRIC_MASK0x00ffffff);
252
253 if (v->cost >= LS_INFINITY0xffffff)
254 return;
255
256 adv_rtr.s_addr = htonl(v->adv_rtr)(__uint32_t)(__builtin_constant_p(v->adv_rtr) ? (__uint32_t
)(((__uint32_t)(v->adv_rtr) & 0xff) << 24 | ((__uint32_t
)(v->adv_rtr) & 0xff00) << 8 | ((__uint32_t)(v->
adv_rtr) & 0xff0000) >> 8 | ((__uint32_t)(v->adv_rtr
) & 0xff000000) >> 24) : __swap32md(v->adv_rtr))
;
257 if (v->type == LSA_TYPE_SUM_NETWORK3) {
258 addr.s_addr = htonl(v->ls_id)(__uint32_t)(__builtin_constant_p(v->ls_id) ? (__uint32_t)
(((__uint32_t)(v->ls_id) & 0xff) << 24 | ((__uint32_t
)(v->ls_id) & 0xff00) << 8 | ((__uint32_t)(v->
ls_id) & 0xff0000) >> 8 | ((__uint32_t)(v->ls_id
) & 0xff000000) >> 24) : __swap32md(v->ls_id))
& v->lsa->data.sum.mask;
259 rt_update(addr, mask2prefixlen(v->lsa->data.sum.mask),
260 &v->nexthop, v->type, v->cost, 0, area->id, adv_rtr,
261 PT_INTER_AREA, DT_NET, 0, 0);
262 } else {
263 addr.s_addr = htonl(v->ls_id)(__uint32_t)(__builtin_constant_p(v->ls_id) ? (__uint32_t)
(((__uint32_t)(v->ls_id) & 0xff) << 24 | ((__uint32_t
)(v->ls_id) & 0xff00) << 8 | ((__uint32_t)(v->
ls_id) & 0xff0000) >> 8 | ((__uint32_t)(v->ls_id
) & 0xff000000) >> 24) : __swap32md(v->ls_id))
;
264 rt_update(addr, 32, &v->nexthop, v->type, v->cost, 0,
265 area->id, adv_rtr, PT_INTER_AREA, DT_RTR,
266 v->lsa->data.rtr.flags, 0);
267 }
268
269 break;
270 case LSA_TYPE_AREA_OPAQ10:
271 /* nothing to calculate */
272 break;
273 default:
274 /* as-external LSA are stored in a different tree */
275 fatalx("rt_calc: invalid LSA type");
276 }
277}
278
279void
280asext_calc(struct vertex *v)
281{
282 struct rt_node *r;
283 struct rt_nexthop *rn;
284 u_int32_t cost2;
285 struct in_addr addr, adv_rtr, a;
286 enum path_type type;
287
288 lsa_age(v);
289 if (ntohs(v->lsa->hdr.age)(__uint16_t)(__builtin_constant_p(v->lsa->hdr.age) ? (__uint16_t
)(((__uint16_t)(v->lsa->hdr.age) & 0xffU) << 8
| ((__uint16_t)(v->lsa->hdr.age) & 0xff00U) >>
8) : __swap16md(v->lsa->hdr.age))
== MAX_AGE3600 ||
290 (ntohl(v->lsa->data.asext.metric)(__uint32_t)(__builtin_constant_p(v->lsa->data.asext.metric
) ? (__uint32_t)(((__uint32_t)(v->lsa->data.asext.metric
) & 0xff) << 24 | ((__uint32_t)(v->lsa->data.
asext.metric) & 0xff00) << 8 | ((__uint32_t)(v->
lsa->data.asext.metric) & 0xff0000) >> 8 | ((__uint32_t
)(v->lsa->data.asext.metric) & 0xff000000) >>
24) : __swap32md(v->lsa->data.asext.metric))
& LSA_METRIC_MASK0x00ffffff) >=
291 LS_INFINITY0xffffff)
292 return;
293
294 switch (v->type) {
295 case LSA_TYPE_EXTERNAL5:
296 /* ignore self-originated stuff */
297 if (v->self)
298 return;
299
300 if ((r = rt_lookup(DT_RTR, htonl(v->adv_rtr)(__uint32_t)(__builtin_constant_p(v->adv_rtr) ? (__uint32_t
)(((__uint32_t)(v->adv_rtr) & 0xff) << 24 | ((__uint32_t
)(v->adv_rtr) & 0xff00) << 8 | ((__uint32_t)(v->
adv_rtr) & 0xff0000) >> 8 | ((__uint32_t)(v->adv_rtr
) & 0xff000000) >> 24) : __swap32md(v->adv_rtr))
)) == NULL((void*)0))
301 return;
302
303 /* XXX RFC1583Compatibility */
304 if (v->lsa->data.asext.fw_addr != 0 &&
305 (r = rt_lookup(DT_NET, v->lsa->data.asext.fw_addr)) == NULL((void*)0))
306 return;
307
308 if (v->lsa->data.asext.fw_addr != 0 &&
309 r->p_type != PT_INTRA_AREA &&
310 r->p_type != PT_INTER_AREA)
311 return;
312
313 if (ntohl(v->lsa->data.asext.metric)(__uint32_t)(__builtin_constant_p(v->lsa->data.asext.metric
) ? (__uint32_t)(((__uint32_t)(v->lsa->data.asext.metric
) & 0xff) << 24 | ((__uint32_t)(v->lsa->data.
asext.metric) & 0xff00) << 8 | ((__uint32_t)(v->
lsa->data.asext.metric) & 0xff0000) >> 8 | ((__uint32_t
)(v->lsa->data.asext.metric) & 0xff000000) >>
24) : __swap32md(v->lsa->data.asext.metric))
& LSA_ASEXT_E_FLAG0x80000000) {
314 v->cost = r->cost;
315 cost2 = ntohl(v->lsa->data.asext.metric)(__uint32_t)(__builtin_constant_p(v->lsa->data.asext.metric
) ? (__uint32_t)(((__uint32_t)(v->lsa->data.asext.metric
) & 0xff) << 24 | ((__uint32_t)(v->lsa->data.
asext.metric) & 0xff00) << 8 | ((__uint32_t)(v->
lsa->data.asext.metric) & 0xff0000) >> 8 | ((__uint32_t
)(v->lsa->data.asext.metric) & 0xff000000) >>
24) : __swap32md(v->lsa->data.asext.metric))
&
316 LSA_METRIC_MASK0x00ffffff;
317 type = PT_TYPE2_EXT;
318 } else {
319 v->cost = r->cost + (ntohl(v->lsa->data.asext.metric)(__uint32_t)(__builtin_constant_p(v->lsa->data.asext.metric
) ? (__uint32_t)(((__uint32_t)(v->lsa->data.asext.metric
) & 0xff) << 24 | ((__uint32_t)(v->lsa->data.
asext.metric) & 0xff00) << 8 | ((__uint32_t)(v->
lsa->data.asext.metric) & 0xff0000) >> 8 | ((__uint32_t
)(v->lsa->data.asext.metric) & 0xff000000) >>
24) : __swap32md(v->lsa->data.asext.metric))
&
320 LSA_METRIC_MASK0x00ffffff);
321 cost2 = 0;
322 type = PT_TYPE1_EXT;
323 }
324
325 a.s_addr = 0;
326 adv_rtr.s_addr = htonl(v->adv_rtr)(__uint32_t)(__builtin_constant_p(v->adv_rtr) ? (__uint32_t
)(((__uint32_t)(v->adv_rtr) & 0xff) << 24 | ((__uint32_t
)(v->adv_rtr) & 0xff00) << 8 | ((__uint32_t)(v->
adv_rtr) & 0xff0000) >> 8 | ((__uint32_t)(v->adv_rtr
) & 0xff000000) >> 24) : __swap32md(v->adv_rtr))
;
327 addr.s_addr = htonl(v->ls_id)(__uint32_t)(__builtin_constant_p(v->ls_id) ? (__uint32_t)
(((__uint32_t)(v->ls_id) & 0xff) << 24 | ((__uint32_t
)(v->ls_id) & 0xff00) << 8 | ((__uint32_t)(v->
ls_id) & 0xff0000) >> 8 | ((__uint32_t)(v->ls_id
) & 0xff000000) >> 24) : __swap32md(v->ls_id))
& v->lsa->data.asext.mask;
328
329 vertex_nexthop_clear(v);
330 TAILQ_FOREACH(rn, &r->nexthop, entry)for((rn) = ((&r->nexthop)->tqh_first); (rn) != ((void
*)0); (rn) = ((rn)->entry.tqe_next))
{
331 if (rn->invalid)
332 continue;
333
334 /*
335 * if a fw_addr is specified and the nexthop
336 * is directly connected then it is possible to
337 * send traffic directly to fw_addr.
338 */
339 if (v->lsa->data.asext.fw_addr != 0 && rn->connected)
340 vertex_nexthop_add(v, NULL((void*)0),
341 v->lsa->data.asext.fw_addr);
342 else
343 vertex_nexthop_add(v, NULL((void*)0), rn->nexthop.s_addr);
344 }
345
346 rt_update(addr, mask2prefixlen(v->lsa->data.asext.mask),
347 &v->nexthop, v->type, v->cost, cost2, a, adv_rtr, type,
348 DT_NET, 0, ntohl(v->lsa->data.asext.ext_tag)(__uint32_t)(__builtin_constant_p(v->lsa->data.asext.ext_tag
) ? (__uint32_t)(((__uint32_t)(v->lsa->data.asext.ext_tag
) & 0xff) << 24 | ((__uint32_t)(v->lsa->data.
asext.ext_tag) & 0xff00) << 8 | ((__uint32_t)(v->
lsa->data.asext.ext_tag) & 0xff0000) >> 8 | ((__uint32_t
)(v->lsa->data.asext.ext_tag) & 0xff000000) >>
24) : __swap32md(v->lsa->data.asext.ext_tag))
);
349 break;
350 case LSA_TYPE_AS_OPAQ11:
351 /* nothing to calculate */
352 break;
353 default:
354 fatalx("asext_calc: invalid LSA type");
355 }
356}
357
358void
359spf_tree_clr(struct area *area)
360{
361 struct lsa_tree *tree = &area->lsa_tree;
362 struct vertex *v;
363
364 RB_FOREACH(v, lsa_tree, tree)for ((v) = lsa_tree_RB_MINMAX(tree, -1); (v) != ((void*)0); (
v) = lsa_tree_RB_NEXT(v))
{
365 v->cost = LS_INFINITY0xffffff;
366 vertex_nexthop_clear(v);
367 }
368}
369
370void
371calc_nexthop(struct vertex *dst, struct vertex *parent,
372 struct area *area, struct lsa_rtr_link *rtr_link)
373{
374 struct v_nexthop *vn;
375 struct iface *iface;
376 struct rde_nbr *nbr;
377 int i;
378
379 /* case 1 */
380 if (parent == spf_root) {
37
Assuming 'parent' is equal to 'spf_root'
38
Taking true branch
381 switch (dst->type) {
39
Control jumps to 'case 2:' at line 397
382 case LSA_TYPE_ROUTER1:
383 if (rtr_link->type != LINK_TYPE_POINTTOPOINT1)
384 fatalx("inconsistent SPF tree");
385 LIST_FOREACH(iface, &area->iface_list, entry) {
386 if (rtr_link->data != iface->addr.s_addr)
387 continue;
388 LIST_FOREACH(nbr, &area->nbr_list, entry) {
389 if (nbr->ifindex == iface->ifindex) {
390 vertex_nexthop_add(dst, parent,
391 nbr->addr.s_addr);
392 return;
393 }
394 }
395 }
396 fatalx("no interface found for interface");
397 case LSA_TYPE_NETWORK2:
398 switch (rtr_link->type) {
40
Access to field 'type' results in a dereference of a null pointer (loaded from variable 'rtr_link')
399 case LINK_TYPE_POINTTOPOINT1:
400 case LINK_TYPE_STUB_NET3:
401 /* ignore */
402 break;
403 case LINK_TYPE_TRANSIT_NET2:
404 if ((htonl(dst->ls_id) &
405 dst->lsa->data.net.mask) ==
406 (rtr_link->data &
407 dst->lsa->data.net.mask)) {
408 vertex_nexthop_add(dst, parent,
409 rtr_link->data);
410 }
411 break;
412 default:
413 fatalx("calc_nexthop: invalid link "
414 "type");
415 }
416 return;
417 default:
418 fatalx("calc_nexthop: invalid dst type");
419 }
420 return;
421 }
422
423 /* case 2 */
424 if (parent->type == LSA_TYPE_NETWORK2 && dst->type == LSA_TYPE_ROUTER1) {
425 TAILQ_FOREACH(vn, &parent->nexthop, entry) {
426 if (vn->prev == spf_root) {
427 for (i = 0; i < lsa_num_links(dst); i++) {
428 rtr_link = get_rtr_link(dst, i);
429 if ((rtr_link->type ==
430 LINK_TYPE_TRANSIT_NET2) &&
431 (rtr_link->data &
432 parent->lsa->data.net.mask) ==
433 (htonl(parent->ls_id) &
434 parent->lsa->data.net.mask))
435 vertex_nexthop_add(dst, parent,
436 rtr_link->data);
437 }
438 } else {
439 vertex_nexthop_add(dst, parent,
440 vn->nexthop.s_addr);
441 }
442 }
443 return;
444 }
445
446 /* case 3 */
447 TAILQ_FOREACH(vn, &parent->nexthop, entry)
448 vertex_nexthop_add(dst, parent, vn->nexthop.s_addr);
449}
450
451/* candidate list */
452void
453cand_list_init(void)
454{
455 TAILQ_INIT(&cand_list)do { (&cand_list)->tqh_first = ((void*)0); (&cand_list
)->tqh_last = &(&cand_list)->tqh_first; } while
(0)
;
456}
457
458void
459cand_list_add(struct vertex *v)
460{
461 struct vertex *c = NULL((void*)0);
462
463 TAILQ_FOREACH(c, &cand_list, cand)for((c) = ((&cand_list)->tqh_first); (c) != ((void*)0)
; (c) = ((c)->cand.tqe_next))
{
464 if (c->cost > v->cost) {
465 TAILQ_INSERT_BEFORE(c, v, cand)do { (v)->cand.tqe_prev = (c)->cand.tqe_prev; (v)->cand
.tqe_next = (c); *(c)->cand.tqe_prev = (v); (c)->cand.tqe_prev
= &(v)->cand.tqe_next; } while (0)
;
466 return;
467 } else if (c->cost == v->cost && c->type == LSA_TYPE_ROUTER1 &&
468 v->type == LSA_TYPE_NETWORK2) {
469 TAILQ_INSERT_BEFORE(c, v, cand)do { (v)->cand.tqe_prev = (c)->cand.tqe_prev; (v)->cand
.tqe_next = (c); *(c)->cand.tqe_prev = (v); (c)->cand.tqe_prev
= &(v)->cand.tqe_next; } while (0)
;
470 return;
471 }
472 }
473 TAILQ_INSERT_TAIL(&cand_list, v, cand)do { (v)->cand.tqe_next = ((void*)0); (v)->cand.tqe_prev
= (&cand_list)->tqh_last; *(&cand_list)->tqh_last
= (v); (&cand_list)->tqh_last = &(v)->cand.tqe_next
; } while (0)
;
474}
475
476struct vertex *
477cand_list_pop(void)
478{
479 struct vertex *c;
480
481 if ((c = TAILQ_FIRST(&cand_list)((&cand_list)->tqh_first)) != NULL((void*)0)) {
482 TAILQ_REMOVE(&cand_list, c, cand)do { if (((c)->cand.tqe_next) != ((void*)0)) (c)->cand.
tqe_next->cand.tqe_prev = (c)->cand.tqe_prev; else (&
cand_list)->tqh_last = (c)->cand.tqe_prev; *(c)->cand
.tqe_prev = (c)->cand.tqe_next; ; ; } while (0)
;
483 }
484
485 return (c);
486}
487
488int
489cand_list_present(struct vertex *v)
490{
491 struct vertex *c;
492
493 TAILQ_FOREACH(c, &cand_list, cand)for((c) = ((&cand_list)->tqh_first); (c) != ((void*)0)
; (c) = ((c)->cand.tqe_next))
{
494 if (c == v)
495 return (1);
496 }
497
498 return (0);
499}
500
501void
502cand_list_clr(void)
503{
504 struct vertex *c;
505
506 while ((c = TAILQ_FIRST(&cand_list)((&cand_list)->tqh_first)) != NULL((void*)0)) {
507 TAILQ_REMOVE(&cand_list, c, cand)do { if (((c)->cand.tqe_next) != ((void*)0)) (c)->cand.
tqe_next->cand.tqe_prev = (c)->cand.tqe_prev; else (&
cand_list)->tqh_last = (c)->cand.tqe_prev; *(c)->cand
.tqe_prev = (c)->cand.tqe_next; ; ; } while (0)
;
508 }
509}
510
511/* timers */
512/* ARGSUSED */
513void
514spf_timer(int fd, short event, void *arg)
515{
516 struct vertex *v;
517 struct ospfd_conf *conf = arg;
518 struct area *area;
519 struct rt_node *r;
520
521 switch (conf->spf_state) {
1
Control jumps to 'case SPF_DELAY:' at line 527
522 case SPF_IDLE:
523 fatalx("spf_timer: invalid state IDLE");
524 case SPF_HOLDQUEUE:
525 conf->spf_state = SPF_DELAY;
526 /* FALLTHROUGH */
527 case SPF_DELAY:
528 LIST_FOREACH(area, &conf->area_list, entry)for((area) = ((&conf->area_list)->lh_first); (area)
!= ((void*)0); (area) = ((area)->entry.le_next))
{
2
Assuming 'area' is not equal to null
3
Loop condition is true. Entering loop body
529 if (area->dirty) {
4
Assuming field 'dirty' is not equal to 0
5
Taking true branch
530 /* invalidate RIB entries of this area */
531 rt_invalidate(area);
532
533 /* calculate SPF tree */
534 spf_calc(area);
6
Calling 'spf_calc'
535
536 /* calculate route table */
537 RB_FOREACH(v, lsa_tree, &area->lsa_tree)for ((v) = lsa_tree_RB_MINMAX(&area->lsa_tree, -1); (v
) != ((void*)0); (v) = lsa_tree_RB_NEXT(v))
{
538 rt_calc(v, area, conf);
539 }
540
541 area->dirty = 0;
542 }
543 }
544
545 /* calculate as-external routes, first invalidate them */
546 rt_invalidate(NULL((void*)0));
547 RB_FOREACH(v, lsa_tree, &asext_tree)for ((v) = lsa_tree_RB_MINMAX(&asext_tree, -1); (v) != ((
void*)0); (v) = lsa_tree_RB_NEXT(v))
{
548 asext_calc(v);
549 }
550
551 RB_FOREACH(r, rt_tree, &rt)for ((r) = rt_tree_RB_MINMAX(&rt, -1); (r) != ((void*)0);
(r) = rt_tree_RB_NEXT(r))
{
552 LIST_FOREACH(area, &conf->area_list, entry)for((area) = ((&conf->area_list)->lh_first); (area)
!= ((void*)0); (area) = ((area)->entry.le_next))
553 rde_summary_update(r, area);
554
555 if (r->d_type != DT_NET)
556 continue;
557
558 if (r->invalid)
559 rde_send_delete_kroute(r);
560 else
561 rde_send_change_kroute(r);
562 }
563
564 LIST_FOREACH(area, &conf->area_list, entry)for((area) = ((&conf->area_list)->lh_first); (area)
!= ((void*)0); (area) = ((area)->entry.le_next))
{
565 lsa_generate_stub_sums(area);
566 lsa_remove_invalid_sums(area);
567 }
568
569 start_spf_holdtimer(conf);
570 break;
571 case SPF_HOLD:
572 conf->spf_state = SPF_IDLE;
573 break;
574 default:
575 fatalx("spf_timer: unknown state");
576 }
577}
578
579void
580start_spf_timer(void)
581{
582 struct timeval tv;
583
584 switch (rdeconf->spf_state) {
585 case SPF_IDLE:
586 timerclear(&tv)(&tv)->tv_sec = (&tv)->tv_usec = 0;
587 tv.tv_sec = rdeconf->spf_delay / 1000;
588 tv.tv_usec = (rdeconf->spf_delay % 1000) * 1000;
589 rdeconf->spf_state = SPF_DELAY;
590 if (evtimer_add(&rdeconf->ev, &tv)event_add(&rdeconf->ev, &tv) == -1)
591 fatal("start_spf_timer");
592 break;
593 case SPF_DELAY:
594 /* ignore */
595 break;
596 case SPF_HOLD:
597 rdeconf->spf_state = SPF_HOLDQUEUE;
598 break;
599 case SPF_HOLDQUEUE:
600 /* ignore */
601 break;
602 default:
603 fatalx("start_spf_timer: invalid spf_state");
604 }
605}
606
607void
608stop_spf_timer(struct ospfd_conf *conf)
609{
610 if (evtimer_del(&conf->ev)event_del(&conf->ev) == -1)
611 fatal("stop_spf_timer");
612}
613
614void
615start_spf_holdtimer(struct ospfd_conf *conf)
616{
617 struct timeval tv;
618
619 switch (conf->spf_state) {
620 case SPF_DELAY:
621 timerclear(&tv)(&tv)->tv_sec = (&tv)->tv_usec = 0;
622 tv.tv_sec = rdeconf->spf_hold_time / 1000;
623 tv.tv_usec = (rdeconf->spf_hold_time % 1000) * 1000;
624 conf->spf_state = SPF_HOLD;
625 if (evtimer_add(&conf->ev, &tv)event_add(&conf->ev, &tv) == -1)
626 fatal("start_spf_holdtimer");
627 break;
628 case SPF_IDLE:
629 case SPF_HOLD:
630 case SPF_HOLDQUEUE:
631 fatalx("start_spf_holdtimer: invalid state");
632 default:
633 fatalx("start_spf_holdtimer: unknown state");
634 }
635}
636
637/* route table */
638void
639rt_init(void)
640{
641 RB_INIT(&rt)do { (&rt)->rbh_root = ((void*)0); } while (0);
642}
643
644int
645rt_compare(struct rt_node *a, struct rt_node *b)
646{
647 if (ntohl(a->prefix.s_addr)(__uint32_t)(__builtin_constant_p(a->prefix.s_addr) ? (__uint32_t
)(((__uint32_t)(a->prefix.s_addr) & 0xff) << 24 |
((__uint32_t)(a->prefix.s_addr) & 0xff00) << 8 |
((__uint32_t)(a->prefix.s_addr) & 0xff0000) >> 8
| ((__uint32_t)(a->prefix.s_addr) & 0xff000000) >>
24) : __swap32md(a->prefix.s_addr))
< ntohl(b->prefix.s_addr)(__uint32_t)(__builtin_constant_p(b->prefix.s_addr) ? (__uint32_t
)(((__uint32_t)(b->prefix.s_addr) & 0xff) << 24 |
((__uint32_t)(b->prefix.s_addr) & 0xff00) << 8 |
((__uint32_t)(b->prefix.s_addr) & 0xff0000) >> 8
| ((__uint32_t)(b->prefix.s_addr) & 0xff000000) >>
24) : __swap32md(b->prefix.s_addr))
)
648 return (-1);
649 if (ntohl(a->prefix.s_addr)(__uint32_t)(__builtin_constant_p(a->prefix.s_addr) ? (__uint32_t
)(((__uint32_t)(a->prefix.s_addr) & 0xff) << 24 |
((__uint32_t)(a->prefix.s_addr) & 0xff00) << 8 |
((__uint32_t)(a->prefix.s_addr) & 0xff0000) >> 8
| ((__uint32_t)(a->prefix.s_addr) & 0xff000000) >>
24) : __swap32md(a->prefix.s_addr))
> ntohl(b->prefix.s_addr)(__uint32_t)(__builtin_constant_p(b->prefix.s_addr) ? (__uint32_t
)(((__uint32_t)(b->prefix.s_addr) & 0xff) << 24 |
((__uint32_t)(b->prefix.s_addr) & 0xff00) << 8 |
((__uint32_t)(b->prefix.s_addr) & 0xff0000) >> 8
| ((__uint32_t)(b->prefix.s_addr) & 0xff000000) >>
24) : __swap32md(b->prefix.s_addr))
)
650 return (1);
651 if (a->prefixlen < b->prefixlen)
652 return (-1);
653 if (a->prefixlen > b->prefixlen)
654 return (1);
655 if (a->d_type > b->d_type)
656 return (-1);
657 if (a->d_type < b->d_type)
658 return (1);
659 return (0);
660}
661
662struct rt_node *
663rt_find(in_addr_t prefix, u_int8_t prefixlen, enum dst_type d_type)
664{
665 struct rt_node s;
666
667 s.prefix.s_addr = prefix;
668 s.prefixlen = prefixlen;
669 s.d_type = d_type;
670
671 return (RB_FIND(rt_tree, &rt, &s)rt_tree_RB_FIND(&rt, &s));
672}
673
674int
675rt_insert(struct rt_node *r)
676{
677 if (RB_INSERT(rt_tree, &rt, r)rt_tree_RB_INSERT(&rt, r) != NULL((void*)0)) {
678 log_warnx("rt_insert failed for %s/%u",
679 inet_ntoa(r->prefix), r->prefixlen);
680 free(r);
681 return (-1);
682 }
683
684 return (0);
685}
686
687int
688rt_remove(struct rt_node *r)
689{
690 if (RB_REMOVE(rt_tree, &rt, r)rt_tree_RB_REMOVE(&rt, r) == NULL((void*)0)) {
691 log_warnx("rt_remove failed for %s/%u",
692 inet_ntoa(r->prefix), r->prefixlen);
693 return (-1);
694 }
695
696 rt_nexthop_clear(r);
697 free(r);
698 return (0);
699}
700
701void
702rt_invalidate(struct area *area)
703{
704 struct rt_node *r, *nr;
705 struct rt_nexthop *rn, *nrn;
706
707 for (r = RB_MIN(rt_tree, &rt)rt_tree_RB_MINMAX(&rt, -1); r != NULL((void*)0); r = nr) {
708 nr = RB_NEXT(rt_tree, &rt, r)rt_tree_RB_NEXT(r);
709 if (area == NULL((void*)0)) {
710 /* look only at as_ext routes */
711 if (r->p_type != PT_TYPE1_EXT &&
712 r->p_type != PT_TYPE2_EXT)
713 continue;
714 } else {
715 /* ignore all as_ext routes */
716 if (r->p_type == PT_TYPE1_EXT ||
717 r->p_type == PT_TYPE2_EXT)
718 continue;
719
720 /* look only at routes matching the area */
721 if (r->area.s_addr != area->id.s_addr)
722 continue;
723 }
724 r->invalid = 1;
725 for (rn = TAILQ_FIRST(&r->nexthop)((&r->nexthop)->tqh_first); rn != NULL((void*)0); rn = nrn) {
726 nrn = TAILQ_NEXT(rn, entry)((rn)->entry.tqe_next);
727 if (rn->invalid) {
728 TAILQ_REMOVE(&r->nexthop, rn, entry)do { if (((rn)->entry.tqe_next) != ((void*)0)) (rn)->entry
.tqe_next->entry.tqe_prev = (rn)->entry.tqe_prev; else (
&r->nexthop)->tqh_last = (rn)->entry.tqe_prev; *
(rn)->entry.tqe_prev = (rn)->entry.tqe_next; ; ; } while
(0)
;
729 free(rn);
730 } else
731 rn->invalid = 1;
732 }
733 if (TAILQ_EMPTY(&r->nexthop)(((&r->nexthop)->tqh_first) == ((void*)0)))
734 rt_remove(r);
735 }
736}
737
738void
739rt_nexthop_clear(struct rt_node *r)
740{
741 struct rt_nexthop *rn;
742
743 while ((rn = TAILQ_FIRST(&r->nexthop)((&r->nexthop)->tqh_first)) != NULL((void*)0)) {
744 TAILQ_REMOVE(&r->nexthop, rn, entry)do { if (((rn)->entry.tqe_next) != ((void*)0)) (rn)->entry
.tqe_next->entry.tqe_prev = (rn)->entry.tqe_prev; else (
&r->nexthop)->tqh_last = (rn)->entry.tqe_prev; *
(rn)->entry.tqe_prev = (rn)->entry.tqe_next; ; ; } while
(0)
;
745 free(rn);
746 }
747}
748
749void
750rt_nexthop_add(struct rt_node *r, struct v_nexthead *vnh, u_int8_t type,
751 struct in_addr adv_rtr)
752{
753 struct v_nexthop *vn;
754 struct rt_nexthop *rn;
755 struct timespec now;
756
757 TAILQ_FOREACH(vn, vnh, entry)for((vn) = ((vnh)->tqh_first); (vn) != ((void*)0); (vn) = (
(vn)->entry.tqe_next))
{
758 TAILQ_FOREACH(rn, &r->nexthop, entry)for((rn) = ((&r->nexthop)->tqh_first); (rn) != ((void
*)0); (rn) = ((rn)->entry.tqe_next))
{
759 if (rn->nexthop.s_addr != vn->nexthop.s_addr)
760 continue;
761
762 rn->adv_rtr.s_addr = adv_rtr.s_addr;
763 rn->connected = (type == LSA_TYPE_NETWORK2 &&
764 vn->prev == spf_root) || (vn->nexthop.s_addr == 0);
765 rn->invalid = 0;
766
767 r->invalid = 0;
768 break;
769 }
770 if (rn)
771 continue;
772
773 if ((rn = calloc(1, sizeof(struct rt_nexthop))) == NULL((void*)0))
774 fatal("rt_nexthop_add");
775
776 clock_gettime(CLOCK_MONOTONIC3, &now);
777 rn->nexthop.s_addr = vn->nexthop.s_addr;
778 rn->adv_rtr.s_addr = adv_rtr.s_addr;
779 rn->uptime = now.tv_sec;
780 rn->connected = (type == LSA_TYPE_NETWORK2 &&
781 vn->prev == spf_root) || (vn->nexthop.s_addr == 0);
782 rn->invalid = 0;
783
784 r->invalid = 0;
785 TAILQ_INSERT_TAIL(&r->nexthop, rn, entry)do { (rn)->entry.tqe_next = ((void*)0); (rn)->entry.tqe_prev
= (&r->nexthop)->tqh_last; *(&r->nexthop)->
tqh_last = (rn); (&r->nexthop)->tqh_last = &(rn
)->entry.tqe_next; } while (0)
;
786 }
787}
788
789void
790rt_clear(void)
791{
792 struct rt_node *r;
793
794 while ((r = RB_MIN(rt_tree, &rt)rt_tree_RB_MINMAX(&rt, -1)) != NULL((void*)0))
795 rt_remove(r);
796}
797
798void
799rt_dump(struct in_addr area, pid_t pid, u_int8_t r_type)
800{
801 static struct ctl_rt rtctl;
802 struct timespec now;
803 struct rt_node *r;
804 struct rt_nexthop *rn;
805
806 clock_gettime(CLOCK_MONOTONIC3, &now);
807
808 RB_FOREACH(r, rt_tree, &rt)for ((r) = rt_tree_RB_MINMAX(&rt, -1); (r) != ((void*)0);
(r) = rt_tree_RB_NEXT(r))
{
809 if (r->invalid)
810 continue;
811
812 if (r->area.s_addr != area.s_addr)
813 continue;
814
815 switch (r_type) {
816 case RIB_RTR:
817 if (r->d_type != DT_RTR)
818 continue;
819 break;
820 case RIB_NET:
821 if (r->d_type != DT_NET)
822 continue;
823 if (r->p_type == PT_TYPE1_EXT ||
824 r->p_type == PT_TYPE2_EXT)
825 continue;
826 break;
827 case RIB_EXT:
828 if (r->p_type != PT_TYPE1_EXT &&
829 r->p_type != PT_TYPE2_EXT)
830 continue;
831 break;
832 default:
833 fatalx("rt_dump: invalid RIB type");
834 }
835
836 bzero(&rtctl, sizeof(rtctl));
837 rtctl.prefix.s_addr = r->prefix.s_addr;
838 rtctl.area.s_addr = r->area.s_addr;
839 rtctl.cost = r->cost;
840 rtctl.cost2 = r->cost2;
841 rtctl.p_type = r->p_type;
842 rtctl.d_type = r->d_type;
843 rtctl.flags = r->flags;
844 rtctl.prefixlen = r->prefixlen;
845
846 TAILQ_FOREACH(rn, &r->nexthop, entry)for((rn) = ((&r->nexthop)->tqh_first); (rn) != ((void
*)0); (rn) = ((rn)->entry.tqe_next))
{
847 if (rn->invalid)
848 continue;
849
850 rtctl.connected = rn->connected;
851 rtctl.nexthop.s_addr = rn->nexthop.s_addr;
852 rtctl.adv_rtr.s_addr = rn->adv_rtr.s_addr;
853 rtctl.uptime = now.tv_sec - rn->uptime;
854
855 rde_imsg_compose_ospfe(IMSG_CTL_SHOW_RIB, 0, pid,
856 &rtctl, sizeof(rtctl));
857 }
858 }
859}
860
861void
862rt_update(struct in_addr prefix, u_int8_t prefixlen, struct v_nexthead *vnh,
863 u_int8_t v_type, u_int32_t cost, u_int32_t cost2, struct in_addr area,
864 struct in_addr adv_rtr, enum path_type p_type, enum dst_type d_type,
865 u_int8_t flags, u_int32_t tag)
866{
867 struct rt_node *rte;
868 struct rt_nexthop *rn;
869 int better = 0, equal = 0;
870
871 if ((rte = rt_find(prefix.s_addr, prefixlen, d_type)) == NULL((void*)0)) {
872 if ((rte = calloc(1, sizeof(struct rt_node))) == NULL((void*)0))
873 fatal("rt_update");
874
875 TAILQ_INIT(&rte->nexthop)do { (&rte->nexthop)->tqh_first = ((void*)0); (&
rte->nexthop)->tqh_last = &(&rte->nexthop)->
tqh_first; } while (0)
;
876 rte->prefix.s_addr = prefix.s_addr;
877 rte->prefixlen = prefixlen;
878 rte->cost = cost;
879 rte->cost2 = cost2;
880 rte->area = area;
881 rte->p_type = p_type;
882 rte->d_type = d_type;
883 rte->flags = flags;
884 rte->ext_tag = tag;
885
886 rt_nexthop_add(rte, vnh, v_type, adv_rtr);
887
888 rt_insert(rte);
889 } else {
890 /* order:
891 * 1. intra-area
892 * 2. inter-area
893 * 3. type 1 as ext
894 * 4. type 2 as ext
895 */
896 if (rte->invalid) /* everything is better than invalid */
897 better = 1;
898 else if (p_type < rte->p_type)
899 better = 1;
900 else if (p_type == rte->p_type)
901 switch (p_type) {
902 case PT_INTRA_AREA:
903 case PT_INTER_AREA:
904 if (cost < rte->cost)
905 better = 1;
906 else if (cost == rte->cost &&
907 rte->area.s_addr == area.s_addr)
908 equal = 1;
909 break;
910 case PT_TYPE1_EXT:
911 /* XXX rfc1583 compat */
912 if (cost < rte->cost)
913 better = 1;
914 else if (cost == rte->cost)
915 equal = 1;
916 break;
917 case PT_TYPE2_EXT:
918 if (cost2 < rte->cost2)
919 better = 1;
920 /* XXX rfc1583 compat */
921 else if (cost2 == rte->cost2 &&
922 cost < rte->cost)
923 better = 1;
924 else if (cost2 == rte->cost2 &&
925 cost == rte->cost)
926 equal = 1;
927 break;
928 }
929
930 if (better) {
931 TAILQ_FOREACH(rn, &rte->nexthop, entry)for((rn) = ((&rte->nexthop)->tqh_first); (rn) != ((
void*)0); (rn) = ((rn)->entry.tqe_next))
932 rn->invalid = 1;
933
934 rte->area = area;
935 rte->cost = cost;
936 rte->cost2 = cost2;
937 rte->p_type = p_type;
938 rte->flags = flags;
939 rte->ext_tag = tag;
940 }
941
942 if (equal || better)
943 rt_nexthop_add(rte, vnh, v_type, adv_rtr);
944 }
945}
946
947struct rt_node *
948rt_lookup(enum dst_type type, in_addr_t addr)
949{
950 struct rt_node *rn;
951 u_int8_t i = 32;
952
953 if (type == DT_RTR) {
954 rn = rt_find(addr, 32, type);
955 if (rn && rn->invalid == 0)
956 return (rn);
957 return (NULL((void*)0));
958 }
959
960 /* type == DT_NET */
961 do {
962 if ((rn = rt_find(addr & prefixlen2mask(i), i, type)) &&
963 rn->invalid == 0)
964 return (rn);
965 } while (i-- != 0);
966
967 return (NULL((void*)0));
968}
969
970/* router LSA links */
971struct lsa_rtr_link *
972get_rtr_link(struct vertex *v, int idx)
973{
974 struct lsa_rtr_link *rtr_link = NULL((void*)0);
975 char *buf = (char *)v->lsa;
976 u_int16_t i, off, nlinks;
977
978 if (v->type != LSA_TYPE_ROUTER1)
979 fatalx("get_rtr_link: invalid LSA type");
980
981 off = sizeof(v->lsa->hdr) + sizeof(struct lsa_rtr);
982
983 /* nlinks validated earlier by lsa_check() */
984 nlinks = lsa_num_links(v);
985 for (i = 0; i < nlinks; i++) {
986 rtr_link = (struct lsa_rtr_link *)(buf + off);
987 if (i == idx)
988 return (rtr_link);
989
990 off += sizeof(struct lsa_rtr_link) +
991 rtr_link->num_tos * sizeof(u_int32_t);
992 }
993
994 fatalx("get_rtr_link: index not found");
995}
996
997/* network LSA links */
998struct lsa_net_link *
999get_net_link(struct vertex *v, int idx)
1000{
1001 struct lsa_net_link *net_link = NULL((void*)0);
1002 char *buf = (char *)v->lsa;
1003 u_int16_t i, off, nlinks;
1004
1005 if (v->type != LSA_TYPE_NETWORK2)
1006 fatalx("get_net_link: invalid LSA type");
1007
1008 off = sizeof(v->lsa->hdr) + sizeof(u_int32_t);
1009
1010 /* nlinks validated earlier by lsa_check() */
1011 nlinks = lsa_num_links(v);
1012 for (i = 0; i < nlinks; i++) {
1013 net_link = (struct lsa_net_link *)(buf + off);
1014 if (i == idx)
1015 return (net_link);
1016
1017 off += sizeof(struct lsa_net_link);
1018 }
1019
1020 fatalx("get_net_link: index not found");
1021}
1022
1023/* misc */
1024int
1025linked(struct vertex *w, struct vertex *v)
1026{
1027 struct lsa_rtr_link *rtr_link = NULL((void*)0);
1028 struct lsa_net_link *net_link = NULL((void*)0);
1029 int i;
1030
1031 switch (w->type) {
19
Control jumps to 'case 1:' at line 1032
1032 case LSA_TYPE_ROUTER1:
1033 for (i = 0; i < lsa_num_links(w); i++) {
20
Assuming the condition is true
21
Loop condition is true. Entering loop body
1034 rtr_link = get_rtr_link(w, i);
1035 switch (v->type) {
22
Control jumps to 'case 2:' at line 1041
1036 case LSA_TYPE_ROUTER1:
1037 if (rtr_link->type == LINK_TYPE_POINTTOPOINT1 &&
1038 rtr_link->id == htonl(v->ls_id)(__uint32_t)(__builtin_constant_p(v->ls_id) ? (__uint32_t)
(((__uint32_t)(v->ls_id) & 0xff) << 24 | ((__uint32_t
)(v->ls_id) & 0xff00) << 8 | ((__uint32_t)(v->
ls_id) & 0xff0000) >> 8 | ((__uint32_t)(v->ls_id
) & 0xff000000) >> 24) : __swap32md(v->ls_id))
)
1039 return (1);
1040 break;
1041 case LSA_TYPE_NETWORK2:
1042 if (rtr_link->id == htonl(v->ls_id)(__uint32_t)(__builtin_constant_p(v->ls_id) ? (__uint32_t)
(((__uint32_t)(v->ls_id) & 0xff) << 24 | ((__uint32_t
)(v->ls_id) & 0xff00) << 8 | ((__uint32_t)(v->
ls_id) & 0xff0000) >> 8 | ((__uint32_t)(v->ls_id
) & 0xff000000) >> 24) : __swap32md(v->ls_id))
)
23
'?' condition is false
24
Assuming the condition is true
25
Taking true branch
1043 return (1);
26
Returning the value 1, which participates in a condition later
1044 break;
1045 default:
1046 fatalx("linked: invalid type");
1047 }
1048 }
1049 return (0);
1050 case LSA_TYPE_NETWORK2:
1051 for (i = 0; i < lsa_num_links(w); i++) {
1052 net_link = get_net_link(w, i);
1053 switch (v->type) {
1054 case LSA_TYPE_ROUTER1:
1055 if (net_link->att_rtr == htonl(v->ls_id)(__uint32_t)(__builtin_constant_p(v->ls_id) ? (__uint32_t)
(((__uint32_t)(v->ls_id) & 0xff) << 24 | ((__uint32_t
)(v->ls_id) & 0xff00) << 8 | ((__uint32_t)(v->
ls_id) & 0xff0000) >> 8 | ((__uint32_t)(v->ls_id
) & 0xff000000) >> 24) : __swap32md(v->ls_id))
)
1056 return (1);
1057 break;
1058 default:
1059 fatalx("linked: invalid type");
1060 }
1061 }
1062 return (0);
1063 default:
1064 fatalx("linked: invalid LSA type");
1065 }
1066
1067 return (0);
1068}
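
For reference, the rt route table in this file (declared with RB_HEAD()/RB_PROTOTYPE()/RB_GENERATE() near the top and driven through RB_INIT(), RB_FIND(), RB_INSERT(), RB_REMOVE() and RB_FOREACH() in rt_init(), rt_find(), rt_insert(), rt_remove() and rt_dump()) follows the standard <sys/tree.h> intrusive red-black tree idiom. A small self-contained sketch of that idiom, assuming a BSD system that provides <sys/tree.h>; demo_node, demo_cmp and demo_tree are illustrative names, not part of ospfd.

#include <sys/tree.h>
#include <err.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_node {
	RB_ENTRY(demo_node)	 entry;		/* intrusive tree linkage */
	int			 key;
};

static int
demo_cmp(struct demo_node *a, struct demo_node *b)
{
	if (a->key < b->key)
		return (-1);
	if (a->key > b->key)
		return (1);
	return (0);
}

RB_HEAD(demo_tree, demo_node) demo_root = RB_INITIALIZER(&demo_root);
RB_PROTOTYPE(demo_tree, demo_node, entry, demo_cmp)
RB_GENERATE(demo_tree, demo_node, entry, demo_cmp)

int
main(void)
{
	struct demo_node	*n, key;
	int			 i;

	for (i = 0; i < 5; i++) {
		if ((n = calloc(1, sizeof(*n))) == NULL)
			err(1, NULL);
		n->key = i;
		RB_INSERT(demo_tree, &demo_root, n);	/* returns NULL unless a duplicate exists */
	}

	key.key = 3;
	if ((n = RB_FIND(demo_tree, &demo_root, &key)) != NULL)	/* lookup by key, like rt_find() */
		printf("found %d\n", n->key);

	RB_FOREACH(n, demo_tree, &demo_root)	/* in-order walk, like rt_dump() */
		printf("key %d\n", n->key);
	return (0);
}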