File: src/usr.sbin/bgpd/rde_attr.c
Static-analyzer warning: line 1091, column 13 — although the value stored to
'seg_type' is used in the enclosing expression, it is never read afterwards
(dead store, in aspath_override()).
1 | /* $OpenBSD: rde_attr.c,v 1.125 2021/06/24 10:04:05 claudio Exp $ */ |
2 | |
3 | /* |
4 | * Copyright (c) 2004 Claudio Jeker <claudio@openbsd.org> |
5 | * Copyright (c) 2016 Job Snijders <job@instituut.net> |
6 | * Copyright (c) 2016 Peter Hessler <phessler@openbsd.org> |
7 | * |
8 | * Permission to use, copy, modify, and distribute this software for any |
9 | * purpose with or without fee is hereby granted, provided that the above |
10 | * copyright notice and this permission notice appear in all copies. |
11 | * |
12 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES |
13 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF |
14 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR |
15 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES |
16 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN |
17 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF |
18 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
19 | */ |
20 | |
21 | #include <sys/queue.h> |
22 | |
23 | #include <endian.h> |
24 | #include <limits.h> |
25 | #include <stdlib.h> |
26 | #include <string.h> |
27 | #include <siphash.h> |
28 | |
29 | #include "bgpd.h" |
30 | #include "rde.h" |
31 | #include "log.h" |
32 | |
33 | int |
34 | attr_write(void *p, u_int16_t p_len, u_int8_t flags, u_int8_t type, |
35 | void *data, u_int16_t data_len) |
36 | { |
37 | u_char *b = p; |
38 | u_int16_t tmp, tot_len = 2; /* attribute header (without len) */ |
39 | |
40 | flags &= ~ATTR_DEFMASK(0x0f | 0x10); |
41 | if (data_len > 255) { |
42 | tot_len += 2 + data_len; |
43 | flags |= ATTR_EXTLEN0x10; |
44 | } else { |
45 | tot_len += 1 + data_len; |
46 | } |
47 | |
48 | if (tot_len > p_len) |
49 | return (-1); |
50 | |
51 | *b++ = flags; |
52 | *b++ = type; |
53 | if (data_len > 255) { |
54 | tmp = htons(data_len)(__uint16_t)(__builtin_constant_p(data_len) ? (__uint16_t)((( __uint16_t)(data_len) & 0xffU) << 8 | ((__uint16_t) (data_len) & 0xff00U) >> 8) : __swap16md(data_len)); |
55 | memcpy(b, &tmp, sizeof(tmp)); |
56 | b += 2; |
57 | } else |
58 | *b++ = (u_char)data_len; |
59 | |
60 | if (data == NULL((void*)0)) |
61 | return (tot_len - data_len); |
62 | |
63 | if (data_len != 0) |
64 | memcpy(b, data, data_len); |
65 | |
66 | return (tot_len); |
67 | } |
68 | |
69 | int |
70 | attr_writebuf(struct ibuf *buf, u_int8_t flags, u_int8_t type, void *data, |
71 | u_int16_t data_len) |
72 | { |
73 | u_char hdr[4]; |
74 | |
75 | flags &= ~ATTR_DEFMASK(0x0f | 0x10); |
76 | if (data_len > 255) { |
77 | flags |= ATTR_EXTLEN0x10; |
78 | hdr[2] = (data_len >> 8) & 0xff; |
79 | hdr[3] = data_len & 0xff; |
80 | } else { |
81 | hdr[2] = data_len & 0xff; |
82 | } |
83 | |
84 | hdr[0] = flags; |
85 | hdr[1] = type; |
86 | |
87 | if (ibuf_add(buf, hdr, flags & ATTR_EXTLEN0x10 ? 4 : 3) == -1) |
88 | return (-1); |
89 | if (data && ibuf_add(buf, data, data_len) == -1) |
90 | return (-1); |
91 | return (0); |
92 | } |
93 | |
94 | /* optional attribute specific functions */ |
95 | int attr_diff(struct attr *, struct attr *); |
96 | struct attr *attr_alloc(u_int8_t, u_int8_t, const void *, u_int16_t); |
97 | struct attr *attr_lookup(u_int8_t, u_int8_t, const void *, u_int16_t); |
98 | void attr_put(struct attr *); |
99 | |
100 | struct attr_table { |
101 | struct attr_list *hashtbl; |
102 | u_int64_t hashmask; |
103 | } attrtable; |
104 | |
105 | SIPHASH_KEY attrtablekey; |
106 | |
107 | #define ATTR_HASH(x)&attrtable.hashtbl[(x) & attrtable.hashmask] \ |
108 | &attrtable.hashtbl[(x) & attrtable.hashmask] |
109 | |
110 | void |
111 | attr_init(u_int32_t hashsize) |
112 | { |
113 | u_int32_t hs, i; |
114 | |
115 | arc4random_buf(&attrtablekey, sizeof(attrtablekey)); |
116 | for (hs = 1; hs < hashsize; hs <<= 1) |
117 | ; |
118 | attrtable.hashtbl = calloc(hs, sizeof(struct attr_list)); |
119 | if (attrtable.hashtbl == NULL((void*)0)) |
120 | fatal("attr_init"); |
121 | |
122 | for (i = 0; i < hs; i++) |
123 | LIST_INIT(&attrtable.hashtbl[i])do { ((&attrtable.hashtbl[i])->lh_first) = ((void*)0); } while (0); |
124 | |
125 | attrtable.hashmask = hs - 1; |
126 | } |
127 | |
128 | void |
129 | attr_shutdown(void) |
130 | { |
131 | u_int64_t i; |
132 | |
133 | for (i = 0; i <= attrtable.hashmask; i++) |
134 | if (!LIST_EMPTY(&attrtable.hashtbl[i])(((&attrtable.hashtbl[i])->lh_first) == ((void*)0))) |
135 | log_warnx("%s: free non-free table", __func__); |
136 | |
137 | free(attrtable.hashtbl); |
138 | } |
139 | |
140 | void |
141 | attr_hash_stats(struct rde_hashstats *hs) |
142 | { |
143 | struct attr *a; |
144 | u_int64_t i; |
145 | int64_t n; |
146 | |
147 | memset(hs, 0, sizeof(*hs)); |
148 | strlcpy(hs->name, "attr hash", sizeof(hs->name)); |
149 | hs->min = LLONG_MAX9223372036854775807LL; |
150 | hs->num = attrtable.hashmask + 1; |
151 | |
152 | for (i = 0; i <= attrtable.hashmask; i++) { |
153 | n = 0; |
154 | LIST_FOREACH(a, &attrtable.hashtbl[i], entry)for((a) = ((&attrtable.hashtbl[i])->lh_first); (a)!= ( (void*)0); (a) = ((a)->entry.le_next)) |
155 | n++; |
156 | if (n < hs->min) |
157 | hs->min = n; |
158 | if (n > hs->max) |
159 | hs->max = n; |
160 | hs->sum += n; |
161 | hs->sumq += n * n; |
162 | } |
163 | } |
164 | |
/*
 * Attach an optional attribute to asp->others[], a NULL-padded array of
 * referenced struct attr pointers kept sorted by attribute type.
 * Returns 0 on success or -1 if an attribute of the same type is
 * already present (each type is allowed only once per path).
 */
int
attr_optadd(struct rde_aspath *asp, u_int8_t flags, u_int8_t type,
    void *data, u_int16_t len)
{
	u_int8_t	 l;
	struct attr	*a, *t;
	void		*p;

	/* known optional attributes were validated previously */
	if ((a = attr_lookup(flags, type, data, len)) == NULL)
		a = attr_alloc(flags, type, data, len);

	/* attribute allowed only once */
	for (l = 0; l < asp->others_len; l++) {
		if (asp->others[l] == NULL)
			break;
		if (type == asp->others[l]->type) {
			/*
			 * If attr_alloc() created a fresh, still
			 * unreferenced attribute above, release it again.
			 */
			if (a->refcnt == 0)
				attr_put(a);
			return (-1);
		}
	}

	/* add attribute to the table but first bump refcnt */
	a->refcnt++;
	rdemem.attr_refs++;

	for (l = 0; l < asp->others_len; l++) {
		if (asp->others[l] == NULL) {
			asp->others[l] = a;
			return (0);
		}
		/* list is sorted: bubble the new attribute into place */
		if (a->type < asp->others[l]->type) {
			t = asp->others[l];
			asp->others[l] = a;
			a = t;
		}
	}

	/* no empty slot found, need to realloc */
	if (asp->others_len == UCHAR_MAX)
		fatalx("attr_optadd: others_len overflow");

	asp->others_len++;
	if ((p = reallocarray(asp->others,
	    asp->others_len, sizeof(struct attr *))) == NULL)
		fatal("attr_optadd");
	asp->others = p;

	/* l stores the size of others before resize */
	asp->others[l] = a;
	return (0);
}
219 | |
220 | struct attr * |
221 | attr_optget(const struct rde_aspath *asp, u_int8_t type) |
222 | { |
223 | u_int8_t l; |
224 | |
225 | for (l = 0; l < asp->others_len; l++) { |
226 | if (asp->others[l] == NULL((void*)0)) |
227 | break; |
228 | if (type == asp->others[l]->type) |
229 | return (asp->others[l]); |
230 | if (type < asp->others[l]->type) |
231 | break; |
232 | } |
233 | return (NULL((void*)0)); |
234 | } |
235 | |
236 | void |
237 | attr_copy(struct rde_aspath *t, const struct rde_aspath *s) |
238 | { |
239 | u_int8_t l; |
240 | |
241 | if (t->others != NULL((void*)0)) |
242 | attr_freeall(t); |
243 | |
244 | t->others_len = s->others_len; |
245 | if (t->others_len == 0) { |
246 | t->others = NULL((void*)0); |
247 | return; |
248 | } |
249 | |
250 | if ((t->others = calloc(s->others_len, sizeof(struct attr *))) == 0) |
251 | fatal("attr_copy"); |
252 | |
253 | for (l = 0; l < t->others_len; l++) { |
254 | if (s->others[l] == NULL((void*)0)) |
255 | break; |
256 | s->others[l]->refcnt++; |
257 | rdemem.attr_refs++; |
258 | t->others[l] = s->others[l]; |
259 | } |
260 | } |
261 | |
262 | int |
263 | attr_diff(struct attr *oa, struct attr *ob) |
264 | { |
265 | int r; |
266 | |
267 | if (ob == NULL((void*)0)) |
268 | return (1); |
269 | if (oa == NULL((void*)0)) |
270 | return (-1); |
271 | if (oa->flags > ob->flags) |
272 | return (1); |
273 | if (oa->flags < ob->flags) |
274 | return (-1); |
275 | if (oa->type > ob->type) |
276 | return (1); |
277 | if (oa->type < ob->type) |
278 | return (-1); |
279 | if (oa->len > ob->len) |
280 | return (1); |
281 | if (oa->len < ob->len) |
282 | return (-1); |
283 | r = memcmp(oa->data, ob->data, oa->len); |
284 | if (r > 0) |
285 | return (1); |
286 | if (r < 0) |
287 | return (-1); |
288 | |
289 | fatalx("attr_diff: equal attributes encountered"); |
290 | } |
291 | |
292 | int |
293 | attr_compare(struct rde_aspath *a, struct rde_aspath *b) |
294 | { |
295 | u_int8_t l, min; |
296 | |
297 | min = a->others_len < b->others_len ? a->others_len : b->others_len; |
298 | for (l = 0; l < min; l++) |
299 | if (a->others[l] != b->others[l]) |
300 | return (attr_diff(a->others[l], b->others[l])); |
301 | |
302 | if (a->others_len < b->others_len) { |
303 | for (; l < b->others_len; l++) |
304 | if (b->others[l] != NULL((void*)0)) |
305 | return (-1); |
306 | } else if (a->others_len > b->others_len) { |
307 | for (; l < a->others_len; l++) |
308 | if (a->others[l] != NULL((void*)0)) |
309 | return (1); |
310 | } |
311 | |
312 | return (0); |
313 | } |
314 | |
315 | u_int64_t |
316 | attr_hash(struct rde_aspath *a) |
317 | { |
318 | u_int64_t hash = 0; |
319 | u_int8_t l; |
320 | |
321 | for (l = 0; l < a->others_len; l++) |
322 | if (a->others[l] != NULL((void*)0)) |
323 | hash ^= a->others[l]->hash; |
324 | return (hash); |
325 | } |
326 | |
327 | void |
328 | attr_free(struct rde_aspath *asp, struct attr *attr) |
329 | { |
330 | u_int8_t l; |
331 | |
332 | for (l = 0; l < asp->others_len; l++) |
333 | if (asp->others[l] == attr) { |
334 | attr_put(asp->others[l]); |
335 | for (++l; l < asp->others_len; l++) |
336 | asp->others[l - 1] = asp->others[l]; |
337 | asp->others[asp->others_len - 1] = NULL((void*)0); |
338 | return; |
339 | } |
340 | |
341 | /* no realloc() because the slot may be reused soon */ |
342 | } |
343 | |
344 | void |
345 | attr_freeall(struct rde_aspath *asp) |
346 | { |
347 | u_int8_t l; |
348 | |
349 | for (l = 0; l < asp->others_len; l++) |
350 | attr_put(asp->others[l]); |
351 | |
352 | free(asp->others); |
353 | asp->others = NULL((void*)0); |
354 | asp->others_len = 0; |
355 | } |
356 | |
/*
 * Allocate a new struct attr, copy the payload, hash flags/type/len/
 * payload with the table-wide SipHash key and link the entry into the
 * global attribute table.  The new attribute starts with refcnt 0.
 */
struct attr *
attr_alloc(u_int8_t flags, u_int8_t type, const void *data, u_int16_t len)
{
	struct attr	*a;
	SIPHASH_CTX	 ctx;

	a = calloc(1, sizeof(struct attr));
	if (a == NULL)
		fatal("attr_optadd");	/* historic message, kept as-is */
	rdemem.attr_cnt++;

	flags &= ~ATTR_DEFMASK;	/* normalize mask */
	a->flags = flags;
	a->type = type;
	a->len = len;
	if (len != 0) {
		if ((a->data = malloc(len)) == NULL)
			fatal("attr_optadd");

		rdemem.attr_dcnt++;
		rdemem.attr_data += len;
		memcpy(a->data, data, len);
	} else
		a->data = NULL;

	/* must hash exactly the same fields as attr_lookup() */
	SipHash24_Init(&ctx, &attrtablekey);
	SipHash24_Update(&ctx, &flags, sizeof(flags));
	SipHash24_Update(&ctx, &type, sizeof(type));
	SipHash24_Update(&ctx, &len, sizeof(len));
	SipHash24_Update(&ctx, a->data, a->len);
	a->hash = SipHash24_End(&ctx);
	LIST_INSERT_HEAD(ATTR_HASH(a->hash), a, entry);

	return (a);
}
392 | |
/*
 * Find a cached attribute with matching flags, type and payload in the
 * global attribute table.  Returns NULL if no such attribute exists.
 */
struct attr *
attr_lookup(u_int8_t flags, u_int8_t type, const void *data, u_int16_t len)
{
	struct attr_list	*head;
	struct attr		*a;
	u_int64_t		 hash;
	SIPHASH_CTX		 ctx;

	flags &= ~ATTR_DEFMASK;	/* normalize mask */

	/* must hash exactly the same fields as attr_alloc() */
	SipHash24_Init(&ctx, &attrtablekey);
	SipHash24_Update(&ctx, &flags, sizeof(flags));
	SipHash24_Update(&ctx, &type, sizeof(type));
	SipHash24_Update(&ctx, &len, sizeof(len));
	SipHash24_Update(&ctx, data, len);
	hash = SipHash24_End(&ctx);
	head = ATTR_HASH(hash);

	LIST_FOREACH(a, head, entry) {
		if (hash == a->hash && type == a->type &&
		    flags == a->flags && len == a->len &&
		    memcmp(data, a->data, len) == 0)
			return (a);
	}
	return (NULL);
}
419 | |
420 | void |
421 | attr_put(struct attr *a) |
422 | { |
423 | if (a == NULL((void*)0)) |
424 | return; |
425 | |
426 | rdemem.attr_refs--; |
427 | if (--a->refcnt > 0) |
428 | /* somebody still holds a reference */ |
429 | return; |
430 | |
431 | /* unlink */ |
432 | LIST_REMOVE(a, entry)do { if ((a)->entry.le_next != ((void*)0)) (a)->entry.le_next ->entry.le_prev = (a)->entry.le_prev; *(a)->entry.le_prev = (a)->entry.le_next; ; ; } while (0); |
433 | |
434 | if (a->len != 0) |
435 | rdemem.attr_dcnt--; |
436 | rdemem.attr_data -= a->len; |
437 | rdemem.attr_cnt--; |
438 | free(a->data); |
439 | free(a); |
440 | } |
441 | |
442 | /* aspath specific functions */ |
443 | |
444 | static u_int16_t aspath_count(const void *, u_int16_t); |
445 | static u_int32_t aspath_extract_origin(const void *, u_int16_t); |
446 | static u_int16_t aspath_countlength(struct aspath *, u_int16_t, int); |
447 | static void aspath_countcopy(struct aspath *, u_int16_t, u_int8_t *, |
448 | u_int16_t, int); |
449 | struct aspath *aspath_lookup(const void *, u_int16_t); |
450 | |
451 | struct aspath_table { |
452 | struct aspath_list *hashtbl; |
453 | u_int32_t hashmask; |
454 | } astable; |
455 | |
456 | SIPHASH_KEY astablekey; |
457 | |
458 | #define ASPATH_HASH(x)&astable.hashtbl[(x) & astable.hashmask] \ |
459 | &astable.hashtbl[(x) & astable.hashmask] |
460 | |
461 | void |
462 | aspath_init(u_int32_t hashsize) |
463 | { |
464 | u_int32_t hs, i; |
465 | |
466 | for (hs = 1; hs < hashsize; hs <<= 1) |
467 | ; |
468 | astable.hashtbl = calloc(hs, sizeof(struct aspath_list)); |
469 | if (astable.hashtbl == NULL((void*)0)) |
470 | fatal("aspath_init"); |
471 | |
472 | for (i = 0; i < hs; i++) |
473 | LIST_INIT(&astable.hashtbl[i])do { ((&astable.hashtbl[i])->lh_first) = ((void*)0); } while (0); |
474 | |
475 | astable.hashmask = hs - 1; |
476 | arc4random_buf(&astablekey, sizeof(astablekey)); |
477 | } |
478 | |
479 | void |
480 | aspath_shutdown(void) |
481 | { |
482 | u_int32_t i; |
483 | |
484 | for (i = 0; i <= astable.hashmask; i++) |
485 | if (!LIST_EMPTY(&astable.hashtbl[i])(((&astable.hashtbl[i])->lh_first) == ((void*)0))) |
486 | log_warnx("aspath_shutdown: free non-free table"); |
487 | |
488 | free(astable.hashtbl); |
489 | } |
490 | |
491 | void |
492 | aspath_hash_stats(struct rde_hashstats *hs) |
493 | { |
494 | struct aspath *a; |
495 | u_int32_t i; |
496 | int64_t n; |
497 | |
498 | memset(hs, 0, sizeof(*hs)); |
499 | strlcpy(hs->name, "aspath hash", sizeof(hs->name)); |
500 | hs->min = LLONG_MAX9223372036854775807LL; |
501 | hs->num = astable.hashmask + 1; |
502 | |
503 | for (i = 0; i <= astable.hashmask; i++) { |
504 | n = 0; |
505 | LIST_FOREACH(a, &astable.hashtbl[i], entry)for((a) = ((&astable.hashtbl[i])->lh_first); (a)!= ((void *)0); (a) = ((a)->entry.le_next)) |
506 | n++; |
507 | if (n < hs->min) |
508 | hs->min = n; |
509 | if (n > hs->max) |
510 | hs->max = n; |
511 | hs->sum += n; |
512 | hs->sumq += n * n; |
513 | } |
514 | } |
515 | |
/*
 * Return the interned aspath for the given wire-format path, creating
 * and linking a new table entry on miss.  The returned path has its
 * reference count bumped; release with aspath_put().
 */
struct aspath *
aspath_get(void *data, u_int16_t len)
{
	struct aspath_list	*head;
	struct aspath		*aspath;

	/* The aspath must already have been checked for correctness. */
	aspath = aspath_lookup(data, len);
	if (aspath == NULL) {
		aspath = malloc(ASPATH_HEADER_SIZE + len);
		if (aspath == NULL)
			fatal("aspath_get");

		rdemem.aspath_cnt++;
		rdemem.aspath_size += ASPATH_HEADER_SIZE + len;

		aspath->refcnt = 0;
		aspath->len = len;
		/* cache AS count and origin AS for fast access later */
		aspath->ascnt = aspath_count(data, len);
		aspath->source_as = aspath_extract_origin(data, len);
		memcpy(aspath->data, data, len);

		/* link */
		head = ASPATH_HASH(SipHash24(&astablekey, aspath->data,
		    aspath->len));
		LIST_INSERT_HEAD(head, aspath, entry);
	}
	aspath->refcnt++;
	rdemem.aspath_refs++;

	return (aspath);
}
548 | |
549 | void |
550 | aspath_put(struct aspath *aspath) |
551 | { |
552 | if (aspath == NULL((void*)0)) |
553 | return; |
554 | |
555 | rdemem.aspath_refs--; |
556 | if (--aspath->refcnt > 0) { |
557 | /* somebody still holds a reference */ |
558 | return; |
559 | } |
560 | |
561 | /* unlink */ |
562 | LIST_REMOVE(aspath, entry)do { if ((aspath)->entry.le_next != ((void*)0)) (aspath)-> entry.le_next->entry.le_prev = (aspath)->entry.le_prev; *(aspath)->entry.le_prev = (aspath)->entry.le_next; ; ; } while (0); |
563 | |
564 | rdemem.aspath_cnt--; |
565 | rdemem.aspath_size -= ASPATH_HEADER_SIZE(__builtin_offsetof(struct aspath, data)) + aspath->len; |
566 | free(aspath); |
567 | } |
568 | |
/*
 * convert a 4 byte aspath to a 2 byte one.
 * Returns a freshly malloc()ed buffer (caller frees) and updates *len
 * to the new length.  ASs that do not fit into 16 bits are replaced by
 * AS_TRANS, in which case *flagnew is set.
 */
u_char *
aspath_deflate(u_char *data, u_int16_t *len, int *flagnew)
{
	u_int8_t	*seg, *nseg, *ndata;
	u_int32_t	 as;
	int		 i;
	u_int16_t	 seg_size, olen, nlen;
	u_int8_t	 seg_len;

	/* first calculate the length of the aspath */
	nlen = 0;
	seg = data;
	olen = *len;
	for (; olen > 0; olen -= seg_size, seg += seg_size) {
		seg_len = seg[1];
		seg_size = 2 + sizeof(u_int32_t) * seg_len;
		/* same segment header, but 2 bytes per AS instead of 4 */
		nlen += 2 + sizeof(u_int16_t) * seg_len;

		if (seg_size > olen)
			fatalx("%s: would overflow", __func__);
	}

	if ((ndata = malloc(nlen)) == NULL)
		fatal("aspath_deflate");

	/* then copy the aspath */
	seg = data;
	olen = *len;
	for (nseg = ndata; seg < data + olen; seg += seg_size) {
		/* copy segment type and AS count unchanged */
		*nseg++ = seg[0];
		*nseg++ = seg_len = seg[1];
		seg_size = 2 + sizeof(u_int32_t) * seg_len;

		for (i = 0; i < seg_len; i++) {
			as = aspath_extract(seg, i);
			if (as > USHRT_MAX) {
				/* AS does not fit in 16 bits */
				as = AS_TRANS;
				*flagnew = 1;
			}
			/* store the AS in network byte order */
			*nseg++ = (as >> 8) & 0xff;
			*nseg++ = as & 0xff;
		}
	}

	*len = nlen;
	return (ndata);
}
619 | |
/*
 * Merge the AS4_PATH attribute attr into the path a: the head of the
 * old ASPATH that is not covered by the AS4_PATH is kept and the
 * AS4_PATH is appended behind it.  The attr is removed from a in all
 * cases, including when the paths cannot be merged.
 */
void
aspath_merge(struct rde_aspath *a, struct attr *attr)
{
	u_int8_t	*np;
	u_int16_t	 ascnt, diff, nlen, difflen;
	int		 hroom = 0;

	ascnt = aspath_count(attr->data, attr->len);
	if (ascnt > a->aspath->ascnt) {
		/* ASPATH is shorter then AS4_PATH no way to merge */
		attr_free(a, attr);
		return;
	}

	diff = a->aspath->ascnt - ascnt;
	/*
	 * If the new path starts with an AS_SEQUENCE its header may be
	 * reused for the tail of the old path (hroom = that segment's
	 * AS count) — see aspath_countlength()/aspath_countcopy().
	 */
	if (diff && attr->len > 2 && attr->data[0] == AS_SEQUENCE)
		hroom = attr->data[1];
	difflen = aspath_countlength(a->aspath, diff, hroom);
	nlen = attr->len + difflen;

	if ((np = malloc(nlen)) == NULL)
		fatal("aspath_merge");

	/* copy head from old aspath */
	aspath_countcopy(a->aspath, diff, np, difflen, hroom);

	/* copy tail from new aspath */
	if (hroom > 0)
		/* skip the 2 byte segment header that was stolen above */
		memcpy(np + nlen - attr->len + 2, attr->data + 2,
		    attr->len - 2);
	else
		memcpy(np + nlen - attr->len, attr->data, attr->len);

	aspath_put(a->aspath);
	a->aspath = aspath_get(np, nlen);
	free(np);
	attr_free(a, attr);
}
658 | |
/* Return a pointer to the raw wire-format AS path data. */
u_char *
aspath_dump(struct aspath *aspath)
{
	return (aspath->data);
}
664 | |
/* Return the length in bytes of the wire-format AS path data. */
u_int16_t
aspath_length(struct aspath *aspath)
{
	return (aspath->len);
}
670 | |
/*
 * Return the leftmost (neighbor) AS of the path, or the local AS when
 * the path is empty or starts with an AS_SET.
 */
u_int32_t
aspath_neighbor(struct aspath *aspath)
{
	/*
	 * Empty aspath is OK -- internal AS route.
	 * Additionally the RFC specifies that if the path starts with an
	 * AS_SET the neighbor AS is also the local AS.
	 */
	if (aspath->len == 0 ||
	    aspath->data[0] != AS_SEQUENCE)
		return (rde_local_as());
	return (aspath_extract(aspath->data, 0));
}
684 | |
/* Return the cached origin (source) AS computed at aspath_get() time. */
u_int32_t
aspath_origin(struct aspath *aspath)
{
	return aspath->source_as;
}
690 | |
/*
 * Count the ASs along the wire-format path; an AS_SET segment counts
 * as a single AS.  The path must already have been validated — the
 * fatalx() is only a safety net against truncated segments.
 */
static u_int16_t
aspath_count(const void *data, u_int16_t len)
{
	const u_int8_t	*seg;
	u_int16_t	 cnt, seg_size;
	u_int8_t	 seg_type, seg_len;

	cnt = 0;
	seg = data;
	for (; len > 0; len -= seg_size, seg += seg_size) {
		seg_type = seg[0];
		seg_len = seg[1];	/* number of ASs in this segment */
		seg_size = 2 + sizeof(u_int32_t) * seg_len;

		if (seg_type == AS_SET)
			cnt += 1;
		else
			cnt += seg_len;

		if (seg_size > len)
			fatalx("%s: would overflow", __func__);
	}
	return (cnt);
}
715 | |
/*
 * The origin AS number derived from a Route as follows:
 * o  the rightmost AS in the final segment of the AS_PATH attribute
 *    in the Route if that segment is of type AS_SEQUENCE, or
 * o  the BGP speaker's own AS number if that segment is of type
 *    AS_CONFED_SEQUENCE or AS_CONFED_SET or if the AS_PATH is empty,
 * o  the distinguished value "NONE" if the final segment of the
 *    AS_PATH attribute is of any other type.
 */
static u_int32_t
aspath_extract_origin(const void *data, u_int16_t len)
{
	const u_int8_t	*seg;
	u_int32_t	 as = AS_NONE;
	u_int16_t	 seg_size;
	u_int8_t	 seg_len;

	/* AS_PATH is empty */
	if (len == 0)
		return (rde_local_as());

	seg = data;
	for (; len > 0; len -= seg_size, seg += seg_size) {
		seg_len = seg[1];
		seg_size = 2 + sizeof(u_int32_t) * seg_len;

		/* final segment: take the rightmost AS of an AS_SEQUENCE */
		if (len == seg_size && seg[0] == AS_SEQUENCE) {
			as = aspath_extract(seg, seg_len - 1);
		}
		if (seg_size > len)
			fatalx("%s: would overflow", __func__);
	}
	return (as);
}
750 | |
/*
 * Return the number of bytes occupied by the first cnt ASs of aspath.
 * If headcnt is nonzero and the last counted segment is an AS_SEQUENCE
 * that together with headcnt still fits in one segment (< 256 ASs),
 * the 2 byte header of the following path's first segment is reused
 * and therefore subtracted.  Counterpart of aspath_countcopy().
 */
static u_int16_t
aspath_countlength(struct aspath *aspath, u_int16_t cnt, int headcnt)
{
	const u_int8_t	*seg;
	u_int16_t	 seg_size, len, clen;
	u_int8_t	 seg_type = 0, seg_len = 0;

	seg = aspath->data;
	clen = 0;
	for (len = aspath->len; len > 0 && cnt > 0;
	    len -= seg_size, seg += seg_size) {
		seg_type = seg[0];
		seg_len = seg[1];
		seg_size = 2 + sizeof(u_int32_t) * seg_len;

		if (seg_type == AS_SET)
			cnt -= 1;	/* an AS_SET counts as one AS */
		else if (seg_len > cnt) {
			/* segment must be split: count only cnt ASs */
			seg_len = cnt;
			clen += 2 + sizeof(u_int32_t) * cnt;
			break;
		} else
			cnt -= seg_len;

		clen += seg_size;

		if (seg_size > len)
			fatalx("%s: would overflow", __func__);
	}
	if (headcnt > 0 && seg_type == AS_SEQUENCE && headcnt + seg_len < 256)
		/* no need for additional header from the new aspath. */
		clen -= 2;

	return (clen);
}
786 | |
/*
 * Copy the first cnt ASs of aspath into buf of the given size.  If
 * headcnt is nonzero the last copied AS_SEQUENCE header is widened by
 * headcnt, so the following path's ASs can be appended without their
 * own segment header.  size must come from aspath_countlength() with
 * the same cnt/headcnt arguments.
 */
static void
aspath_countcopy(struct aspath *aspath, u_int16_t cnt, u_int8_t *buf,
    u_int16_t size, int headcnt)
{
	const u_int8_t	*seg;
	u_int16_t	 seg_size, len;
	u_int8_t	 seg_type, seg_len;

	if (headcnt > 0)
		/*
		 * additional room because we steal the segment header
		 * from the other aspath
		 */
		size += 2;
	seg = aspath->data;
	for (len = aspath->len; len > 0 && cnt > 0;
	    len -= seg_size, seg += seg_size) {
		seg_type = seg[0];
		seg_len = seg[1];
		seg_size = 2 + sizeof(u_int32_t) * seg_len;

		if (seg_type == AS_SET)
			cnt -= 1;	/* an AS_SET counts as one AS */
		else if (seg_len > cnt) {
			/* split segment: copy cnt ASs, widen by headcnt */
			seg_len = cnt + headcnt;
			seg_size = 2 + sizeof(u_int32_t) * cnt;
			cnt = 0;
		} else {
			cnt -= seg_len;
			if (cnt == 0)
				seg_len += headcnt;
		}

		memcpy(buf, seg, seg_size);
		/* rewrite the header with the (possibly adjusted) length */
		buf[0] = seg_type;
		buf[1] = seg_len;
		buf += seg_size;
		if (size < seg_size)
			fatalx("%s: would overflow", __func__);
		size -= seg_size;
	}
}
829 | |
830 | int |
831 | aspath_loopfree(struct aspath *aspath, u_int32_t myAS) |
832 | { |
833 | u_int8_t *seg; |
834 | u_int16_t len, seg_size; |
835 | u_int8_t i, seg_len; |
836 | |
837 | seg = aspath->data; |
838 | for (len = aspath->len; len > 0; len -= seg_size, seg += seg_size) { |
839 | seg_len = seg[1]; |
840 | seg_size = 2 + sizeof(u_int32_t) * seg_len; |
841 | |
842 | for (i = 0; i < seg_len; i++) { |
843 | if (myAS == aspath_extract(seg, i)) |
844 | return (0); |
845 | } |
846 | |
847 | if (seg_size > len) |
848 | fatalx("%s: would overflow", __func__); |
849 | } |
850 | return (1); |
851 | } |
852 | |
853 | int |
854 | aspath_compare(struct aspath *a1, struct aspath *a2) |
855 | { |
856 | int r; |
857 | |
858 | if (a1->len > a2->len) |
859 | return (1); |
860 | if (a1->len < a2->len) |
861 | return (-1); |
862 | r = memcmp(a1->data, a2->data, a1->len); |
863 | if (r > 0) |
864 | return (1); |
865 | if (r < 0) |
866 | return (-1); |
867 | return (0); |
868 | } |
869 | |
870 | struct aspath * |
871 | aspath_lookup(const void *data, u_int16_t len) |
872 | { |
873 | struct aspath_list *head; |
874 | struct aspath *aspath; |
875 | u_int32_t hash; |
876 | |
877 | hash = SipHash24(&astablekey, data, len)SipHash((&astablekey), 2, 4, (data), (len)); |
878 | head = ASPATH_HASH(hash)&astable.hashtbl[(hash) & astable.hashmask]; |
879 | |
880 | LIST_FOREACH(aspath, head, entry)for((aspath) = ((head)->lh_first); (aspath)!= ((void*)0); ( aspath) = ((aspath)->entry.le_next)) { |
881 | if (len == aspath->len && memcmp(data, aspath->data, len) == 0) |
882 | return (aspath); |
883 | } |
884 | return (NULL((void*)0)); |
885 | } |
886 | |
887 | |
888 | static int |
889 | as_compare(struct filter_as *f, u_int32_t as, u_int32_t neighas) |
890 | { |
891 | u_int32_t match; |
892 | |
893 | if (f->flags & AS_FLAG_AS_SET_NAME0x02) /* should not happen */ |
894 | return (0); |
895 | if (f->flags & AS_FLAG_AS_SET0x04) |
896 | return (as_set_match(f->aset, as)); |
897 | |
898 | if (f->flags & AS_FLAG_NEIGHBORAS0x01) |
899 | match = neighas; |
900 | else |
901 | match = f->as_min; |
902 | |
903 | switch (f->op) { |
904 | case OP_NONE: |
905 | case OP_EQ: |
906 | if (as == match) |
907 | return (1); |
908 | break; |
909 | case OP_NE: |
910 | if (as != match) |
911 | return (1); |
912 | break; |
913 | case OP_RANGE: |
914 | if (as >= f->as_min && as <= f->as_max) |
915 | return (1); |
916 | break; |
917 | case OP_XRANGE: |
918 | if (as < f->as_min || as > f->as_max) |
919 | return (1); |
920 | break; |
921 | } |
922 | return (0); |
923 | } |
924 | |
/* we need to be able to search more than one as */
/*
 * Match the path against filter f (AS_EMPTY, AS_PEER, AS_SOURCE,
 * AS_TRANSIT or AS_ALL).  Returns 1 on match, 0 otherwise.
 */
int
aspath_match(struct aspath *aspath, struct filter_as *f, u_int32_t neighas)
{
	const u_int8_t	*seg;
	int		 final;
	u_int16_t	 len, seg_size;
	u_int8_t	 i, seg_len;
	u_int32_t	 as = AS_NONE;

	if (f->type == AS_EMPTY) {
		if (aspath_length(aspath) == 0)
			return (1);
		else
			return (0);
	}

	/* just check the leftmost AS */
	if (f->type == AS_PEER) {
		as = aspath_neighbor(aspath);
		if (as_compare(f, as, neighas))
			return (1);
		else
			return (0);
	}

	seg = aspath->data;
	len = aspath->len;
	/* 6 = smallest segment holding an AS: 2 byte header + 4 byte AS */
	for (; len >= 6; len -= seg_size, seg += seg_size) {
		seg_len = seg[1];
		seg_size = 2 + sizeof(u_int32_t) * seg_len;

		final = (len == seg_size);

		if (f->type == AS_SOURCE) {
			/*
			 * Just extract the rightmost AS
			 * but if that segment is an AS_SET then the rightmost
			 * AS of a previous AS_SEQUENCE segment should be used.
			 * Because of that just look at AS_SEQUENCE segments.
			 */
			if (seg[0] == AS_SEQUENCE)
				as = aspath_extract(seg, seg_len - 1);
			/* not yet in the final segment */
			if (!final)
				continue;
			if (as_compare(f, as, neighas))
				return (1);
			else
				return (0);
		}
		/* AS_TRANSIT or AS_ALL */
		for (i = 0; i < seg_len; i++) {
			/*
			 * the source (rightmost) AS is excluded from
			 * AS_TRANSIT matches.
			 */
			if (final && i == seg_len - 1 && f->type == AS_TRANSIT)
				return (0);
			as = aspath_extract(seg, i);
			if (as_compare(f, as, neighas))
				return (1);
		}

		if (seg_size > len)
			fatalx("%s: would overflow", __func__);
	}
	return (0);
}
994 | |
/*
 * Returns a new prepended aspath. Old needs to be freed by caller.
 * Prepends `as` `quantum` times in front of asp.  When the first
 * segment is not an AS_SEQUENCE or would exceed 255 ASs, a new
 * AS_SEQUENCE segment is inserted for the overflowing part.
 */
u_char *
aspath_prepend(struct aspath *asp, u_int32_t as, int quantum, u_int16_t *len)
{
	u_char		*p;
	int		 l, overflow = 0, shift = 0, size, wpos = 0;
	u_int8_t	 type;

	/* lunatic prepends are blocked in the parser and limited */

	/* first calculate new size */
	if (asp->len > 0) {
		if (asp->len < 2)
			fatalx("aspath_prepend: bad aspath length");
		type = asp->data[0];	/* type of the first segment */
		size = asp->data[1];	/* AS count of the first segment */
	} else {
		/* empty as path */
		type = AS_SET;
		size = 0;
	}

	if (quantum > 255)
		fatalx("aspath_prepend: preposterous prepend");
	if (quantum == 0) {
		/* no change needed but return a copy */
		p = malloc(asp->len);
		if (p == NULL)
			fatal("aspath_prepend");
		memcpy(p, asp->data, asp->len);
		*len = asp->len;
		return (p);
	} else if (type == AS_SET || size + quantum > 255) {
		/* need to attach a new AS_SEQUENCE */
		l = 2 + quantum * sizeof(u_int32_t) + asp->len;
		if (type == AS_SET)
			overflow = quantum;
		else
			overflow = size + quantum - 255;
	} else
		l = quantum * sizeof(u_int32_t) + asp->len;

	/* overflow ASs go into the new segment, the rest extends seg 0 */
	quantum -= overflow;

	p = malloc(l);
	if (p == NULL)
		fatal("aspath_prepend");

	/* first prepends */
	as = htonl(as);
	if (overflow > 0) {
		/* emit a fresh AS_SEQUENCE for the overflowing prepends */
		p[wpos++] = AS_SEQUENCE;
		p[wpos++] = overflow;

		for (; overflow > 0; overflow--) {
			memcpy(p + wpos, &as, sizeof(u_int32_t));
			wpos += sizeof(u_int32_t);
		}
	}
	if (quantum > 0) {
		/* rewrite the first segment header, widened by quantum */
		shift = 2;
		p[wpos++] = AS_SEQUENCE;
		p[wpos++] = quantum + size;

		for (; quantum > 0; quantum--) {
			memcpy(p + wpos, &as, sizeof(u_int32_t));
			wpos += sizeof(u_int32_t);
		}
	}
	/* copy the remainder of the old path (skipping its header if the
	 * first segment was rewritten above) */
	memcpy(p + wpos, asp->data + shift, asp->len - shift);

	*len = l;
	return (p);
}
1071 | |
1072 | /* |
1073 | * Returns a new aspath where neighbor_as is replaced by local_as. |
1074 | */ |
1075 | u_char * |
1076 | aspath_override(struct aspath *asp, u_int32_t neighbor_as, u_int32_t local_as, |
1077 | u_int16_t *len) |
1078 | { |
1079 | u_char *p, *seg, *nseg; |
1080 | u_int32_t as; |
1081 | u_int16_t l, seg_size; |
1082 | u_int8_t i, seg_len, seg_type; |
1083 | |
1084 | p = malloc(asp->len); |
1085 | if (p == NULL((void*)0)) |
1086 | fatal("aspath_override"); |
1087 | |
1088 | seg = asp->data; |
1089 | nseg = p; |
1090 | for (l = asp->len; l > 0; l -= seg_size, seg += seg_size) { |
1091 | *nseg++ = seg_type = seg[0]; |
Although the value stored to 'seg_type' is used in the enclosing expression, the value is never actually read from 'seg_type' | |
1092 | *nseg++ = seg_len = seg[1]; |
1093 | seg_size = 2 + sizeof(u_int32_t) * seg_len; |
1094 | |
1095 | for (i = 0; i < seg_len; i++) { |
1096 | as = aspath_extract(seg, i); |
1097 | if (as == neighbor_as) |
1098 | as = local_as; |
1099 | as = htonl(as)(__uint32_t)(__builtin_constant_p(as) ? (__uint32_t)(((__uint32_t )(as) & 0xff) << 24 | ((__uint32_t)(as) & 0xff00 ) << 8 | ((__uint32_t)(as) & 0xff0000) >> 8 | ((__uint32_t)(as) & 0xff000000) >> 24) : __swap32md (as)); |
1100 | memcpy(nseg, &as, sizeof(as)); |
1101 | nseg += sizeof(as); |
1102 | } |
1103 | |
1104 | if (seg_size > l) |
1105 | fatalx("%s: would overflow", __func__); |
1106 | } |
1107 | |
1108 | *len = asp->len; |
1109 | return (p); |
1110 | } |
1111 | |
/*
 * Match the aspath "a" against a length specification.  For ASLEN_MAX
 * the match succeeds when the path's AS count (per aspath_count)
 * exceeds "aslen"; for ASLEN_SEQ it succeeds when more than "aslen"
 * identical ASes appear consecutively.  Returns 1 on match, else 0.
 */
int
aspath_lenmatch(struct aspath *a, enum aslen_spec type, u_int aslen)
{
	u_int8_t	*seg;
	u_int32_t	 as, lastas = 0;
	u_int		 count = 0;
	u_int16_t	 len, seg_size;
	u_int8_t	 i, seg_len, seg_type;

	if (type == ASLEN_MAX) {
		if (aslen < aspath_count(a->data, a->len))
			return (1);
		else
			return (0);
	}

	/* type == ASLEN_SEQ */
	seg = a->data;
	for (len = a->len; len > 0; len -= seg_size, seg += seg_size) {
		seg_type = seg[0];
		seg_len = seg[1];
		seg_size = 2 + sizeof(u_int32_t) * seg_len;

		for (i = 0; i < seg_len; i++) {
			as = aspath_extract(seg, i);
			if (as == lastas) {
				/* run of identical ASes grew too long */
				if (aslen < ++count)
					return (1);
			} else if (seg_type == AS_SET) {
				/*
				 * non-matching ASes inside an AS_SET do not
				 * break the run (lastas stays unchanged):
				 * AS path 3 { 4 3 7 } 3 will have count = 3
				 */
				continue;
			} else
				count = 1;
			lastas = as;
		}

		/* sanity check: segment must not run past the path end */
		if (seg_size > len)
			fatalx("%s: would overflow", __func__);
	}
	return (0);
}