FreeBSD/Linux Kernel Cross Reference
sys/net/radix.c
1 /* $NetBSD: radix.c,v 1.28 2005/02/26 22:45:09 perry Exp $ */
2
3 /*
4 * Copyright (c) 1988, 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * @(#)radix.c 8.6 (Berkeley) 10/17/95
32 */
33
34 /*
35 * Routines to build and maintain radix trees for routing lookups.
36 */
37
38 #include <sys/cdefs.h>
39 __KERNEL_RCSID(0, "$NetBSD: radix.c,v 1.28 2005/02/26 22:45:09 perry Exp $");
40
41 #ifndef _NET_RADIX_H_
42 #include <sys/param.h>
43 #ifdef _KERNEL
44 #include "opt_inet.h"
45
46 #include <sys/systm.h>
47 #include <sys/malloc.h>
48 #define M_DONTWAIT M_NOWAIT
49 #include <sys/domain.h>
50 #include <netinet/ip_encap.h>
51 #else
52 #include <stdlib.h>
53 #endif
54 #include <sys/syslog.h>
55 #include <net/radix.h>
56 #endif
57
58 int max_keylen;
59 struct radix_mask *rn_mkfreelist;
60 struct radix_node_head *mask_rnhead;
61 static char *addmask_key;
62 static const char normal_chars[] =
63 {0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, -1};
64 static char *rn_zeros, *rn_ones;
65
66 #define rn_masktop (mask_rnhead->rnh_treetop)
67 #undef Bcmp
68 #define Bcmp(a, b, l) ((l) == 0 ? 0 : bcmp((caddr_t)(a), (caddr_t)(b), (u_long)(l)))
69
70 static int rn_satisfies_leaf(const char *, struct radix_node *, int);
71 static int rn_lexobetter(const void *, const void *);
72 static struct radix_mask *rn_new_radix_mask(struct radix_node *,
73 struct radix_mask *);
74
75 /*
76 * The data structure for the keys is a radix tree with one way
77 * branching removed. The index rn_b at an internal node n represents a bit
78 * position to be tested. The tree is arranged so that all descendants
79 * of a node n have keys whose bits all agree up to position rn_b - 1.
80 * (We say the index of n is rn_b.)
81 *
82 * There is at least one descendant which has a one bit at position rn_b,
83 * and at least one with a zero there.
84 *
85  * A route is determined by a pair of key and mask.  We require that the
86  * bit-wise logical AND of the key and mask be the key.
87  * We define the index of a route associated with the mask to be
88  * the first bit number in the mask where a 0 occurs (with bit number 0
89  * representing the highest order bit).
90  *
91  * We say a mask is normal if every bit past the index of the mask is 0.
92  * If a node n has a descendant (k, m) with index(m) == index(n) == rn_b,
93  * and m is a normal mask, then the route applies to every descendant of n.
94  * If index(m) < rn_b, this implies that the last few bits of k
95  * before bit b are all 0 (and hence the same is true of every descendant
96  * of n), so the route applies to all descendants of the node as well.
97 *
98 * Similar logic shows that a non-normal mask m such that
99 * index(m) <= index(n) could potentially apply to many children of n.
100 * Thus, for each non-host route, we attach its mask to a list at an internal
101 * node as high in the tree as we can go.
102 *
103 * The present version of the code makes use of normal routes in short-
104  * circuiting an explicit mask and compare operation when testing whether
105 * a key satisfies a normal route, and also in remembering the unique leaf
106 * that governs a subtree.
107 */
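/*
 * Illustrative sketch of the definitions above: compute the index of a
 * mask (the first zero bit, counting bit 0 as the high-order bit of the
 * first byte) and decide whether the mask is normal (no one bits past
 * that index).  The helper name and the plain byte-array interface are
 * assumed for the example only; the routines below operate on
 * length-prefixed sockaddr-style keys instead.
 */
static int
mask_index(const unsigned char *mask, int len, int *isnormal)
{
	int i, j, b;

	*isnormal = 1;
	for (i = 0; i < len; i++) {
		if (mask[i] == 0xff)
			continue;
		/* first clear bit within this byte */
		for (b = 0; b < 8; b++)
			if ((mask[i] & (0x80 >> b)) == 0)
				break;
		/* a one bit at or after the index makes the mask non-normal */
		if ((mask[i] & (0xff >> b)) != 0)
			*isnormal = 0;
		for (j = i + 1; j < len; j++)
			if (mask[j] != 0)
				*isnormal = 0;
		return i * 8 + b;
	}
	return len * 8;		/* all-ones mask */
}
/* e.g. the /20 mask { 0xff, 0xff, 0xf0, 0x00 } has index 20 and is normal */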
108
109 struct radix_node *
110 rn_search(
111 const void *v_arg,
112 struct radix_node *head)
113 {
114 const u_char * const v = v_arg;
115 struct radix_node *x;
116
117 for (x = head; x->rn_b >= 0;) {
118 if (x->rn_bmask & v[x->rn_off])
119 x = x->rn_r;
120 else
121 x = x->rn_l;
122 }
123 return (x);
124 }
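/*
 * Illustrative sketch: each internal node tests a single bit of the key.
 * For a node with index rn_b, rn_off is rn_b >> 3 and rn_bmask is
 * 0x80 >> (rn_b & 7) (set up in rn_newpair below), so the expression
 * "x->rn_bmask & v[x->rn_off]" extracts bit rn_b of the key, bit 0 being
 * the high-order bit of byte 0.  The helper name is assumed for the
 * example only.
 */
static int
key_bit(const u_char *key, int b)
{
	return (key[b >> 3] & (0x80 >> (b & 7))) != 0;
}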
125
126 struct radix_node *
127 rn_search_m(
128 const void *v_arg,
129 struct radix_node *head,
130 const void *m_arg)
131 {
132 struct radix_node *x;
133 const u_char * const v = v_arg;
134 const u_char * const m = m_arg;
135
136 for (x = head; x->rn_b >= 0;) {
137 if ((x->rn_bmask & m[x->rn_off]) &&
138 (x->rn_bmask & v[x->rn_off]))
139 x = x->rn_r;
140 else
141 x = x->rn_l;
142 }
143 return x;
144 }
145
146 int
147 rn_refines(
148 const void *m_arg,
149 const void *n_arg)
150 {
151 const char *m = m_arg;
152 const char *n = n_arg;
153 const char *lim = n + *(u_char *)n;
154 const char *lim2 = lim;
155 int longer = (*(u_char *)n++) - (int)(*(u_char *)m++);
156 int masks_are_equal = 1;
157
158 if (longer > 0)
159 lim -= longer;
160 while (n < lim) {
161 if (*n & ~(*m))
162 return 0;
163 if (*n++ != *m++)
164 masks_are_equal = 0;
165 }
166 while (n < lim2)
167 if (*n++)
168 return 0;
169 if (masks_are_equal && (longer < 0))
170 for (lim2 = m - longer; m < lim2; )
171 if (*m++)
172 return 1;
173 return (!masks_are_equal);
174 }
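/*
 * Illustrative usage sketch for rn_refines(): masks are length-prefixed
 * byte strings (byte 0 is the total length, as in a sockaddr), and
 * rn_refines(m, n) is non-zero when m is strictly more specific than n,
 * i.e. every bit set in n is also set in m and the two differ.  The
 * example masks below are assumptions, not taken from real sockaddrs.
 */
static void
refines_example(void)
{
	/* a "/24"-style mask and a "/16"-style mask, length byte first */
	static const unsigned char mask24[] = { 4, 0xff, 0xff, 0xff };
	static const unsigned char mask16[] = { 3, 0xff, 0xff };

	(void)rn_refines(mask24, mask16);	/* 1: /24 refines /16 */
	(void)rn_refines(mask16, mask24);	/* 0: /16 does not refine /24 */
}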
175
176 struct radix_node *
177 rn_lookup(
178 const void *v_arg,
179 const void *m_arg,
180 struct radix_node_head *head)
181 {
182 struct radix_node *x;
183 const char *netmask = NULL;
184
185 if (m_arg) {
186 if ((x = rn_addmask(m_arg, 1, head->rnh_treetop->rn_off)) == 0)
187 return (0);
188 netmask = x->rn_key;
189 }
190 x = rn_match(v_arg, head);
191 if (x && netmask) {
192 while (x && x->rn_mask != netmask)
193 x = x->rn_dupedkey;
194 }
195 return x;
196 }
197
198 static int
199 rn_satisfies_leaf(
200 const char *trial,
201 struct radix_node *leaf,
202 int skip)
203 {
204 const char *cp = trial;
205 const char *cp2 = leaf->rn_key;
206 const char *cp3 = leaf->rn_mask;
207 const char *cplim;
208 int length = min(*(u_char *)cp, *(u_char *)cp2);
209
210 if (cp3 == 0)
211 cp3 = rn_ones;
212 else
213 length = min(length, *(u_char *)cp3);
214 cplim = cp + length; cp3 += skip; cp2 += skip;
215 for (cp += skip; cp < cplim; cp++, cp2++, cp3++)
216 if ((*cp ^ *cp2) & *cp3)
217 return 0;
218 return 1;
219 }
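/*
 * Illustrative sketch: the loop above is a masked comparison -- the trial
 * key satisfies the leaf when the two keys agree on every bit set in the
 * leaf's mask, i.e. ((trial ^ key) & mask) == 0 for each byte.  The helper
 * name and plain byte-array interface are assumed for the example only.
 */
static int
masked_equal(const unsigned char *trial, const unsigned char *key,
    const unsigned char *mask, int len)
{
	int i;

	for (i = 0; i < len; i++)
		if ((trial[i] ^ key[i]) & mask[i])
			return 0;
	return 1;
}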
220
221 struct radix_node *
222 rn_match(
223 const void *v_arg,
224 struct radix_node_head *head)
225 {
226 const char * const v = v_arg;
227 struct radix_node *t = head->rnh_treetop;
228 struct radix_node *top = t;
229 struct radix_node *x;
230 struct radix_node *saved_t;
231 const char *cp = v;
232 const char *cp2;
233 const char *cplim;
234 int off = t->rn_off;
235 int vlen = *(u_char *)cp;
236 int matched_off;
237 int test, b, rn_b;
238
239 /*
240 * Open code rn_search(v, top) to avoid overhead of extra
241 * subroutine call.
242 */
243 for (; t->rn_b >= 0; ) {
244 if (t->rn_bmask & cp[t->rn_off])
245 t = t->rn_r;
246 else
247 t = t->rn_l;
248 }
249 /*
250 * See if we match exactly as a host destination
251 * or at least learn how many bits match, for normal mask finesse.
252 *
253 * It doesn't hurt us to limit how many bytes to check
254 * to the length of the mask, since if it matches we had a genuine
255 * match and the leaf we have is the most specific one anyway;
256 * if it didn't match with a shorter length it would fail
257 * with a long one. This wins big for class B&C netmasks which
258 * are probably the most common case...
259 */
260 if (t->rn_mask)
261 vlen = *(u_char *)t->rn_mask;
262 cp += off; cp2 = t->rn_key + off; cplim = v + vlen;
263 for (; cp < cplim; cp++, cp2++)
264 if (*cp != *cp2)
265 goto on1;
266 /*
267 * This extra grot is in case we are explicitly asked
268 * to look up the default. Ugh!
269 */
270 if ((t->rn_flags & RNF_ROOT) && t->rn_dupedkey)
271 t = t->rn_dupedkey;
272 return t;
273 on1:
274 test = (*cp ^ *cp2) & 0xff; /* find first bit that differs */
275 for (b = 7; (test >>= 1) > 0;)
276 b--;
277 matched_off = cp - v;
278 b += matched_off << 3;
279 rn_b = -1 - b;
280 /*
281 * If there is a host route in a duped-key chain, it will be first.
282 */
283 if ((saved_t = t)->rn_mask == 0)
284 t = t->rn_dupedkey;
285 for (; t; t = t->rn_dupedkey)
286 /*
287 * Even if we don't match exactly as a host,
288 * we may match if the leaf we wound up at is
289 * a route to a net.
290 */
291 if (t->rn_flags & RNF_NORMAL) {
292 if (rn_b <= t->rn_b)
293 return t;
294 } else if (rn_satisfies_leaf(v, t, matched_off))
295 return t;
296 t = saved_t;
297 /* start searching up the tree */
298 do {
299 struct radix_mask *m;
300 t = t->rn_p;
301 m = t->rn_mklist;
302 if (m) {
303 /*
304 * If non-contiguous masks ever become important
305 * we can restore the masking and open coding of
306 * the search and satisfaction test and put the
307 * calculation of "off" back before the "do".
308 */
309 do {
310 if (m->rm_flags & RNF_NORMAL) {
311 if (rn_b <= m->rm_b)
312 return (m->rm_leaf);
313 } else {
314 off = min(t->rn_off, matched_off);
315 x = rn_search_m(v, t, m->rm_mask);
316 while (x && x->rn_mask != m->rm_mask)
317 x = x->rn_dupedkey;
318 if (x && rn_satisfies_leaf(v, x, off))
319 return x;
320 }
321 m = m->rm_mklist;
322 } while (m);
323 }
324 } while (t != top);
325 return 0;
326 }
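/*
 * Illustrative sketch: the code at "on1" above converts the first byte at
 * which the search key and the leaf key differ into a bit number, with
 * bit 0 being the high-order bit of the byte.  The equivalent computation
 * on two differing bytes, with an assumed helper name:
 */
static int
first_diff_bit(unsigned char a, unsigned char b)
{
	int test = (a ^ b) & 0xff;	/* assumed: a != b */
	int bit = 7;

	while ((test >>= 1) > 0)
		bit--;
	return bit;	/* e.g. first_diff_bit(0x80, 0x00) == 0 */
}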
327
328 #ifdef RN_DEBUG
329 int rn_nodenum;
330 struct radix_node *rn_clist;
331 int rn_saveinfo;
332 int rn_debug = 1;
333 #endif
334
335 struct radix_node *
336 rn_newpair(
337 const void *v,
338 int b,
339 struct radix_node nodes[2])
340 {
341 struct radix_node *tt = nodes;
342 struct radix_node *t = tt + 1;
343 t->rn_b = b; t->rn_bmask = 0x80 >> (b & 7);
344 t->rn_l = tt; t->rn_off = b >> 3;
345 tt->rn_b = -1; tt->rn_key = v; tt->rn_p = t;
346 tt->rn_flags = t->rn_flags = RNF_ACTIVE;
347 #ifdef RN_DEBUG
348 tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
349 tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
350 #endif
351 return t;
352 }
353
354 struct radix_node *
355 rn_insert(
356 const void *v_arg,
357 struct radix_node_head *head,
358 int *dupentry,
359 struct radix_node nodes[2])
360 {
361 struct radix_node *top = head->rnh_treetop;
362 struct radix_node *t = rn_search(v_arg, top);
363 struct radix_node *tt;
364 const char *v = v_arg;
365 int head_off = top->rn_off;
366 int vlen = *((u_char *)v);
367 const char *cp = v + head_off;
368 int b;
369 /*
370 * Find first bit at which v and t->rn_key differ
371 */
372 {
373 const char *cp2 = t->rn_key + head_off;
374 const char *cplim = v + vlen;
375 int cmp_res;
376
377 while (cp < cplim)
378 if (*cp2++ != *cp++)
379 goto on1;
380 *dupentry = 1;
381 return t;
382 on1:
383 *dupentry = 0;
384 cmp_res = (cp[-1] ^ cp2[-1]) & 0xff;
385 for (b = (cp - v) << 3; cmp_res; b--)
386 cmp_res >>= 1;
387 }
388 {
389 struct radix_node *p, *x = top;
390 cp = v;
391 do {
392 p = x;
393 if (cp[x->rn_off] & x->rn_bmask)
394 x = x->rn_r;
395 else x = x->rn_l;
396 } while (b > (unsigned) x->rn_b); /* x->rn_b < b && x->rn_b >= 0 */
397 #ifdef RN_DEBUG
398 if (rn_debug)
399 log(LOG_DEBUG, "rn_insert: Going In:\n"), traverse(p);
400 #endif
401 t = rn_newpair(v_arg, b, nodes); tt = t->rn_l;
402 if ((cp[p->rn_off] & p->rn_bmask) == 0)
403 p->rn_l = t;
404 else
405 p->rn_r = t;
406 x->rn_p = t; t->rn_p = p; /* frees x, p as temp vars below */
407 if ((cp[t->rn_off] & t->rn_bmask) == 0) {
408 t->rn_r = x;
409 } else {
410 t->rn_r = tt; t->rn_l = x;
411 }
412 #ifdef RN_DEBUG
413 if (rn_debug)
414 log(LOG_DEBUG, "rn_insert: Coming Out:\n"), traverse(p);
415 #endif
416 }
417 return (tt);
418 }
419
420 struct radix_node *
421 rn_addmask(
422 const void *n_arg,
423 int search,
424 int skip)
425 {
426 const char *netmask = n_arg;
427 const char *cp;
428 const char *cplim;
429 struct radix_node *x;
430 struct radix_node *saved_x;
431 int b = 0, mlen, j;
432 int maskduplicated, m0, isnormal;
433 static int last_zeroed = 0;
434
435 if ((mlen = *(u_char *)netmask) > max_keylen)
436 mlen = max_keylen;
437 if (skip == 0)
438 skip = 1;
439 if (mlen <= skip)
440 return (mask_rnhead->rnh_nodes);
441 if (skip > 1)
442 Bcopy(rn_ones + 1, addmask_key + 1, skip - 1);
443 if ((m0 = mlen) > skip)
444 Bcopy(netmask + skip, addmask_key + skip, mlen - skip);
445 /*
446 * Trim trailing zeroes.
447 */
448 for (cp = addmask_key + mlen; (cp > addmask_key) && cp[-1] == 0;)
449 cp--;
450 mlen = cp - addmask_key;
451 if (mlen <= skip) {
452 if (m0 >= last_zeroed)
453 last_zeroed = mlen;
454 return (mask_rnhead->rnh_nodes);
455 }
456 if (m0 < last_zeroed)
457 Bzero(addmask_key + m0, last_zeroed - m0);
458 *addmask_key = last_zeroed = mlen;
459 x = rn_search(addmask_key, rn_masktop);
460 if (Bcmp(addmask_key, x->rn_key, mlen) != 0)
461 x = 0;
462 if (x || search)
463 return (x);
464 R_Malloc(x, struct radix_node *, max_keylen + 2 * sizeof (*x));
465 if ((saved_x = x) == 0)
466 return (0);
467 Bzero(x, max_keylen + 2 * sizeof (*x));
468 cp = netmask = (caddr_t)(x + 2);
469 Bcopy(addmask_key, (caddr_t)(x + 2), mlen);
470 x = rn_insert(cp, mask_rnhead, &maskduplicated, x);
471 if (maskduplicated) {
472 log(LOG_ERR, "rn_addmask: mask impossibly already in tree\n");
473 Free(saved_x);
474 return (x);
475 }
476 /*
477 * Calculate index of mask, and check for normalcy.
478 */
479 cplim = netmask + mlen; isnormal = 1;
480 for (cp = netmask + skip; (cp < cplim) && *(u_char *)cp == 0xff;)
481 cp++;
482 if (cp != cplim) {
483 for (j = 0x80; (j & *cp) != 0; j >>= 1)
484 b++;
485 if (*cp != normal_chars[b] || cp != (cplim - 1))
486 isnormal = 0;
487 }
488 b += (cp - netmask) << 3;
489 x->rn_b = -1 - b;
490 if (isnormal)
491 x->rn_flags |= RNF_NORMAL;
492 return (x);
493 }
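/*
 * Illustrative note on normal_chars[]: entry b is the byte with exactly b
 * leading one bits, so the normality check above accepts only masks that
 * are a run of 0xff bytes, then at most one normal_chars[] byte, with the
 * trailing zeroes already trimmed off.  A standalone equivalent of the
 * per-byte test, with an assumed helper name:
 */
static int
is_prefix_byte(unsigned char c, int leading_ones)
{
	/* 0xff00 >> n truncates to 0x00, 0x80, 0xc0, ..., 0xfe, 0xff */
	return c == (unsigned char)(0xff00 >> leading_ones);
}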
494
495 static int /* XXX: arbitrary ordering for non-contiguous masks */
496 rn_lexobetter(
497 const void *m_arg,
498 const void *n_arg)
499 {
500 const u_char *mp = m_arg;
501 const u_char *np = n_arg;
502 const u_char *lim;
503
504 if (*mp > *np)
505 return 1; /* not really, but need to check longer one first */
506 if (*mp == *np)
507 for (lim = mp + *mp; mp < lim;)
508 if (*mp++ > *np++)
509 return 1;
510 return 0;
511 }
512
513 static struct radix_mask *
514 rn_new_radix_mask(
515 struct radix_node *tt,
516 struct radix_mask *next)
517 {
518 struct radix_mask *m;
519
520 MKGet(m);
521 if (m == 0) {
522 log(LOG_ERR, "Mask for route not entered\n");
523 return (0);
524 }
525 Bzero(m, sizeof *m);
526 m->rm_b = tt->rn_b;
527 m->rm_flags = tt->rn_flags;
528 if (tt->rn_flags & RNF_NORMAL)
529 m->rm_leaf = tt;
530 else
531 m->rm_mask = tt->rn_mask;
532 m->rm_mklist = next;
533 tt->rn_mklist = m;
534 return m;
535 }
536
537 struct radix_node *
538 rn_addroute(
539 const void *v_arg,
540 const void *n_arg,
541 struct radix_node_head *head,
542 struct radix_node treenodes[2])
543 {
544 const char *v = v_arg;
545 const char *netmask = n_arg;
546 struct radix_node *t;
547 struct radix_node *x = 0;
548 struct radix_node *tt;
549 struct radix_node *saved_tt;
550 struct radix_node *top = head->rnh_treetop;
551 short b = 0, b_leaf = 0;
552 int keyduplicated;
553 const char *mmask;
554 struct radix_mask *m, **mp;
555
556 /*
557 * In dealing with non-contiguous masks, there may be
558 * many different routes which have the same mask.
559 * We will find it useful to have a unique pointer to
560 * the mask to speed avoiding duplicate references at
561 * nodes and possibly save time in calculating indices.
562 */
563 if (netmask) {
564 if ((x = rn_addmask(netmask, 0, top->rn_off)) == 0)
565 return (0);
566 b_leaf = x->rn_b;
567 b = -1 - x->rn_b;
568 netmask = x->rn_key;
569 }
570 /*
571 * Deal with duplicated keys: attach node to previous instance
572 */
573 saved_tt = tt = rn_insert(v, head, &keyduplicated, treenodes);
574 if (keyduplicated) {
575 for (t = tt; tt; t = tt, tt = tt->rn_dupedkey) {
576 if (tt->rn_mask == netmask)
577 return (0);
578 if (netmask == 0 ||
579 (tt->rn_mask &&
580 ((b_leaf < tt->rn_b) || /* index(netmask) > node */
581 rn_refines(netmask, tt->rn_mask) ||
582 rn_lexobetter(netmask, tt->rn_mask))))
583 break;
584 }
585 /*
586 * If the mask is not duplicated, we wouldn't
587 * find it among possible duplicate key entries
588 * anyway, so the above test doesn't hurt.
589 *
590 * We sort the masks for a duplicated key the same way as
591 * in a masklist -- most specific to least specific.
592 * This may require the unfortunate nuisance of relocating
593 * the head of the list.
594 *
595 * We also reverse, or doubly link the list through the
596 * parent pointer.
597 */
598 if (tt == saved_tt) {
599 struct radix_node *xx = x;
600 /* link in at head of list */
601 (tt = treenodes)->rn_dupedkey = t;
602 tt->rn_flags = t->rn_flags;
603 tt->rn_p = x = t->rn_p;
604 t->rn_p = tt;
605 if (x->rn_l == t) x->rn_l = tt; else x->rn_r = tt;
606 saved_tt = tt; x = xx;
607 } else {
608 (tt = treenodes)->rn_dupedkey = t->rn_dupedkey;
609 t->rn_dupedkey = tt;
610 tt->rn_p = t;
611 if (tt->rn_dupedkey)
612 tt->rn_dupedkey->rn_p = tt;
613 }
614 #ifdef RN_DEBUG
615 t=tt+1; tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
616 tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
617 #endif
618 tt->rn_key = (caddr_t) v;
619 tt->rn_b = -1;
620 tt->rn_flags = RNF_ACTIVE;
621 }
622 /*
623 * Put mask in tree.
624 */
625 if (netmask) {
626 tt->rn_mask = netmask;
627 tt->rn_b = x->rn_b;
628 tt->rn_flags |= x->rn_flags & RNF_NORMAL;
629 }
630 t = saved_tt->rn_p;
631 if (keyduplicated)
632 goto on2;
633 b_leaf = -1 - t->rn_b;
634 if (t->rn_r == saved_tt) x = t->rn_l; else x = t->rn_r;
635 /* Promote general routes from below */
636 if (x->rn_b < 0) {
637 for (mp = &t->rn_mklist; x; x = x->rn_dupedkey)
638 if (x->rn_mask && (x->rn_b >= b_leaf) && x->rn_mklist == 0) {
639 *mp = m = rn_new_radix_mask(x, 0);
640 if (m)
641 mp = &m->rm_mklist;
642 }
643 } else if (x->rn_mklist) {
644 /*
645 * Skip over masks whose index is > that of new node
646 */
647 for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist)
648 if (m->rm_b >= b_leaf)
649 break;
650 t->rn_mklist = m; *mp = 0;
651 }
652 on2:
653 /* Add new route to highest possible ancestor's list */
654 if ((netmask == 0) || (b > t->rn_b ))
655 return tt; /* can't lift at all */
656 b_leaf = tt->rn_b;
657 do {
658 x = t;
659 t = t->rn_p;
660 } while (b <= t->rn_b && x != top);
661 /*
662 * Search through routes associated with node to
663 * insert new route according to index.
664 * Need same criteria as when sorting dupedkeys to avoid
665 * double loop on deletion.
666 */
667 for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist) {
668 if (m->rm_b < b_leaf)
669 continue;
670 if (m->rm_b > b_leaf)
671 break;
672 if (m->rm_flags & RNF_NORMAL) {
673 mmask = m->rm_leaf->rn_mask;
674 if (tt->rn_flags & RNF_NORMAL) {
675 log(LOG_ERR, "Non-unique normal route,"
676 " mask not entered\n");
677 return tt;
678 }
679 } else
680 mmask = m->rm_mask;
681 if (mmask == netmask) {
682 m->rm_refs++;
683 tt->rn_mklist = m;
684 return tt;
685 }
686 if (rn_refines(netmask, mmask) || rn_lexobetter(netmask, mmask))
687 break;
688 }
689 *mp = rn_new_radix_mask(tt, *mp);
690 return tt;
691 }
692
693 struct radix_node *
694 rn_delete(
695 const void *v_arg,
696 const void *netmask_arg,
697 struct radix_node_head *head)
698 {
699 struct radix_node *t;
700 struct radix_node *p;
701 struct radix_node *x;
702 struct radix_node *tt;
703 struct radix_node *dupedkey;
704 struct radix_node *saved_tt;
705 struct radix_node *top;
706 struct radix_mask *m;
707 struct radix_mask *saved_m;
708 struct radix_mask **mp;
709 const char *v = v_arg;
710 const char *netmask = netmask_arg;
711 int b, head_off, vlen;
712
713 x = head->rnh_treetop;
714 tt = rn_search(v, x);
715 head_off = x->rn_off;
716 vlen = *(u_char *)v;
717 saved_tt = tt;
718 top = x;
719 if (tt == 0 ||
720 Bcmp(v + head_off, tt->rn_key + head_off, vlen - head_off))
721 return (0);
722 /*
723 * Delete our route from mask lists.
724 */
725 if (netmask) {
726 if ((x = rn_addmask(netmask, 1, head_off)) == 0)
727 return (0);
728 netmask = x->rn_key;
729 while (tt->rn_mask != netmask)
730 if ((tt = tt->rn_dupedkey) == 0)
731 return (0);
732 }
733 if (tt->rn_mask == 0 || (saved_m = m = tt->rn_mklist) == 0)
734 goto on1;
735 if (tt->rn_flags & RNF_NORMAL) {
736 if (m->rm_leaf != tt || m->rm_refs > 0) {
737 log(LOG_ERR, "rn_delete: inconsistent annotation\n");
738 return 0; /* dangling ref could cause disaster */
739 }
740 } else {
741 if (m->rm_mask != tt->rn_mask) {
742 log(LOG_ERR, "rn_delete: inconsistent annotation\n");
743 goto on1;
744 }
745 if (--m->rm_refs >= 0)
746 goto on1;
747 }
748 b = -1 - tt->rn_b;
749 t = saved_tt->rn_p;
750 if (b > t->rn_b)
751 goto on1; /* Wasn't lifted at all */
752 do {
753 x = t;
754 t = t->rn_p;
755 } while (b <= t->rn_b && x != top);
756 for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist)
757 if (m == saved_m) {
758 *mp = m->rm_mklist;
759 MKFree(m);
760 break;
761 }
762 if (m == 0) {
763 log(LOG_ERR, "rn_delete: couldn't find our annotation\n");
764 if (tt->rn_flags & RNF_NORMAL)
765 return (0); /* Dangling ref to us */
766 }
767 on1:
768 /*
769 * Eliminate us from tree
770 */
771 if (tt->rn_flags & RNF_ROOT)
772 return (0);
773 #ifdef RN_DEBUG
774 /* Get us out of the creation list */
775 for (t = rn_clist; t && t->rn_ybro != tt; t = t->rn_ybro) {}
776 if (t) t->rn_ybro = tt->rn_ybro;
777 #endif
778 t = tt->rn_p;
779 dupedkey = saved_tt->rn_dupedkey;
780 if (dupedkey) {
781 /*
782 * Here, tt is the deletion target, and
783 * saved_tt is the head of the dupedkey chain.
784 */
785 if (tt == saved_tt) {
786 x = dupedkey; x->rn_p = t;
787 if (t->rn_l == tt) t->rn_l = x; else t->rn_r = x;
788 } else {
789 /* find node in front of tt on the chain */
790 for (x = p = saved_tt; p && p->rn_dupedkey != tt;)
791 p = p->rn_dupedkey;
792 if (p) {
793 p->rn_dupedkey = tt->rn_dupedkey;
794 if (tt->rn_dupedkey)
795 tt->rn_dupedkey->rn_p = p;
796 } else log(LOG_ERR, "rn_delete: couldn't find us\n");
797 }
798 t = tt + 1;
799 if (t->rn_flags & RNF_ACTIVE) {
800 #ifndef RN_DEBUG
801 *++x = *t; p = t->rn_p;
802 #else
803 b = t->rn_info; *++x = *t; t->rn_info = b; p = t->rn_p;
804 #endif
805 if (p->rn_l == t) p->rn_l = x; else p->rn_r = x;
806 x->rn_l->rn_p = x; x->rn_r->rn_p = x;
807 }
808 goto out;
809 }
810 if (t->rn_l == tt) x = t->rn_r; else x = t->rn_l;
811 p = t->rn_p;
812 if (p->rn_r == t) p->rn_r = x; else p->rn_l = x;
813 x->rn_p = p;
814 /*
815 * Demote routes attached to us.
816 */
817 if (t->rn_mklist) {
818 if (x->rn_b >= 0) {
819 for (mp = &x->rn_mklist; (m = *mp);)
820 mp = &m->rm_mklist;
821 *mp = t->rn_mklist;
822 } else {
823 /* If there are any key,mask pairs in a sibling
824 duped-key chain, some subset will appear sorted
825 in the same order attached to our mklist */
826 for (m = t->rn_mklist; m && x; x = x->rn_dupedkey)
827 if (m == x->rn_mklist) {
828 struct radix_mask *mm = m->rm_mklist;
829 x->rn_mklist = 0;
830 if (--(m->rm_refs) < 0)
831 MKFree(m);
832 m = mm;
833 }
834 if (m)
835 log(LOG_ERR, "%s %p at %p\n",
836 "rn_delete: Orphaned Mask", m, x);
837 }
838 }
839 /*
840 * We may be holding an active internal node in the tree.
841 */
842 x = tt + 1;
843 if (t != x) {
844 #ifndef RN_DEBUG
845 *t = *x;
846 #else
847 b = t->rn_info; *t = *x; t->rn_info = b;
848 #endif
849 t->rn_l->rn_p = t; t->rn_r->rn_p = t;
850 p = x->rn_p;
851 if (p->rn_l == x) p->rn_l = t; else p->rn_r = t;
852 }
853 out:
854 tt->rn_flags &= ~RNF_ACTIVE;
855 tt[1].rn_flags &= ~RNF_ACTIVE;
856 return (tt);
857 }
858
859 int
860 rn_walktree(
861 struct radix_node_head *h,
862 int (*f)(struct radix_node *, void *),
863 void *w)
864 {
865 int error;
866 struct radix_node *base;
867 struct radix_node *next;
868 struct radix_node *rn = h->rnh_treetop;
869 /*
870 * This gets complicated because we may delete the node
871 * while applying the function f to it, so we need to calculate
872 * the successor node in advance.
873 */
874 /* First time through node, go left */
875 while (rn->rn_b >= 0)
876 rn = rn->rn_l;
877 for (;;) {
878 base = rn;
879 /* If at right child go back up, otherwise, go right */
880 while (rn->rn_p->rn_r == rn && (rn->rn_flags & RNF_ROOT) == 0)
881 rn = rn->rn_p;
882 /* Find the next *leaf* since next node might vanish, too */
883 for (rn = rn->rn_p->rn_r; rn->rn_b >= 0;)
884 rn = rn->rn_l;
885 next = rn;
886 /* Process leaves */
887 while ((rn = base) != NULL) {
888 base = rn->rn_dupedkey;
889 if (!(rn->rn_flags & RNF_ROOT) && (error = (*f)(rn, w)))
890 return (error);
891 }
892 rn = next;
893 if (rn->rn_flags & RNF_ROOT)
894 return (0);
895 }
896 /* NOTREACHED */
897 }
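/*
 * Illustrative usage sketch for rn_walktree(): the callback is applied to
 * every leaf (the RNF_ROOT sentinels are skipped), and a non-zero return
 * aborts the walk and is passed back to the caller.  The callback name and
 * the counter argument are assumed for the example only.
 */
static int
count_leaf(struct radix_node *rn, void *arg)
{
	(void)rn;
	(*(int *)arg)++;
	return 0;
}
/* caller: int n = 0; error = rn_walktree(rnh, count_leaf, &n); */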
898
899 int
900 rn_inithead(
901 	void **head,
902 	int off)
903 {
904 struct radix_node_head *rnh;
905
906 if (*head)
907 return (1);
908 R_Malloc(rnh, struct radix_node_head *, sizeof (*rnh));
909 if (rnh == 0)
910 return (0);
911 *head = rnh;
912 return rn_inithead0(rnh, off);
913 }
914
915 int
916 rn_inithead0(
917 	struct radix_node_head *rnh,
918 	int off)
919 {
920 struct radix_node *t;
921 struct radix_node *tt;
922 struct radix_node *ttt;
923
924 Bzero(rnh, sizeof (*rnh));
925 t = rn_newpair(rn_zeros, off, rnh->rnh_nodes);
926 ttt = rnh->rnh_nodes + 2;
927 t->rn_r = ttt;
928 t->rn_p = t;
929 tt = t->rn_l;
930 tt->rn_flags = t->rn_flags = RNF_ROOT | RNF_ACTIVE;
931 tt->rn_b = -1 - off;
932 *ttt = *tt;
933 ttt->rn_key = rn_ones;
934 rnh->rnh_addaddr = rn_addroute;
935 rnh->rnh_deladdr = rn_delete;
936 rnh->rnh_matchaddr = rn_match;
937 rnh->rnh_lookup = rn_lookup;
938 rnh->rnh_walktree = rn_walktree;
939 rnh->rnh_treetop = t;
940 return (1);
941 }
942
943 void
944 rn_init(void)
945 {
946 char *cp, *cplim;
947 #ifdef _KERNEL
948 static int initialized;
949 __link_set_decl(domains, struct domain);
950 struct domain *const *dpp;
951
952 if (initialized)
953 return;
954 initialized = 1;
955
956 __link_set_foreach(dpp, domains) {
957 if ((*dpp)->dom_maxrtkey > max_keylen)
958 max_keylen = (*dpp)->dom_maxrtkey;
959 }
960 #ifdef INET
961 encap_setkeylen();
962 #endif
963 #endif
964 if (max_keylen == 0) {
965 log(LOG_ERR,
966 "rn_init: radix functions require max_keylen be set\n");
967 return;
968 }
969 R_Malloc(rn_zeros, char *, 3 * max_keylen);
970 if (rn_zeros == NULL)
971 panic("rn_init");
972 Bzero(rn_zeros, 3 * max_keylen);
973 rn_ones = cp = rn_zeros + max_keylen;
974 addmask_key = cplim = rn_ones + max_keylen;
975 while (cp < cplim)
976 *cp++ = -1;
977 if (rn_inithead((void *)&mask_rnhead, 0) == 0)
978 panic("rn_init 2");
979 }