FreeBSD/Linux Kernel Cross Reference
sys/net/radix.c
/*	$NetBSD: radix.c,v 1.20 2003/08/07 16:32:56 agc Exp $	*/

/*
 * Copyright (c) 1988, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)radix.c	8.6 (Berkeley) 10/17/95
 */

/*
 * Routines to build and maintain radix trees for routing lookups.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: radix.c,v 1.20 2003/08/07 16:32:56 agc Exp $");

#ifndef _NET_RADIX_H_
#include <sys/param.h>
#ifdef _KERNEL
#include <sys/systm.h>
#include <sys/malloc.h>
#define	M_DONTWAIT M_NOWAIT
#include <sys/domain.h>
#else
#include <stdlib.h>
#endif
#include <sys/syslog.h>
#include <net/radix.h>
#endif

int	max_keylen;
struct radix_mask *rn_mkfreelist;
struct radix_node_head *mask_rnhead;
static char *addmask_key;
static char normal_chars[] = {0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, -1};
static char *rn_zeros, *rn_ones;

#define rn_masktop (mask_rnhead->rnh_treetop)
#undef Bcmp
#define Bcmp(a, b, l) (l == 0 ? 0 : bcmp((caddr_t)(a), (caddr_t)(b), (u_long)l))

static int rn_satisfies_leaf __P((char *, struct radix_node *, int));
static int rn_lexobetter __P((void *, void *));
static struct radix_mask *rn_new_radix_mask __P((struct radix_node *,
    struct radix_mask *));
/*
 * The data structure for the keys is a radix tree with one-way
 * branching removed.  The index rn_b at an internal node n represents a bit
 * position to be tested.  The tree is arranged so that all descendants
 * of a node n have keys whose bits all agree up to position rn_b - 1.
 * (We say the index of n is rn_b.)
 *
 * There is at least one descendant which has a one bit at position rn_b,
 * and at least one with a zero there.
 *
 * A route is determined by a pair of key and mask.  We require that the
 * bit-wise logical AND of the key and mask be the key.
 * We define the index of a route to be the first bit number in its mask
 * where 0 occurs (with bit number 0 representing the highest order bit).
 *
 * We say a mask is normal if every bit past the index of the mask is 0.
 * If a node n has a descendant (k, m) with index(m) == index(n) == rn_b,
 * and m is a normal mask, then the route applies to every descendant of n.
 * If index(m) < rn_b, this implies that the trailing bits of k
 * before bit rn_b are all 0 (and hence the same is true of every descendant
 * of n), so the route applies to all descendants of the node as well.
 *
 * Similar logic shows that a non-normal mask m such that
 * index(m) <= index(n) could potentially apply to many children of n.
 * Thus, for each non-host route, we attach its mask to a list at an internal
 * node as high in the tree as we can go.
 *
 * The present version of the code makes use of normal routes in short-
 * circuiting an explicit mask and compare operation when testing whether
 * a key satisfies a normal route, and also in remembering the unique leaf
 * that governs a subtree.
 */

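/*
 * As a concrete illustration of the index arithmetic described above
 * (an expository sketch, not part of the original file; the RN_EXAMPLES
 * guard is assumed and never defined here): bit b of a key lives in byte
 * b >> 3 under the mask 0x80 >> (b & 7), exactly as rn_newpair() below
 * stores it in rn_off and rn_bmask.
 */
#ifdef RN_EXAMPLES
static int
rn_example_bit(key, b)
	u_char *key;
	int b;
{

	/* Value of bit b of key, with bit 0 the high-order bit of key[0]. */
	return ((key[b >> 3] & (0x80 >> (b & 7))) != 0);
}
#endif /* RN_EXAMPLES */
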
struct radix_node *
rn_search(v_arg, head)
	void *v_arg;
	struct radix_node *head;
{
	struct radix_node *x;
	caddr_t v;

	for (x = head, v = v_arg; x->rn_b >= 0;) {
		if (x->rn_bmask & v[x->rn_off])
			x = x->rn_r;
		else
			x = x->rn_l;
	}
	return (x);
}

struct radix_node *
rn_search_m(v_arg, head, m_arg)
	struct radix_node *head;
	void *v_arg, *m_arg;
{
	struct radix_node *x;
	caddr_t v = v_arg, m = m_arg;

	for (x = head; x->rn_b >= 0;) {
		if ((x->rn_bmask & m[x->rn_off]) &&
		    (x->rn_bmask & v[x->rn_off]))
			x = x->rn_r;
		else
			x = x->rn_l;
	}
	return x;
}

int
rn_refines(m_arg, n_arg)
	void *m_arg, *n_arg;
{
	caddr_t m = m_arg, n = n_arg;
	caddr_t lim, lim2 = lim = n + *(u_char *)n;
	int longer = (*(u_char *)n++) - (int)(*(u_char *)m++);
	int masks_are_equal = 1;

	if (longer > 0)
		lim -= longer;
	while (n < lim) {
		if (*n & ~(*m))
			return 0;
		if (*n++ != *m++)
			masks_are_equal = 0;
	}
	while (n < lim2)
		if (*n++)
			return 0;
	if (masks_are_equal && (longer < 0))
		for (lim2 = m - longer; m < lim2; )
			if (*m++)
				return 1;
	return (!masks_are_equal);
}

struct radix_node *
rn_lookup(v_arg, m_arg, head)
	void *v_arg, *m_arg;
	struct radix_node_head *head;
{
	struct radix_node *x;
	caddr_t netmask = 0;

	if (m_arg) {
		if ((x = rn_addmask(m_arg, 1, head->rnh_treetop->rn_off)) == 0)
			return (0);
		netmask = x->rn_key;
	}
	x = rn_match(v_arg, head);
	if (x && netmask) {
		while (x && x->rn_mask != netmask)
			x = x->rn_dupedkey;
	}
	return x;
}

static int
rn_satisfies_leaf(trial, leaf, skip)
	char *trial;
	struct radix_node *leaf;
	int skip;
{
	char *cp = trial, *cp2 = leaf->rn_key, *cp3 = leaf->rn_mask;
	char *cplim;
	int length = min(*(u_char *)cp, *(u_char *)cp2);

	if (cp3 == 0)
		cp3 = rn_ones;
	else
		length = min(length, *(u_char *)cp3);
	cplim = cp + length; cp3 += skip; cp2 += skip;
	for (cp += skip; cp < cplim; cp++, cp2++, cp3++)
		if ((*cp ^ *cp2) & *cp3)
			return 0;
	return 1;
}

struct radix_node *
rn_match(v_arg, head)
	void *v_arg;
	struct radix_node_head *head;
{
	caddr_t v = v_arg;
	struct radix_node *t = head->rnh_treetop, *x;
	caddr_t cp = v, cp2;
	caddr_t cplim;
	struct radix_node *saved_t, *top = t;
	int off = t->rn_off, vlen = *(u_char *)cp, matched_off;
	int test, b, rn_b;

	/*
	 * Open code rn_search(v, top) to avoid overhead of extra
	 * subroutine call.
	 */
	for (; t->rn_b >= 0; ) {
		if (t->rn_bmask & cp[t->rn_off])
			t = t->rn_r;
		else
			t = t->rn_l;
	}
	/*
	 * See if we match exactly as a host destination,
	 * or at least learn how many bits match, for normal mask finesse.
	 *
	 * It doesn't hurt to limit the number of bytes we check
	 * to the length of the mask: if they match we have a genuine
	 * match and the leaf we have is the most specific one anyway;
	 * if they didn't match at the shorter length they would also
	 * fail at the full length.  This wins big for class B and C
	 * netmasks, which are probably the most common case...
	 */
	if (t->rn_mask)
		vlen = *(u_char *)t->rn_mask;
	cp += off; cp2 = t->rn_key + off; cplim = v + vlen;
	for (; cp < cplim; cp++, cp2++)
		if (*cp != *cp2)
			goto on1;
	/*
	 * This extra grot is in case we are explicitly asked
	 * to look up the default.  Ugh!
	 */
	if ((t->rn_flags & RNF_ROOT) && t->rn_dupedkey)
		t = t->rn_dupedkey;
	return t;
on1:
	test = (*cp ^ *cp2) & 0xff; /* find first bit that differs */
	for (b = 7; (test >>= 1) > 0;)
		b--;
	matched_off = cp - v;
	b += matched_off << 3;
	rn_b = -1 - b;
	/*
	 * If there is a host route in a duped-key chain, it will be first.
	 */
	if ((saved_t = t)->rn_mask == 0)
		t = t->rn_dupedkey;
	for (; t; t = t->rn_dupedkey)
		/*
		 * Even if we don't match exactly as a host,
		 * we may match if the leaf we wound up at is
		 * a route to a net.
		 */
		if (t->rn_flags & RNF_NORMAL) {
			if (rn_b <= t->rn_b)
				return t;
		} else if (rn_satisfies_leaf(v, t, matched_off))
			return t;
	t = saved_t;
	/* start searching up the tree */
	do {
		struct radix_mask *m;
		t = t->rn_p;
		m = t->rn_mklist;
		if (m) {
			/*
			 * If non-contiguous masks ever become important
			 * we can restore the masking and open coding of
			 * the search and satisfaction test and put the
			 * calculation of "off" back before the "do".
			 */
			do {
				if (m->rm_flags & RNF_NORMAL) {
					if (rn_b <= m->rm_b)
						return (m->rm_leaf);
				} else {
					off = min(t->rn_off, matched_off);
					x = rn_search_m(v, t, m->rm_mask);
					while (x && x->rn_mask != m->rm_mask)
						x = x->rn_dupedkey;
					if (x && rn_satisfies_leaf(v, x, off))
						return x;
				}
				m = m->rm_mklist;
			} while (m);
		}
	} while (t != top);
	return 0;
}
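
/*
 * Sketch of how a caller might build a lookup key for rn_match() (for
 * exposition only; the RN_EXAMPLES guard and this helper are assumed,
 * not part of the original file).  It assumes the usual BSD convention
 * that keys are sockaddrs whose first byte is the total key length
 * (read as vlen above), with the head's rn_off selecting the first byte
 * of interest, e.g. the start of sin_addr for an AF_INET tree.
 */
#ifdef RN_EXAMPLES
#include <netinet/in.h>

static struct radix_node *
rn_example_match(dst, inet_rnh)
	struct in_addr dst;
	struct radix_node_head *inet_rnh;
{
	struct sockaddr_in key;

	Bzero(&key, sizeof(key));
	key.sin_len = sizeof(key);	/* length prefix read by rn_match() */
	key.sin_family = AF_INET;
	key.sin_addr = dst;
	return rn_match((void *)&key, inet_rnh);
}
#endif /* RN_EXAMPLES */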

#ifdef RN_DEBUG
int	rn_nodenum;
struct	radix_node *rn_clist;
int	rn_saveinfo;
int	rn_debug = 1;
#endif

struct radix_node *
rn_newpair(v, b, nodes)
	void *v;
	int b;
	struct radix_node nodes[2];
{
	struct radix_node *tt = nodes, *t = tt + 1;
	t->rn_b = b; t->rn_bmask = 0x80 >> (b & 7);
	t->rn_l = tt; t->rn_off = b >> 3;
	tt->rn_b = -1; tt->rn_key = (caddr_t)v; tt->rn_p = t;
	tt->rn_flags = t->rn_flags = RNF_ACTIVE;
#ifdef RN_DEBUG
	tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
	tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
#endif
	return t;
}

struct radix_node *
rn_insert(v_arg, head, dupentry, nodes)
	void *v_arg;
	struct radix_node_head *head;
	int *dupentry;
	struct radix_node nodes[2];
{
	caddr_t v = v_arg;
	struct radix_node *top = head->rnh_treetop;
	int head_off = top->rn_off, vlen = (int)*((u_char *)v);
	struct radix_node *t = rn_search(v_arg, top);
	caddr_t cp = v + head_off;
	int b;
	struct radix_node *tt;
	/*
	 * Find first bit at which v and t->rn_key differ
	 */
    {
	caddr_t cp2 = t->rn_key + head_off;
	int cmp_res;
	caddr_t cplim = v + vlen;

	while (cp < cplim)
		if (*cp2++ != *cp++)
			goto on1;
	*dupentry = 1;
	return t;
on1:
	*dupentry = 0;
	cmp_res = (cp[-1] ^ cp2[-1]) & 0xff;
	for (b = (cp - v) << 3; cmp_res; b--)
		cmp_res >>= 1;
    }
    {
	struct radix_node *p, *x = top;
	cp = v;
	do {
		p = x;
		if (cp[x->rn_off] & x->rn_bmask)
			x = x->rn_r;
		else x = x->rn_l;
	} while (b > (unsigned) x->rn_b); /* x->rn_b < b && x->rn_b >= 0 */
#ifdef RN_DEBUG
	if (rn_debug)
		log(LOG_DEBUG, "rn_insert: Going In:\n"), traverse(p);
#endif
	t = rn_newpair(v_arg, b, nodes); tt = t->rn_l;
	if ((cp[p->rn_off] & p->rn_bmask) == 0)
		p->rn_l = t;
	else
		p->rn_r = t;
	x->rn_p = t; t->rn_p = p; /* frees x, p as temp vars below */
	if ((cp[t->rn_off] & t->rn_bmask) == 0) {
		t->rn_r = x;
	} else {
		t->rn_r = tt; t->rn_l = x;
	}
#ifdef RN_DEBUG
	if (rn_debug)
		log(LOG_DEBUG, "rn_insert: Coming Out:\n"), traverse(p);
#endif
    }
	return (tt);
}

struct radix_node *
rn_addmask(n_arg, search, skip)
	int search, skip;
	void *n_arg;
{
	caddr_t netmask = (caddr_t)n_arg;
	struct radix_node *x;
	caddr_t cp, cplim;
	int b = 0, mlen, j;
	int maskduplicated, m0, isnormal;
	struct radix_node *saved_x;
	static int last_zeroed = 0;

	if ((mlen = *(u_char *)netmask) > max_keylen)
		mlen = max_keylen;
	if (skip == 0)
		skip = 1;
	if (mlen <= skip)
		return (mask_rnhead->rnh_nodes);
	if (skip > 1)
		Bcopy(rn_ones + 1, addmask_key + 1, skip - 1);
	if ((m0 = mlen) > skip)
		Bcopy(netmask + skip, addmask_key + skip, mlen - skip);
	/*
	 * Trim trailing zeroes.
	 */
	for (cp = addmask_key + mlen; (cp > addmask_key) && cp[-1] == 0;)
		cp--;
	mlen = cp - addmask_key;
	if (mlen <= skip) {
		if (m0 >= last_zeroed)
			last_zeroed = mlen;
		return (mask_rnhead->rnh_nodes);
	}
	if (m0 < last_zeroed)
		Bzero(addmask_key + m0, last_zeroed - m0);
	*addmask_key = last_zeroed = mlen;
	x = rn_search(addmask_key, rn_masktop);
	if (Bcmp(addmask_key, x->rn_key, mlen) != 0)
		x = 0;
	if (x || search)
		return (x);
	R_Malloc(x, struct radix_node *, max_keylen + 2 * sizeof (*x));
	if ((saved_x = x) == 0)
		return (0);
	Bzero(x, max_keylen + 2 * sizeof (*x));
	netmask = cp = (caddr_t)(x + 2);
	Bcopy(addmask_key, cp, mlen);
	x = rn_insert(cp, mask_rnhead, &maskduplicated, x);
	if (maskduplicated) {
		log(LOG_ERR, "rn_addmask: mask impossibly already in tree\n");
		Free(saved_x);
		return (x);
	}
	/*
	 * Calculate index of mask, and check for normalcy.
	 */
	cplim = netmask + mlen; isnormal = 1;
	for (cp = netmask + skip; (cp < cplim) && *(u_char *)cp == 0xff;)
		cp++;
	if (cp != cplim) {
		for (j = 0x80; (j & *cp) != 0; j >>= 1)
			b++;
		if (*cp != normal_chars[b] || cp != (cplim - 1))
			isnormal = 0;
	}
	b += (cp - netmask) << 3;
	x->rn_b = -1 - b;
	if (isnormal)
		x->rn_flags |= RNF_NORMAL;
	return (x);
}
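
/*
 * Worked example of the index calculation above (illustrative only,
 * assuming AF_INET keys in which the address begins at byte offset 4,
 * i.e. skip == 4): for the mask 255.255.240.0 the scan stops at the
 * byte 0xf0 at offset 6, which contributes 4 leading one bits, so
 * b == 6*8 + 4 == 52 and rn_b == -53.  The mask is "normal" because
 * 0xf0 == normal_chars[4] and it is the last nonzero byte of the mask.
 */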

static int	/* XXX: arbitrary ordering for non-contiguous masks */
rn_lexobetter(m_arg, n_arg)
	void *m_arg, *n_arg;
{
	u_char *mp = m_arg, *np = n_arg, *lim;

	if (*mp > *np)
		return 1;  /* not really, but need to check longer one first */
	if (*mp == *np)
		for (lim = mp + *mp; mp < lim;)
			if (*mp++ > *np++)
				return 1;
	return 0;
}

static struct radix_mask *
rn_new_radix_mask(tt, next)
	struct radix_node *tt;
	struct radix_mask *next;
{
	struct radix_mask *m;

	MKGet(m);
	if (m == 0) {
		log(LOG_ERR, "Mask for route not entered\n");
		return (0);
	}
	Bzero(m, sizeof *m);
	m->rm_b = tt->rn_b;
	m->rm_flags = tt->rn_flags;
	if (tt->rn_flags & RNF_NORMAL)
		m->rm_leaf = tt;
	else
		m->rm_mask = tt->rn_mask;
	m->rm_mklist = next;
	tt->rn_mklist = m;
	return m;
}

struct radix_node *
rn_addroute(v_arg, n_arg, head, treenodes)
	void *v_arg, *n_arg;
	struct radix_node_head *head;
	struct radix_node treenodes[2];
{
	caddr_t v = (caddr_t)v_arg, netmask = (caddr_t)n_arg;
	struct radix_node *t, *x = 0, *tt;
	struct radix_node *saved_tt, *top = head->rnh_treetop;
	short b = 0, b_leaf = 0;
	int keyduplicated;
	caddr_t mmask;
	struct radix_mask *m, **mp;

	/*
	 * In dealing with non-contiguous masks, there may be
	 * many different routes which have the same mask.
	 * We find it useful to have a unique pointer to each mask, so
	 * that duplicate references at nodes can be avoided cheaply and
	 * time may be saved in calculating indices.
	 */
	if (netmask) {
		if ((x = rn_addmask(netmask, 0, top->rn_off)) == 0)
			return (0);
		b_leaf = x->rn_b;
		b = -1 - x->rn_b;
		netmask = x->rn_key;
	}
	/*
	 * Deal with duplicated keys: attach node to previous instance
	 */
	saved_tt = tt = rn_insert(v, head, &keyduplicated, treenodes);
	if (keyduplicated) {
		for (t = tt; tt; t = tt, tt = tt->rn_dupedkey) {
			if (tt->rn_mask == netmask)
				return (0);
			if (netmask == 0 ||
			    (tt->rn_mask &&
			     ((b_leaf < tt->rn_b) || /* index(netmask) > node */
			       rn_refines(netmask, tt->rn_mask) ||
			       rn_lexobetter(netmask, tt->rn_mask))))
				break;
		}
		/*
		 * If the mask is not duplicated, we wouldn't
		 * find it among possible duplicate key entries
		 * anyway, so the above test doesn't hurt.
		 *
		 * We sort the masks for a duplicated key the same way as
		 * in a masklist -- most specific to least specific.
		 * This may require the unfortunate nuisance of relocating
		 * the head of the list.
		 *
		 * We also reverse, or doubly link the list through the
		 * parent pointer.
		 */
		if (tt == saved_tt) {
			struct	radix_node *xx = x;
			/* link in at head of list */
			(tt = treenodes)->rn_dupedkey = t;
			tt->rn_flags = t->rn_flags;
			tt->rn_p = x = t->rn_p;
			t->rn_p = tt;
			if (x->rn_l == t) x->rn_l = tt; else x->rn_r = tt;
			saved_tt = tt; x = xx;
		} else {
			(tt = treenodes)->rn_dupedkey = t->rn_dupedkey;
			t->rn_dupedkey = tt;
			tt->rn_p = t;
			if (tt->rn_dupedkey)
				tt->rn_dupedkey->rn_p = tt;
		}
#ifdef RN_DEBUG
		t=tt+1; tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
		tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
#endif
		tt->rn_key = (caddr_t) v;
		tt->rn_b = -1;
		tt->rn_flags = RNF_ACTIVE;
	}
	/*
	 * Put mask in tree.
	 */
	if (netmask) {
		tt->rn_mask = netmask;
		tt->rn_b = x->rn_b;
		tt->rn_flags |= x->rn_flags & RNF_NORMAL;
	}
	t = saved_tt->rn_p;
	if (keyduplicated)
		goto on2;
	b_leaf = -1 - t->rn_b;
	if (t->rn_r == saved_tt) x = t->rn_l; else x = t->rn_r;
	/* Promote general routes from below */
	if (x->rn_b < 0) {
	    for (mp = &t->rn_mklist; x; x = x->rn_dupedkey)
		if (x->rn_mask && (x->rn_b >= b_leaf) && x->rn_mklist == 0) {
			*mp = m = rn_new_radix_mask(x, 0);
			if (m)
				mp = &m->rm_mklist;
		}
	} else if (x->rn_mklist) {
		/*
		 * Skip over masks whose index is > that of new node
		 */
		for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist)
			if (m->rm_b >= b_leaf)
				break;
		t->rn_mklist = m; *mp = 0;
	}
on2:
	/* Add new route to highest possible ancestor's list */
	if ((netmask == 0) || (b > t->rn_b))
		return tt; /* can't lift at all */
	b_leaf = tt->rn_b;
	do {
		x = t;
		t = t->rn_p;
	} while (b <= t->rn_b && x != top);
	/*
	 * Search through routes associated with node to
	 * insert new route according to index.
	 * Need same criteria as when sorting dupedkeys to avoid
	 * double loop on deletion.
	 */
	for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist) {
		if (m->rm_b < b_leaf)
			continue;
		if (m->rm_b > b_leaf)
			break;
		if (m->rm_flags & RNF_NORMAL) {
			mmask = m->rm_leaf->rn_mask;
			if (tt->rn_flags & RNF_NORMAL) {
				log(LOG_ERR, "Non-unique normal route,"
				    " mask not entered\n");
				return tt;
			}
		} else
			mmask = m->rm_mask;
		if (mmask == netmask) {
			m->rm_refs++;
			tt->rn_mklist = m;
			return tt;
		}
		if (rn_refines(netmask, mmask) || rn_lexobetter(netmask, mmask))
			break;
	}
	*mp = rn_new_radix_mask(tt, *mp);
	return tt;
}

struct radix_node *
rn_delete(v_arg, netmask_arg, head)
	void *v_arg, *netmask_arg;
	struct radix_node_head *head;
{
	struct radix_node *t, *p, *x, *tt;
	struct radix_mask *m, *saved_m, **mp;
	struct radix_node *dupedkey, *saved_tt, *top;
	caddr_t v, netmask;
	int b, head_off, vlen;

	v = v_arg;
	netmask = netmask_arg;
	x = head->rnh_treetop;
	tt = rn_search(v, x);
	head_off = x->rn_off;
	vlen = *(u_char *)v;
	saved_tt = tt;
	top = x;
	if (tt == 0 ||
	    Bcmp(v + head_off, tt->rn_key + head_off, vlen - head_off))
		return (0);
	/*
	 * Delete our route from mask lists.
	 */
	if (netmask) {
		if ((x = rn_addmask(netmask, 1, head_off)) == 0)
			return (0);
		netmask = x->rn_key;
		while (tt->rn_mask != netmask)
			if ((tt = tt->rn_dupedkey) == 0)
				return (0);
	}
	if (tt->rn_mask == 0 || (saved_m = m = tt->rn_mklist) == 0)
		goto on1;
	if (tt->rn_flags & RNF_NORMAL) {
		if (m->rm_leaf != tt || m->rm_refs > 0) {
			log(LOG_ERR, "rn_delete: inconsistent annotation\n");
			return 0;  /* dangling ref could cause disaster */
		}
	} else {
		if (m->rm_mask != tt->rn_mask) {
			log(LOG_ERR, "rn_delete: inconsistent annotation\n");
			goto on1;
		}
		if (--m->rm_refs >= 0)
			goto on1;
	}
	b = -1 - tt->rn_b;
	t = saved_tt->rn_p;
	if (b > t->rn_b)
		goto on1; /* Wasn't lifted at all */
	do {
		x = t;
		t = t->rn_p;
	} while (b <= t->rn_b && x != top);
	for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist)
		if (m == saved_m) {
			*mp = m->rm_mklist;
			MKFree(m);
			break;
		}
	if (m == 0) {
		log(LOG_ERR, "rn_delete: couldn't find our annotation\n");
		if (tt->rn_flags & RNF_NORMAL)
			return (0); /* Dangling ref to us */
	}
on1:
	/*
	 * Eliminate us from tree
	 */
	if (tt->rn_flags & RNF_ROOT)
		return (0);
#ifdef RN_DEBUG
	/* Get us out of the creation list */
	for (t = rn_clist; t && t->rn_ybro != tt; t = t->rn_ybro) {}
	if (t) t->rn_ybro = tt->rn_ybro;
#endif
	t = tt->rn_p;
	dupedkey = saved_tt->rn_dupedkey;
	if (dupedkey) {
		/*
		 * Here, tt is the deletion target, and
		 * saved_tt is the head of the dupedkey chain.
		 */
		if (tt == saved_tt) {
			x = dupedkey; x->rn_p = t;
			if (t->rn_l == tt) t->rn_l = x; else t->rn_r = x;
		} else {
			/* find node in front of tt on the chain */
			for (x = p = saved_tt; p && p->rn_dupedkey != tt;)
				p = p->rn_dupedkey;
			if (p) {
				p->rn_dupedkey = tt->rn_dupedkey;
				if (tt->rn_dupedkey)
					tt->rn_dupedkey->rn_p = p;
			} else log(LOG_ERR, "rn_delete: couldn't find us\n");
		}
		t = tt + 1;
		if (t->rn_flags & RNF_ACTIVE) {
#ifndef RN_DEBUG
			*++x = *t; p = t->rn_p;
#else
			b = t->rn_info; *++x = *t; t->rn_info = b; p = t->rn_p;
#endif
			if (p->rn_l == t) p->rn_l = x; else p->rn_r = x;
			x->rn_l->rn_p = x; x->rn_r->rn_p = x;
		}
		goto out;
	}
	if (t->rn_l == tt) x = t->rn_r; else x = t->rn_l;
	p = t->rn_p;
	if (p->rn_r == t) p->rn_r = x; else p->rn_l = x;
	x->rn_p = p;
	/*
	 * Demote routes attached to us.
	 */
	if (t->rn_mklist) {
		if (x->rn_b >= 0) {
			for (mp = &x->rn_mklist; (m = *mp);)
				mp = &m->rm_mklist;
			*mp = t->rn_mklist;
		} else {
			/* If there are any key,mask pairs in a sibling
			   duped-key chain, some subset will appear sorted
			   in the same order attached to our mklist */
			for (m = t->rn_mklist; m && x; x = x->rn_dupedkey)
				if (m == x->rn_mklist) {
					struct radix_mask *mm = m->rm_mklist;
					x->rn_mklist = 0;
					if (--(m->rm_refs) < 0)
						MKFree(m);
					m = mm;
				}
			if (m)
				log(LOG_ERR, "%s %p at %p\n",
				    "rn_delete: Orphaned Mask", m, x);
		}
	}
	/*
	 * We may be holding an active internal node in the tree.
	 */
	x = tt + 1;
	if (t != x) {
#ifndef RN_DEBUG
		*t = *x;
#else
		b = t->rn_info; *t = *x; t->rn_info = b;
#endif
		t->rn_l->rn_p = t; t->rn_r->rn_p = t;
		p = x->rn_p;
		if (p->rn_l == x) p->rn_l = t; else p->rn_r = t;
	}
out:
	tt->rn_flags &= ~RNF_ACTIVE;
	tt[1].rn_flags &= ~RNF_ACTIVE;
	return (tt);
}

int
rn_walktree(h, f, w)
	struct radix_node_head *h;
	int (*f) __P((struct radix_node *, void *));
	void *w;
{
	int error;
	struct radix_node *base, *next;
	struct radix_node *rn = h->rnh_treetop;
	/*
	 * This gets complicated because we may delete the node
	 * while applying the function f to it, so we need to calculate
	 * the successor node in advance.
	 */
	/* First time through node, go left */
	while (rn->rn_b >= 0)
		rn = rn->rn_l;
	for (;;) {
		base = rn;
		/* If at right child go back up, otherwise, go right */
		while (rn->rn_p->rn_r == rn && (rn->rn_flags & RNF_ROOT) == 0)
			rn = rn->rn_p;
		/* Find the next *leaf* since next node might vanish, too */
		for (rn = rn->rn_p->rn_r; rn->rn_b >= 0;)
			rn = rn->rn_l;
		next = rn;
		/* Process leaves */
		while ((rn = base) != NULL) {
			base = rn->rn_dupedkey;
			if (!(rn->rn_flags & RNF_ROOT) && (error = (*f)(rn, w)))
				return (error);
		}
		rn = next;
		if (rn->rn_flags & RNF_ROOT)
			return (0);
	}
	/* NOTREACHED */
}
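
/*
 * Sketch of a typical rn_walktree() caller (illustrative only; the
 * RN_EXAMPLES guard and these helpers are assumed, not part of the
 * original file).  The callback is applied to every non-ROOT leaf and
 * may delete the leaf it is handed, since the walk computes its
 * successor before calling it.
 */
#ifdef RN_EXAMPLES
static int
rn_example_count(rn, arg)
	struct radix_node *rn;
	void *arg;
{

	(*(int *)arg)++;		/* count one route */
	return 0;			/* nonzero would abort the walk */
}

static int
rn_example_walk(rnh)
	struct radix_node_head *rnh;
{
	int nroutes = 0;

	(void) rn_walktree(rnh, rn_example_count, &nroutes);
	return nroutes;
}
#endif /* RN_EXAMPLES */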

int
rn_inithead(head, off)
	void **head;
	int off;
{
	struct radix_node_head *rnh;

	if (*head)
		return (1);
	R_Malloc(rnh, struct radix_node_head *, sizeof (*rnh));
	if (rnh == 0)
		return (0);
	*head = rnh;
	return rn_inithead0(rnh, off);
}

int
rn_inithead0(rnh, off)
	struct radix_node_head *rnh;
	int off;
{
	struct radix_node *t, *tt, *ttt;

	Bzero(rnh, sizeof (*rnh));
	t = rn_newpair(rn_zeros, off, rnh->rnh_nodes);
	ttt = rnh->rnh_nodes + 2;
	t->rn_r = ttt;
	t->rn_p = t;
	tt = t->rn_l;
	tt->rn_flags = t->rn_flags = RNF_ROOT | RNF_ACTIVE;
	tt->rn_b = -1 - off;
	*ttt = *tt;
	ttt->rn_key = rn_ones;
	rnh->rnh_addaddr = rn_addroute;
	rnh->rnh_deladdr = rn_delete;
	rnh->rnh_matchaddr = rn_match;
	rnh->rnh_lookup = rn_lookup;
	rnh->rnh_walktree = rn_walktree;
	rnh->rnh_treetop = t;
	return (1);
}

void
rn_init()
{
	char *cp, *cplim;
#ifdef _KERNEL
	struct domain *dom;

	for (dom = domains; dom; dom = dom->dom_next)
		if (dom->dom_maxrtkey > max_keylen)
			max_keylen = dom->dom_maxrtkey;
#endif
	if (max_keylen == 0) {
		log(LOG_ERR,
		    "rn_init: radix functions require max_keylen be set\n");
		return;
	}
	R_Malloc(rn_zeros, char *, 3 * max_keylen);
	if (rn_zeros == NULL)
		panic("rn_init");
	Bzero(rn_zeros, 3 * max_keylen);
	rn_ones = cp = rn_zeros + max_keylen;
	addmask_key = cplim = rn_ones + max_keylen;
	while (cp < cplim)
		*cp++ = -1;
	if (rn_inithead((void *)&mask_rnhead, 0) == 0)
		panic("rn_init 2");
}
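
/*
 * Sketch of how a protocol might set up its own tree once rn_init() has
 * run (illustrative only; the RN_EXAMPLES guard and these names are
 * assumed, not part of the original file).  The second argument of
 * rn_inithead() is the bit offset of the first byte of interest in the
 * keys; 32 here assumes AF_INET sockaddrs, whose address begins at byte
 * offset 4 (4 * 8 == 32).
 */
#ifdef RN_EXAMPLES
static struct radix_node_head *example_rnh;

static int
rn_example_inithead()
{

	/* Returns 1 on success, 0 on allocation failure. */
	return rn_inithead((void *)&example_rnh, 32);
}
#endif /* RN_EXAMPLES */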