/* FreeBSD/Linux Kernel Cross Reference: sys/net/route.c */
1 /* $NetBSD: route.c,v 1.113.4.1 2009/04/03 17:59:03 snj Exp $ */
2
3 /*-
4 * Copyright (c) 1998, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Kevin M. Lahey of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
35 * All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. Neither the name of the project nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 */
61
62 /*
63 * Copyright (c) 1980, 1986, 1991, 1993
64 * The Regents of the University of California. All rights reserved.
65 *
66 * Redistribution and use in source and binary forms, with or without
67 * modification, are permitted provided that the following conditions
68 * are met:
69 * 1. Redistributions of source code must retain the above copyright
70 * notice, this list of conditions and the following disclaimer.
71 * 2. Redistributions in binary form must reproduce the above copyright
72 * notice, this list of conditions and the following disclaimer in the
73 * documentation and/or other materials provided with the distribution.
74 * 3. Neither the name of the University nor the names of its contributors
75 * may be used to endorse or promote products derived from this software
76 * without specific prior written permission.
77 *
78 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
79 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
80 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
81 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
82 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
83 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
84 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
85 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
86 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
87 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
88 * SUCH DAMAGE.
89 *
90 * @(#)route.c 8.3 (Berkeley) 1/9/95
91 */
92
93 #include "opt_route.h"
94
95 #include <sys/cdefs.h>
96 __KERNEL_RCSID(0, "$NetBSD: route.c,v 1.113.4.1 2009/04/03 17:59:03 snj Exp $");
97
98 #include <sys/param.h>
99 #include <sys/sysctl.h>
100 #include <sys/systm.h>
101 #include <sys/callout.h>
102 #include <sys/proc.h>
103 #include <sys/mbuf.h>
104 #include <sys/socket.h>
105 #include <sys/socketvar.h>
106 #include <sys/domain.h>
107 #include <sys/protosw.h>
108 #include <sys/kernel.h>
109 #include <sys/ioctl.h>
110 #include <sys/pool.h>
111
112 #include <net/if.h>
113 #include <net/route.h>
114 #include <net/raw_cb.h>
115
116 #include <netinet/in.h>
117 #include <netinet/in_var.h>
118
119 #ifdef RTFLUSH_DEBUG
120 #define rtcache_debug() __predict_false(_rtcache_debug)
121 #else /* RTFLUSH_DEBUG */
122 #define rtcache_debug() 0
123 #endif /* RTFLUSH_DEBUG */
124
125 struct route_cb route_cb;
126 struct rtstat rtstat;
127 struct radix_node_head *rt_tables[AF_MAX+1];
128
129 int rttrash; /* routes not in table but not freed */
130
131 struct pool rtentry_pool;
132 struct pool rttimer_pool;
133
134 struct callout rt_timer_ch; /* callout for rt_timer_timer() */
135
136 #ifdef RTFLUSH_DEBUG
137 static int _rtcache_debug = 0;
138 #endif /* RTFLUSH_DEBUG */
139
140 static int rtdeletemsg(struct rtentry *);
141 static int rtflushclone1(struct rtentry *, void *);
142 static void rtflushclone(sa_family_t family, struct rtentry *);
143
#ifdef RTFLUSH_DEBUG
/*
 * Create the sysctl subtree net.rtcache.debug: a read/write integer
 * backing _rtcache_debug, the flag tested by the rtcache_debug() macro.
 * Compiled in only when RTFLUSH_DEBUG is defined.
 */
SYSCTL_SETUP(sysctl_net_rtcache_setup, "sysctl net.rtcache.debug setup")
{
	const struct sysctlnode *rnode;

	/* XXX do not duplicate */
	if (sysctl_createv(clog, 0, NULL, &rnode, CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "net", NULL, NULL, 0, NULL, 0, CTL_NET, CTL_EOL) != 0)
		return;
	if (sysctl_createv(clog, 0, &rnode, &rnode, CTLFLAG_PERMANENT,
	    CTLTYPE_NODE,
	    "rtcache", SYSCTL_DESCR("Route cache related settings"),
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
		return;
	if (sysctl_createv(clog, 0, &rnode, &rnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Debug route caches"),
	    NULL, 0, &_rtcache_debug, 0, CTL_CREATE, CTL_EOL) != 0)
		return;
}
#endif /* RTFLUSH_DEBUG */
165
/*
 * Return the ifaddr attached to `rt', first giving the address family a
 * chance to substitute a different one through the ifa_getifa() hook.
 * When the hook supplies a new ifaddr, the route is switched over to it
 * via rt_replace_ifa() before it is returned.
 */
struct ifaddr *
rt_get_ifa(struct rtentry *rt)
{
	struct ifaddr *ifa;

	if ((ifa = rt->rt_ifa) == NULL)
		return ifa;
	else if (ifa->ifa_getifa == NULL)
		return ifa;
#if 0
	/* Disabled seqno shortcut: skip the hook if nothing changed. */
	else if (ifa->ifa_seqno != NULL && *ifa->ifa_seqno == rt->rt_ifa_seqno)
		return ifa;
#endif
	else {
		/* Let the hook pick the ifaddr for this route's key. */
		ifa = (*ifa->ifa_getifa)(ifa, rt_getkey(rt));
		rt_replace_ifa(rt, ifa);
		return ifa;
	}
}
185
186 static void
187 rt_set_ifa1(struct rtentry *rt, struct ifaddr *ifa)
188 {
189 rt->rt_ifa = ifa;
190 if (ifa->ifa_seqno != NULL)
191 rt->rt_ifa_seqno = *ifa->ifa_seqno;
192 }
193
194 void
195 rt_replace_ifa(struct rtentry *rt, struct ifaddr *ifa)
196 {
197 IFAREF(ifa);
198 IFAFREE(rt->rt_ifa);
199 rt_set_ifa1(rt, ifa);
200 }
201
/*
 * Attach `ifa' to a route that has no previous ifaddr, taking a new
 * reference on it.  Use rt_replace_ifa() when one is already attached.
 */
static void
rt_set_ifa(struct rtentry *rt, struct ifaddr *ifa)
{
	IFAREF(ifa);
	rt_set_ifa1(rt, ifa);
}
208
209 void
210 rtable_init(void **table)
211 {
212 struct domain *dom;
213 DOMAIN_FOREACH(dom)
214 if (dom->dom_rtattach)
215 dom->dom_rtattach(&table[dom->dom_family],
216 dom->dom_rtoffset);
217 }
218
/*
 * One-time initialization of the routing subsystem: create the rtentry
 * and rttimer pools, then bring up the radix-trie machinery before the
 * per-domain tables are attached (rtable_init depends on rn_init).
 */
void
route_init(void)
{

	pool_init(&rtentry_pool, sizeof(struct rtentry), 0, 0, 0, "rtentpl",
	    NULL, IPL_SOFTNET);
	pool_init(&rttimer_pool, sizeof(struct rttimer), 0, 0, 0, "rttmrpl",
	    NULL, IPL_SOFTNET);

	rt_init();
	rn_init();	/* initialize all zeroes, all ones, mask table */
	rtable_init((void **)rt_tables);
}
232
233 void
234 rtflushall(int family)
235 {
236 struct domain *dom;
237
238 if (rtcache_debug())
239 printf("%s: enter\n", __func__);
240
241 if ((dom = pffinddomain(family)) == NULL)
242 return;
243
244 rtcache_invalidate(&dom->dom_rtcache);
245 }
246
/*
 * Register a filled-in route cache entry on its domain's list so that
 * rtflushall()/rtcache_invalidate() can find and invalidate it later.
 * The entry must already hold a valid route and destination.
 */
void
rtcache(struct route *ro)
{
	struct domain *dom;

	KASSERT(ro->_ro_rt != NULL);
	KASSERT(ro->ro_invalid == false);
	KASSERT(rtcache_getdst(ro) != NULL);

	if ((dom = pffinddomain(rtcache_getdst(ro)->sa_family)) == NULL)
		return;

	LIST_INSERT_HEAD(&dom->dom_rtcache, ro, ro_rtcache_next);
}
261
262 /*
263 * Packet routing routines.
264 */
/*
 * Packet routing routines.
 */
/*
 * Look up `dst' in its family's routing table.  On success return the
 * matching rtentry with an additional reference; on failure return
 * NULL.  When `report' is non-zero, RTF_CLONING routes are resolved
 * into host routes (RTM_RESOLVE) and routing-socket listeners are told
 * about misses and new clones via rt_missmsg().
 */
struct rtentry *
rtalloc1(const struct sockaddr *dst, int report)
{
	struct radix_node_head *rnh = rt_tables[dst->sa_family];
	struct rtentry *rt;
	struct radix_node *rn;
	struct rtentry *newrt = NULL;
	struct rt_addrinfo info;
	int s = splsoftnet(), err = 0, msgtype = RTM_MISS;

	if (rnh && (rn = rnh->rnh_matchaddr(dst, rnh)) &&
	    ((rn->rn_flags & RNF_ROOT) == 0)) {
		newrt = rt = (struct rtentry *)rn;
		if (report && (rt->rt_flags & RTF_CLONING)) {
			/* Clone a host route off the cloning route. */
			err = rtrequest(RTM_RESOLVE, dst, NULL, NULL, 0,
			    &newrt);
			if (err) {
				/* Clone failed: hand back the parent. */
				newrt = rt;
				rt->rt_refcnt++;
				goto miss;
			}
			KASSERT(newrt != NULL);
			if ((rt = newrt) && (rt->rt_flags & RTF_XRESOLVE)) {
				/* External resolver must complete it. */
				msgtype = RTM_RESOLVE;
				goto miss;
			}
			/* Inform listeners of the new route */
			memset(&info, 0, sizeof(info));
			info.rti_info[RTAX_DST] = rt_getkey(rt);
			info.rti_info[RTAX_NETMASK] = rt_mask(rt);
			info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
			if (rt->rt_ifp != NULL) {
				info.rti_info[RTAX_IFP] =
				    rt->rt_ifp->if_dl->ifa_addr;
				info.rti_info[RTAX_IFA] = rt->rt_ifa->ifa_addr;
			}
			rt_missmsg(RTM_ADD, &info, rt->rt_flags, 0);
		} else
			rt->rt_refcnt++;
	} else {
		rtstat.rts_unreach++;
	miss:	if (report) {
			/* Report the lookup failure/resolution request. */
			memset((void *)&info, 0, sizeof(info));
			info.rti_info[RTAX_DST] = dst;
			rt_missmsg(msgtype, &info, 0, err);
		}
	}
	splx(s);
	return newrt;
}
315
/*
 * Drop one reference on `rt'.  Once the reference count reaches zero
 * and the route has been removed from the table (RTF_UP cleared by
 * rtrequest1), tear down its timers and ifaddr reference and return it
 * to the pool.
 */
void
rtfree(struct rtentry *rt)
{
	struct ifaddr *ifa;

	if (rt == NULL)
		panic("rtfree");
	rt->rt_refcnt--;
	if (rt->rt_refcnt <= 0 && (rt->rt_flags & RTF_UP) == 0) {
		/* A dying route must no longer be in the radix tree. */
		if (rt->rt_nodes->rn_flags & (RNF_ACTIVE | RNF_ROOT))
			panic ("rtfree 2");
		rttrash--;
		if (rt->rt_refcnt < 0) {
			/* Refcount underflow: leak instead of corrupting. */
			printf("rtfree: %p not freed (neg refs)\n", rt);
			return;
		}
		rt_timer_remove_all(rt, 0);
		ifa = rt->rt_ifa;
		rt->rt_ifa = NULL;
		IFAFREE(ifa);
		rt->rt_ifp = NULL;
		rt_destroy(rt);
		pool_put(&rtentry_pool, rt);
	}
}
341
/*
 * Final release of an ifaddr whose reference count has already dropped
 * to zero (normally reached through the IFAFREE() macro).  Simply frees
 * the storage; sanity checks are compiled in with DIAGNOSTIC.
 */
void
ifafree(struct ifaddr *ifa)
{

#ifdef DIAGNOSTIC
	if (ifa == NULL)
		panic("ifafree: null ifa");
	if (ifa->ifa_refcnt != 0)
		panic("ifafree: ifa_refcnt != 0 (%d)", ifa->ifa_refcnt);
#endif
#ifdef IFAREF_DEBUG
	printf("ifafree: freeing ifaddr %p\n", ifa);
#endif
	free(ifa, M_IFADDR);
}
357
/*
 * Sockaddr equality: non-zero iff the two addresses compare equal
 * under sockaddr_cmp().
 */
static inline int
equal(const struct sockaddr *sa1, const struct sockaddr *sa2)
{
	return (sockaddr_cmp(sa1, sa2) == 0);
}
363
364 /*
365 * Force a routing table entry to the specified
366 * destination to go through the given gateway.
367 * Normally called as a result of a routing redirect
368 * message from the network layer.
369 *
370 * N.B.: must be called at splsoftnet
371 */
/*
 * Force a routing table entry to the specified
 * destination to go through the given gateway.
 * Normally called as a result of a routing redirect
 * message from the network layer.
 *
 * The redirect is only honored if it came from the router we currently
 * use for `dst' (`src') and the new gateway is directly reachable and
 * not one of our own addresses.  Depending on the existing route we
 * either create a new dynamic host route or modify the gateway in
 * place.  A RTM_REDIRECT message is always emitted at the end, carrying
 * any error.  If `rtp' is non-NULL and everything succeeded, the
 * affected route is handed back (with its reference) to the caller.
 *
 * N.B.: must be called at splsoftnet
 */
void
rtredirect(const struct sockaddr *dst, const struct sockaddr *gateway,
	const struct sockaddr *netmask, int flags, const struct sockaddr *src,
	struct rtentry **rtp)
{
	struct rtentry *rt;
	int error = 0;
	u_quad_t *stat = NULL;
	struct rt_addrinfo info;
	struct ifaddr *ifa;

	/* verify the gateway is directly reachable */
	if ((ifa = ifa_ifwithnet(gateway)) == NULL) {
		error = ENETUNREACH;
		goto out;
	}
	rt = rtalloc1(dst, 0);
	/*
	 * If the redirect isn't from our current router for this dst,
	 * it's either old or wrong.  If it redirects us to ourselves,
	 * we have a routing loop, perhaps as a result of an interface
	 * going down recently.
	 */
	if (!(flags & RTF_DONE) && rt &&
	    (!equal(src, rt->rt_gateway) || rt->rt_ifa != ifa))
		error = EINVAL;
	else if (ifa_ifwithaddr(gateway))
		error = EHOSTUNREACH;
	if (error)
		goto done;
	/*
	 * Create a new entry if we just got back a wildcard entry
	 * or the lookup failed.  This is necessary for hosts
	 * which use routing redirects generated by smart gateways
	 * to dynamically build the routing tables.
	 */
	if (rt == NULL || (rt_mask(rt) && rt_mask(rt)->sa_len < 2))
		goto create;
	/*
	 * Don't listen to the redirect if it's
	 * for a route to an interface.
	 */
	if (rt->rt_flags & RTF_GATEWAY) {
		if (((rt->rt_flags & RTF_HOST) == 0) && (flags & RTF_HOST)) {
			/*
			 * Changing from route to net => route to host.
			 * Create new route, rather than smashing route to net.
			 */
		create:
			if (rt != NULL)
				rtfree(rt);
			flags |= RTF_GATEWAY | RTF_DYNAMIC;
			info.rti_info[RTAX_DST] = dst;
			info.rti_info[RTAX_GATEWAY] = gateway;
			info.rti_info[RTAX_NETMASK] = netmask;
			info.rti_ifa = ifa;
			info.rti_flags = flags;
			rt = NULL;
			error = rtrequest1(RTM_ADD, &info, &rt);
			if (rt != NULL)
				flags = rt->rt_flags;
			stat = &rtstat.rts_dynamic;
		} else {
			/*
			 * Smash the current notion of the gateway to
			 * this destination.  Should check about netmask!!!
			 */
			rt->rt_flags |= RTF_MODIFIED;
			flags |= RTF_MODIFIED;
			stat = &rtstat.rts_newgateway;
			rt_setgate(rt, gateway);
		}
	} else
		error = EHOSTUNREACH;
done:
	if (rt) {
		/* Hand the route to the caller or drop our reference. */
		if (rtp != NULL && !error)
			*rtp = rt;
		else
			rtfree(rt);
	}
out:
	if (error)
		rtstat.rts_badredirect++;
	else if (stat != NULL)
		(*stat)++;
	/* Always tell routing-socket listeners what happened. */
	memset(&info, 0, sizeof(info));
	info.rti_info[RTAX_DST] = dst;
	info.rti_info[RTAX_GATEWAY] = gateway;
	info.rti_info[RTAX_NETMASK] = netmask;
	info.rti_info[RTAX_AUTHOR] = src;
	rt_missmsg(RTM_REDIRECT, &info, flags, error);
}
465
466 /*
467 * Delete a route and generate a message
468 */
/*
 * Delete a route and generate a message
 */
static int
rtdeletemsg(struct rtentry *rt)
{
	int error;
	struct rt_addrinfo info;

	/*
	 * Request the new route so that the entry is not actually
	 * deleted.  That will allow the information being reported to
	 * be accurate (and consistent with route_output()).
	 */
	memset(&info, 0, sizeof(info));
	info.rti_info[RTAX_DST] = rt_getkey(rt);
	info.rti_info[RTAX_NETMASK] = rt_mask(rt);
	info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
	info.rti_flags = rt->rt_flags;
	error = rtrequest1(RTM_DELETE, &info, &rt);

	rt_missmsg(RTM_DELETE, &info, info.rti_flags, error);

	/* Adjust the refcount: force a final rtfree() if unreferenced. */
	if (error == 0 && rt->rt_refcnt <= 0) {
		rt->rt_refcnt++;
		rtfree(rt);
	}
	return error;
}
496
497 static int
498 rtflushclone1(struct rtentry *rt, void *arg)
499 {
500 struct rtentry *parent;
501
502 parent = (struct rtentry *)arg;
503 if ((rt->rt_flags & RTF_CLONED) != 0 && rt->rt_parent == parent)
504 rtdeletemsg(rt);
505 return 0;
506 }
507
/*
 * Remove every route that was cloned from `parent' by walking the
 * family's whole tree with rtflushclone1().  Called when a cloning
 * route is deleted or replaced.
 */
static void
rtflushclone(sa_family_t family, struct rtentry *parent)
{

#ifdef DIAGNOSTIC
	if (!parent || (parent->rt_flags & RTF_CLONING) == 0)
		panic("rtflushclone: called with a non-cloning route");
#endif
	rt_walktree(family, rtflushclone1, (void *)parent);
}
518
519 /*
520 * Routing table ioctl interface.
521 */
522 int
523 rtioctl(u_long req, void *data, struct lwp *l)
524 {
525 return EOPNOTSUPP;
526 }
527
528 struct ifaddr *
529 ifa_ifwithroute(int flags, const struct sockaddr *dst,
530 const struct sockaddr *gateway)
531 {
532 struct ifaddr *ifa;
533 if ((flags & RTF_GATEWAY) == 0) {
534 /*
535 * If we are adding a route to an interface,
536 * and the interface is a pt to pt link
537 * we should search for the destination
538 * as our clue to the interface. Otherwise
539 * we can use the local address.
540 */
541 ifa = NULL;
542 if (flags & RTF_HOST)
543 ifa = ifa_ifwithdstaddr(dst);
544 if (ifa == NULL)
545 ifa = ifa_ifwithaddr(gateway);
546 } else {
547 /*
548 * If we are adding a route to a remote net
549 * or host, the gateway may still be on the
550 * other end of a pt to pt link.
551 */
552 ifa = ifa_ifwithdstaddr(gateway);
553 }
554 if (ifa == NULL)
555 ifa = ifa_ifwithnet(gateway);
556 if (ifa == NULL) {
557 struct rtentry *rt = rtalloc1(dst, 0);
558 if (rt == NULL)
559 return NULL;
560 rt->rt_refcnt--;
561 if ((ifa = rt->rt_ifa) == NULL)
562 return NULL;
563 }
564 if (ifa->ifa_addr->sa_family != dst->sa_family) {
565 struct ifaddr *oifa = ifa;
566 ifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp);
567 if (ifa == 0)
568 ifa = oifa;
569 }
570 return ifa;
571 }
572
573 int
574 rtrequest(int req, const struct sockaddr *dst, const struct sockaddr *gateway,
575 const struct sockaddr *netmask, int flags, struct rtentry **ret_nrt)
576 {
577 struct rt_addrinfo info;
578
579 memset(&info, 0, sizeof(info));
580 info.rti_flags = flags;
581 info.rti_info[RTAX_DST] = dst;
582 info.rti_info[RTAX_GATEWAY] = gateway;
583 info.rti_info[RTAX_NETMASK] = netmask;
584 return rtrequest1(req, &info, ret_nrt);
585 }
586
/*
 * Resolve the ifaddr/ifp for a route request that did not specify one.
 * Tries, in order: an interface named by a sockaddr_dl IFP address, an
 * exact IFA address match, an address on the already-known interface,
 * and finally ifa_ifwithroute() on dst/gateway.  The ifa_getifa() hook
 * gets a final say.  Returns 0 on success or ENETUNREACH when no
 * ifaddr can be found; on success info->rti_ifa (and rti_ifp) are set.
 */
int
rt_getifa(struct rt_addrinfo *info)
{
	struct ifaddr *ifa;
	const struct sockaddr *dst = info->rti_info[RTAX_DST];
	const struct sockaddr *gateway = info->rti_info[RTAX_GATEWAY];
	const struct sockaddr *ifaaddr = info->rti_info[RTAX_IFA];
	const struct sockaddr *ifpaddr = info->rti_info[RTAX_IFP];
	int flags = info->rti_flags;

	/*
	 * ifp may be specified by sockaddr_dl when protocol address
	 * is ambiguous
	 */
	if (info->rti_ifp == NULL && ifpaddr != NULL
	    && ifpaddr->sa_family == AF_LINK &&
	    (ifa = ifa_ifwithnet(ifpaddr)) != NULL)
		info->rti_ifp = ifa->ifa_ifp;
	if (info->rti_ifa == NULL && ifaaddr != NULL)
		info->rti_ifa = ifa_ifwithaddr(ifaaddr);
	if (info->rti_ifa == NULL) {
		/* Pick the most specific address we were given. */
		const struct sockaddr *sa;

		sa = ifaaddr != NULL ? ifaaddr :
		    (gateway != NULL ? gateway : dst);
		if (sa != NULL && info->rti_ifp != NULL)
			info->rti_ifa = ifaof_ifpforaddr(sa, info->rti_ifp);
		else if (dst != NULL && gateway != NULL)
			info->rti_ifa = ifa_ifwithroute(flags, dst, gateway);
		else if (sa != NULL)
			info->rti_ifa = ifa_ifwithroute(flags, sa, sa);
	}
	if ((ifa = info->rti_ifa) == NULL)
		return ENETUNREACH;
	/* Let the address family substitute a better ifaddr. */
	if (ifa->ifa_getifa != NULL)
		info->rti_ifa = ifa = (*ifa->ifa_getifa)(ifa, dst);
	if (info->rti_ifp == NULL)
		info->rti_ifp = ifa->ifa_ifp;
	return 0;
}
627
/*
 * Core routing-table mutator.  `req' selects the operation:
 *   RTM_DELETE  - remove the matching route (also flushing its clones),
 *                 returning it via *ret_nrt when requested;
 *   RTM_RESOLVE - clone a host route from the cloning route *ret_nrt;
 *   RTM_ADD     - insert a new route built from `info';
 *   RTM_GET     - look up a route and return a referenced pointer.
 * Returns 0 or an errno.  Runs at splsoftnet() for pool safety.
 */
int
rtrequest1(int req, struct rt_addrinfo *info, struct rtentry **ret_nrt)
{
	int s = splsoftnet();
	int error = 0;
	struct rtentry *rt, *crt;
	struct radix_node *rn;
	struct radix_node_head *rnh;
	struct ifaddr *ifa;
	struct sockaddr_storage maskeddst;
	const struct sockaddr *dst = info->rti_info[RTAX_DST];
	const struct sockaddr *gateway = info->rti_info[RTAX_GATEWAY];
	const struct sockaddr *netmask = info->rti_info[RTAX_NETMASK];
	int flags = info->rti_flags;
#define senderr(x) { error = x ; goto bad; }

	if ((rnh = rt_tables[dst->sa_family]) == NULL)
		senderr(ESRCH);
	if (flags & RTF_HOST)
		netmask = NULL;		/* host routes carry no mask */
	switch (req) {
	case RTM_DELETE:
		if (netmask) {
			/* Net routes are keyed on the masked dst. */
			rt_maskedcopy(dst, (struct sockaddr *)&maskeddst,
			    netmask);
			dst = (struct sockaddr *)&maskeddst;
		}
		if ((rn = rnh->rnh_lookup(dst, netmask, rnh)) == NULL)
			senderr(ESRCH);
		rt = (struct rtentry *)rn;
		if ((rt->rt_flags & RTF_CLONING) != 0) {
			/* clean up any cloned children */
			rtflushclone(dst->sa_family, rt);
		}
		if ((rn = rnh->rnh_deladdr(dst, netmask, rnh)) == NULL)
			senderr(ESRCH);
		if (rn->rn_flags & (RNF_ACTIVE | RNF_ROOT))
			panic ("rtrequest delete");
		rt = (struct rtentry *)rn;
		/* Drop cached gateway route and parent reference. */
		if (rt->rt_gwroute) {
			RTFREE(rt->rt_gwroute);
			rt->rt_gwroute = NULL;
		}
		if (rt->rt_parent) {
			rt->rt_parent->rt_refcnt--;
			rt->rt_parent = NULL;
		}
		rt->rt_flags &= ~RTF_UP;
		if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest)
			ifa->ifa_rtrequest(RTM_DELETE, rt, info);
		rttrash++;
		if (ret_nrt)
			*ret_nrt = rt;	/* caller takes ownership */
		else if (rt->rt_refcnt <= 0) {
			/* Force a final rtfree() to destroy it now. */
			rt->rt_refcnt++;
			rtfree(rt);
		}
		break;

	case RTM_RESOLVE:
		/* *ret_nrt is the cloning route to derive from. */
		if (ret_nrt == NULL || (rt = *ret_nrt) == NULL)
			senderr(EINVAL);
		if ((rt->rt_flags & RTF_CLONING) == 0)
			senderr(EINVAL);
		ifa = rt->rt_ifa;
		flags = rt->rt_flags & ~(RTF_CLONING | RTF_STATIC);
		flags |= RTF_CLONED;
		gateway = rt->rt_gateway;
		flags |= RTF_HOST;
		goto makeroute;

	case RTM_ADD:
		if (info->rti_ifa == NULL && (error = rt_getifa(info)))
			senderr(error);
		ifa = info->rti_ifa;
	makeroute:
		/* Already at splsoftnet() so pool_get/pool_put are safe */
		rt = pool_get(&rtentry_pool, PR_NOWAIT);
		if (rt == NULL)
			senderr(ENOBUFS);
		memset(rt, 0, sizeof(*rt));
		rt->rt_flags = RTF_UP | flags;
		LIST_INIT(&rt->rt_timer);
		RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);
		if (rt_setkey(rt, dst, M_NOWAIT) == NULL ||
		    rt_setgate(rt, gateway) != 0) {
			pool_put(&rtentry_pool, rt);
			senderr(ENOBUFS);
		}
		RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);
		if (netmask) {
			/* Re-key on the masked destination. */
			rt_maskedcopy(dst, (struct sockaddr *)&maskeddst,
			    netmask);
			rt_setkey(rt, (struct sockaddr *)&maskeddst, M_NOWAIT);
			RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);
		} else {
			rt_setkey(rt, dst, M_NOWAIT);
			RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);
		}
		rt_set_ifa(rt, ifa);
		RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);
		rt->rt_ifp = ifa->ifa_ifp;
		if (req == RTM_RESOLVE) {
			rt->rt_rmx = (*ret_nrt)->rt_rmx; /* copy metrics */
			rt->rt_parent = *ret_nrt;
			rt->rt_parent->rt_refcnt++;
		}
		RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);
		rn = rnh->rnh_addaddr(rt_getkey(rt), netmask, rnh,
		    rt->rt_nodes);
		RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);
		if (rn == NULL && (crt = rtalloc1(rt_getkey(rt), 0)) != NULL) {
			/* overwrite cloned route */
			if ((crt->rt_flags & RTF_CLONED) != 0) {
				rtdeletemsg(crt);
				rn = rnh->rnh_addaddr(rt_getkey(rt),
				    netmask, rnh, rt->rt_nodes);
			}
			RTFREE(crt);
			RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);
		}
		RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);
		if (rn == NULL) {
			/* Insertion failed: unwind everything we took. */
			IFAFREE(ifa);
			if ((rt->rt_flags & RTF_CLONED) != 0 && rt->rt_parent)
				rtfree(rt->rt_parent);
			if (rt->rt_gwroute)
				rtfree(rt->rt_gwroute);
			rt_destroy(rt);
			pool_put(&rtentry_pool, rt);
			senderr(EEXIST);
		}
		RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);
		if (ifa->ifa_rtrequest)
			ifa->ifa_rtrequest(req, rt, info);
		RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);
		if (ret_nrt) {
			*ret_nrt = rt;
			rt->rt_refcnt++;
		}
		if ((rt->rt_flags & RTF_CLONING) != 0) {
			/* clean up any cloned children */
			rtflushclone(dst->sa_family, rt);
		}
		/* Cached routes may now be stale. */
		rtflushall(dst->sa_family);
		break;
	case RTM_GET:
		if (netmask != NULL) {
			/* Net routes are keyed on the masked dst. */
			rt_maskedcopy(dst, (struct sockaddr *)&maskeddst,
			    netmask);
			dst = (struct sockaddr *)&maskeddst;
		}
		rn = rnh->rnh_lookup(dst, netmask, rnh);
		if (rn == NULL || (rn->rn_flags & RNF_ROOT) != 0)
			senderr(ESRCH);
		if (ret_nrt != NULL) {
			rt = (struct rtentry *)rn;
			*ret_nrt = rt;
			rt->rt_refcnt++;
		}
		break;
	}
bad:
	splx(s);
	return error;
}
794
/*
 * Install `gate' as the gateway of `rt': free the old gateway sockaddr
 * and any cached gateway route, duplicate the new address, and for
 * RTF_GATEWAY routes pre-resolve and cache a route to the gateway.
 * Returns 0, or ENOMEM if the sockaddr copy fails.
 */
int
rt_setgate(struct rtentry *rt, const struct sockaddr *gate)
{
	KASSERT(rt != rt->rt_gwroute);

	KASSERT(rt->_rt_key != NULL);
	RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);

	if (rt->rt_gwroute) {
		RTFREE(rt->rt_gwroute);
		rt->rt_gwroute = NULL;
	}
	KASSERT(rt->_rt_key != NULL);
	RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);
	if (rt->rt_gateway != NULL)
		sockaddr_free(rt->rt_gateway);
	KASSERT(rt->_rt_key != NULL);
	RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);
	if ((rt->rt_gateway = sockaddr_dup(gate, M_NOWAIT)) == NULL)
		return ENOMEM;
	KASSERT(rt->_rt_key != NULL);
	RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);

	if (rt->rt_flags & RTF_GATEWAY) {
		KASSERT(rt->_rt_key != NULL);
		RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);
		rt->rt_gwroute = rtalloc1(gate, 1);
		/*
		 * If we switched gateways, grab the MTU from the new
		 * gateway route if the current MTU is greater than the
		 * MTU of the gateway.
		 * Note that, if the MTU of gateway is 0, we will reset the
		 * MTU of the route to run PMTUD again from scratch. XXX
		 */
		KASSERT(rt->_rt_key != NULL);
		RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);
		if (rt->rt_gwroute
		    && !(rt->rt_rmx.rmx_locks & RTV_MTU)
		    && rt->rt_rmx.rmx_mtu
		    && rt->rt_rmx.rmx_mtu > rt->rt_gwroute->rt_rmx.rmx_mtu) {
			rt->rt_rmx.rmx_mtu = rt->rt_gwroute->rt_rmx.rmx_mtu;
		}
	}
	KASSERT(rt->_rt_key != NULL);
	RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);
	return 0;
}
842
/*
 * Copy `src' into `dst', ANDing the data bytes with `netmask' and
 * zero-filling the tail, producing the masked key used for network
 * routes.  NOTE(review): the bounds add full sa_len byte counts to a
 * pointer that starts at sa_data, mirroring the historical BSD code;
 * callers pass a sockaddr_storage for dst, which is large enough.
 */
void
rt_maskedcopy(const struct sockaddr *src, struct sockaddr *dst,
	const struct sockaddr *netmask)
{
	const char *netmaskp = &netmask->sa_data[0],
	           *srcp = &src->sa_data[0];
	char *dstp = &dst->sa_data[0];
	const char *maskend = dstp + MIN(netmask->sa_len, src->sa_len);
	const char *srcend = dstp + src->sa_len;

	dst->sa_len = src->sa_len;
	dst->sa_family = src->sa_family;

	/* Masked copy up to the shorter of the two lengths... */
	while (dstp < maskend)
		*dstp++ = *srcp++ & *netmaskp++;
	/* ...then zero-fill the remainder of dst. */
	if (dstp < srcend)
		memset(dstp, 0, (size_t)(srcend - dstp));
}
861
862 /*
863 * Set up or tear down a routing table entry, normally
864 * for an interface.
865 */
/*
 * Set up or tear down a routing table entry, normally
 * for an interface.
 *
 * `cmd' is RTM_ADD or RTM_DELETE; `flags' may include RTF_HOST to
 * operate on the host route to ifa_dstaddr instead of the subnet route
 * to ifa_addr.  On success an RTM_NEWADDR-style message is generated
 * via rt_newaddrmsg().  Returns 0 or an errno from rtrequest1().
 */
int
rtinit(struct ifaddr *ifa, int cmd, int flags)
{
	struct rtentry *rt;
	struct sockaddr *dst, *odst;
	struct sockaddr_storage maskeddst;
	struct rtentry *nrt = NULL;
	int error;
	struct rt_addrinfo info;

	dst = flags & RTF_HOST ? ifa->ifa_dstaddr : ifa->ifa_addr;
	if (cmd == RTM_DELETE) {
		if ((flags & RTF_HOST) == 0 && ifa->ifa_netmask) {
			/* Delete subnet route for this interface */
			odst = dst;
			dst = (struct sockaddr *)&maskeddst;
			rt_maskedcopy(odst, dst, ifa->ifa_netmask);
		}
		/* Refuse to delete a route that belongs to another ifa. */
		if ((rt = rtalloc1(dst, 0)) != NULL) {
			rt->rt_refcnt--;
			if (rt->rt_ifa != ifa)
				return (flags & RTF_HOST) ? EHOSTUNREACH
				    : ENETUNREACH;
		}
	}
	memset(&info, 0, sizeof(info));
	info.rti_ifa = ifa;
	info.rti_flags = flags | ifa->ifa_flags;
	info.rti_info[RTAX_DST] = dst;
	info.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
	/*
	 * XXX here, it seems that we are assuming that ifa_netmask is NULL
	 * for RTF_HOST.  bsdi4 passes NULL explicitly (via intermediate
	 * variable) when RTF_HOST is 1.  still not sure if i can safely
	 * change it to meet bsdi4 behavior.
	 */
	info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;
	error = rtrequest1(cmd, &info, &nrt);
	if (cmd == RTM_DELETE && error == 0 && (rt = nrt)) {
		/* Announce the deletion, then destroy the returned route. */
		rt_newaddrmsg(cmd, ifa, error, nrt);
		if (rt->rt_refcnt <= 0) {
			rt->rt_refcnt++;
			rtfree(rt);
		}
	}
	if (cmd == RTM_ADD && error == 0 && (rt = nrt)) {
		rt->rt_refcnt--;
		if (rt->rt_ifa != ifa) {
			/* Route landed on the wrong ifa: rebind it. */
			printf("rtinit: wrong ifa (%p) was (%p)\n", ifa,
			    rt->rt_ifa);
			if (rt->rt_ifa->ifa_rtrequest)
				rt->rt_ifa->ifa_rtrequest(RTM_DELETE, rt, NULL);
			rt_replace_ifa(rt, ifa);
			rt->rt_ifp = ifa->ifa_ifp;
			if (ifa->ifa_rtrequest)
				ifa->ifa_rtrequest(RTM_ADD, rt, NULL);
		}
		rt_newaddrmsg(cmd, ifa, error, nrt);
	}
	return error;
}
927
928 /*
929 * Route timer routines. These routes allow functions to be called
930 * for various routes at any time. This is useful in supporting
931 * path MTU discovery and redirect route deletion.
932 *
933 * This is similar to some BSDI internal functions, but it provides
934 * for multiple queues for efficiency's sake...
935 */
936
937 LIST_HEAD(, rttimer_queue) rttimer_queue_head;
938 static int rt_init_done = 0;
939
940 #define RTTIMER_CALLOUT(r) do { \
941 if (r->rtt_func != NULL) { \
942 (*r->rtt_func)(r->rtt_rt, r); \
943 } else { \
944 rtrequest((int) RTM_DELETE, \
945 rt_getkey(r->rtt_rt), \
946 0, 0, 0, 0); \
947 } \
948 } while (/*CONSTCOND*/0)
949
950 /*
951 * Some subtle order problems with domain initialization mean that
952 * we cannot count on this being run from rt_init before various
953 * protocol initializations are done. Therefore, we make sure
954 * that this is run when the first queue is added...
955 */
956
/*
 * One-time setup of the route-timer machinery: initialize the queue
 * list and arm the periodic callout that drives rt_timer_timer().
 * Called lazily from rt_timer_queue_create() because domain init order
 * is not guaranteed (see comment above).
 */
void
rt_timer_init(void)
{
	assert(rt_init_done == 0);

	LIST_INIT(&rttimer_queue_head);
	callout_init(&rt_timer_ch, 0);
	callout_reset(&rt_timer_ch, hz, rt_timer_timer, NULL);
	rt_init_done = 1;
}
967
/*
 * Allocate and register a new timer queue whose entries expire
 * `timeout' seconds after being added.  Returns NULL on allocation
 * failure.  Initializes the whole timer subsystem on first use.
 */
struct rttimer_queue *
rt_timer_queue_create(u_int timeout)
{
	struct rttimer_queue *rtq;

	if (rt_init_done == 0)
		rt_timer_init();

	R_Malloc(rtq, struct rttimer_queue *, sizeof *rtq);
	if (rtq == NULL)
		return NULL;
	memset(rtq, 0, sizeof(*rtq));

	rtq->rtq_timeout = timeout;
	TAILQ_INIT(&rtq->rtq_head);
	LIST_INSERT_HEAD(&rttimer_queue_head, rtq, rtq_link);

	return rtq;
}
987
988 void
989 rt_timer_queue_change(struct rttimer_queue *rtq, long timeout)
990 {
991
992 rtq->rtq_timeout = timeout;
993 }
994
/*
 * Drain every rttimer from `rtq', unlinking each from both its route's
 * list and the queue.  When `destroy' is non-zero the timer's action is
 * fired (RTTIMER_CALLOUT) before the entry is returned to the pool.
 */
void
rt_timer_queue_remove_all(struct rttimer_queue *rtq, int destroy)
{
	struct rttimer *r;

	while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL) {
		LIST_REMOVE(r, rtt_link);
		TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
		if (destroy)
			RTTIMER_CALLOUT(r);
		/* we are already at splsoftnet */
		pool_put(&rttimer_pool, r);
		if (rtq->rtq_count > 0)
			rtq->rtq_count--;
		else
			printf("rt_timer_queue_remove_all: "
			    "rtq_count reached 0\n");
	}
}
1014
1015 void
1016 rt_timer_queue_destroy(struct rttimer_queue *rtq, int destroy)
1017 {
1018
1019 rt_timer_queue_remove_all(rtq, destroy);
1020
1021 LIST_REMOVE(rtq, rtq_link);
1022
1023 /*
1024 * Caller is responsible for freeing the rttimer_queue structure.
1025 */
1026 }
1027
1028 unsigned long
1029 rt_timer_count(struct rttimer_queue *rtq)
1030 {
1031 return rtq->rtq_count;
1032 }
1033
/*
 * Remove every timer attached to route `rt', unlinking each entry from
 * the route's list and its owning queue.  When `destroy' is non-zero
 * the timer's action is fired before the entry is freed.  Called from
 * rtfree() during route teardown.
 */
void
rt_timer_remove_all(struct rtentry *rt, int destroy)
{
	struct rttimer *r;

	while ((r = LIST_FIRST(&rt->rt_timer)) != NULL) {
		LIST_REMOVE(r, rtt_link);
		TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next);
		if (destroy)
			RTTIMER_CALLOUT(r);
		if (r->rtt_queue->rtq_count > 0)
			r->rtt_queue->rtq_count--;
		else
			printf("rt_timer_remove_all: rtq_count reached 0\n");
		/* we are already at splsoftnet */
		pool_put(&rttimer_pool, r);
	}
}
1052
/*
 * Arm a timer on route `rt' that fires `func' after `queue''s timeout.
 * If a timer with the same action already exists on the route it is
 * recycled (unlinked and reused) rather than allocating a new one.
 * Returns 0 or ENOBUFS when the pool is exhausted.
 */
int
rt_timer_add(struct rtentry *rt,
	void (*func)(struct rtentry *, struct rttimer *),
	struct rttimer_queue *queue)
{
	struct rttimer *r;
	int s;

	/*
	 * If there's already a timer with this action, destroy it before
	 * we add a new one.
	 */
	LIST_FOREACH(r, &rt->rt_timer, rtt_link) {
		if (r->rtt_func == func)
			break;
	}
	if (r != NULL) {
		/* Recycle the existing entry. */
		LIST_REMOVE(r, rtt_link);
		TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next);
		if (r->rtt_queue->rtq_count > 0)
			r->rtt_queue->rtq_count--;
		else
			printf("rt_timer_add: rtq_count reached 0\n");
	} else {
		s = splsoftnet();
		r = pool_get(&rttimer_pool, PR_NOWAIT);
		splx(s);
		if (r == NULL)
			return ENOBUFS;
	}

	memset(r, 0, sizeof(*r));

	r->rtt_rt = rt;
	r->rtt_time = time_uptime;	/* stamp for expiry comparison */
	r->rtt_func = func;
	r->rtt_queue = queue;
	LIST_INSERT_HEAD(&rt->rt_timer, r, rtt_link);
	TAILQ_INSERT_TAIL(&queue->rtq_head, r, rtt_next);
	r->rtt_queue->rtq_count++;

	return 0;
}
1096
1097 /* ARGSUSED */
1098 void
1099 rt_timer_timer(void *arg)
1100 {
1101 struct rttimer_queue *rtq;
1102 struct rttimer *r;
1103 int s;
1104
1105 s = splsoftnet();
1106 LIST_FOREACH(rtq, &rttimer_queue_head, rtq_link) {
1107 while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL &&
1108 (r->rtt_time + rtq->rtq_timeout) < time_uptime) {
1109 LIST_REMOVE(r, rtt_link);
1110 TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
1111 RTTIMER_CALLOUT(r);
1112 pool_put(&rttimer_pool, r);
1113 if (rtq->rtq_count > 0)
1114 rtq->rtq_count--;
1115 else
1116 printf("rt_timer_timer: rtq_count reached 0\n");
1117 }
1118 }
1119 splx(s);
1120
1121 callout_reset(&rt_timer_ch, hz, rt_timer_timer, NULL);
1122 }
1123
1124 static struct rtentry *
1125 _rtcache_init(struct route *ro, int flag)
1126 {
1127 KASSERT(ro->_ro_rt == NULL);
1128
1129 if (rtcache_getdst(ro) == NULL)
1130 return NULL;
1131 ro->ro_invalid = false;
1132 if ((ro->_ro_rt = rtalloc1(rtcache_getdst(ro), flag)) != NULL)
1133 rtcache(ro);
1134
1135 return ro->_ro_rt;
1136 }
1137
/*
 * Initialize a route cache, allowing the lookup to clone a cloning
 * route (flag 1 to rtalloc1() via _rtcache_init()).
 */
struct rtentry *
rtcache_init(struct route *ro)
{
	return _rtcache_init(ro, 1);
}
1143
/*
 * Initialize a route cache without cloning (flag 0 to rtalloc1() via
 * _rtcache_init()).
 */
struct rtentry *
rtcache_init_noclone(struct route *ro)
{
	return _rtcache_init(ro, 0);
}
1149
/*
 * Discard the cached route (keeping the destination) and redo the
 * lookup; `clone' selects whether cloning routes may be cloned.
 */
struct rtentry *
rtcache_update(struct route *ro, int clone)
{
	rtcache_clear(ro);
	return _rtcache_init(ro, clone);
}
1156
1157 void
1158 rtcache_copy(struct route *new_ro, const struct route *old_ro)
1159 {
1160 struct rtentry *rt;
1161
1162 KASSERT(new_ro != old_ro);
1163
1164 if ((rt = rtcache_validate(old_ro)) != NULL)
1165 rt->rt_refcnt++;
1166
1167 if (rtcache_getdst(old_ro) == NULL ||
1168 rtcache_setdst(new_ro, rtcache_getdst(old_ro)) != 0)
1169 return;
1170
1171 new_ro->ro_invalid = false;
1172 if ((new_ro->_ro_rt = rt) != NULL)
1173 rtcache(new_ro);
1174 }
1175
1176 static struct dom_rtlist invalid_routes = LIST_HEAD_INITIALIZER(dom_rtlist);
1177
1178 void
1179 rtcache_invalidate(struct dom_rtlist *rtlist)
1180 {
1181 struct route *ro;
1182
1183 while ((ro = LIST_FIRST(rtlist)) != NULL) {
1184 KASSERT(ro->_ro_rt != NULL);
1185 ro->ro_invalid = true;
1186 LIST_REMOVE(ro, ro_rtcache_next);
1187 LIST_INSERT_HEAD(&invalid_routes, ro, ro_rtcache_next);
1188 }
1189 }
1190
1191 void
1192 rtcache_clear(struct route *ro)
1193 {
1194 if (ro->_ro_rt == NULL)
1195 return;
1196
1197 KASSERT(rtcache_getdst(ro) != NULL);
1198
1199 LIST_REMOVE(ro, ro_rtcache_next);
1200
1201 RTFREE(ro->_ro_rt);
1202 ro->_ro_rt = NULL;
1203 }
1204
1205 struct rtentry *
1206 rtcache_lookup2(struct route *ro, const struct sockaddr *dst, int clone,
1207 int *hitp)
1208 {
1209 const struct sockaddr *odst;
1210 struct rtentry *rt = NULL;
1211
1212 odst = rtcache_getdst(ro);
1213
1214 if (odst == NULL)
1215 ;
1216 else if (sockaddr_cmp(odst, dst) != 0)
1217 rtcache_free(ro);
1218 else if ((rt = rtcache_validate(ro)) == NULL)
1219 rtcache_clear(ro);
1220
1221 if (rt == NULL) {
1222 *hitp = 0;
1223 if (rtcache_setdst(ro, dst) == 0)
1224 rt = _rtcache_init(ro, clone);
1225 } else
1226 *hitp = 1;
1227
1228 return rt;
1229 }
1230
/*
 * Fully release a route cache: drop the cached rtentry (rtcache_clear()
 * must run first, while ro_sa is still valid) and then free the stored
 * destination address.
 */
void
rtcache_free(struct route *ro)
{
	rtcache_clear(ro);
	if (ro->ro_sa != NULL) {
		sockaddr_free(ro->ro_sa);
		ro->ro_sa = NULL;
		KASSERT(ro->_ro_rt == NULL);
	}
}
1241
/*
 * Set the cache's destination to a copy of `sa', clearing any cached
 * route.  If the existing ro_sa has the same address family, try to
 * copy in place (sockaddr_copy() fails when sa is larger than the old
 * buffer).  Otherwise, or when the in-place copy fails, free the old
 * sockaddr and duplicate `sa' fresh.  Returns 0 on success or ENOMEM,
 * in which case ro_sa is left NULL-equivalent (freed above, then the
 * failed dup assigns NULL).
 */
int
rtcache_setdst(struct route *ro, const struct sockaddr *sa)
{
	KASSERT(sa != NULL);

	if (ro->ro_sa != NULL && ro->ro_sa->sa_family == sa->sa_family) {
		rtcache_clear(ro);
		if (sockaddr_copy(ro->ro_sa, ro->ro_sa->sa_len, sa) != NULL)
			return 0;
		/* sa did not fit in the old buffer; fall through to dup. */
		sockaddr_free(ro->ro_sa);
	} else if (ro->ro_sa != NULL)
		rtcache_free(ro); /* free ro_sa, wrong family */

	KASSERT(ro->_ro_rt == NULL);

	if ((ro->ro_sa = sockaddr_dup(sa, M_NOWAIT)) == NULL) {
		return ENOMEM;
	}
	return 0;
}
1262
1263 static int
1264 rt_walktree_visitor(struct radix_node *rn, void *v)
1265 {
1266 struct rtwalk *rw = (struct rtwalk *)v;
1267
1268 return (*rw->rw_f)((struct rtentry *)rn, rw->rw_v);
1269 }
1270
1271 int
1272 rt_walktree(sa_family_t family, int (*f)(struct rtentry *, void *), void *v)
1273 {
1274 struct radix_node_head *rnh = rt_tables[family];
1275 struct rtwalk rw;
1276
1277 if (rnh == NULL)
1278 return 0;
1279
1280 rw.rw_f = f;
1281 rw.rw_v = v;
1282
1283 return rn_walktree(rnh, rt_walktree_visitor, &rw);
1284 }
Cache object: 76ba6d2089848d6fc056f403937e2516
|