FreeBSD/Linux Kernel Cross Reference
sys/net/route.c
1 /*-
2 * Copyright (c) 1980, 1986, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 4. Neither the name of the University nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * @(#)route.c 8.3.1.1 (Berkeley) 2/23/95
30 * $FreeBSD$
31 */
32 /************************************************************************
33 * Note: In this file a 'fib' is a "forwarding information base" *
34 * Which is the new name for an in kernel routing (next hop) table. *
35 ***********************************************************************/
36
37 #include "opt_inet.h"
38 #include "opt_route.h"
39 #include "opt_mrouting.h"
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/malloc.h>
44 #include <sys/mbuf.h>
45 #include <sys/socket.h>
46 #include <sys/sysctl.h>
47 #include <sys/sysproto.h>
48 #include <sys/proc.h>
49 #include <sys/domain.h>
50 #include <sys/kernel.h>
51
52 #include <net/if.h>
53 #include <net/route.h>
54
55 #include <netinet/in.h>
56 #include <netinet/ip_mroute.h>
57
58 #include <vm/uma.h>
59
/*
 * Number of forwarding tables ("fibs") compiled into the kernel.
 * ROUTETABLES is an optional kernel config knob; when absent, a single
 * table is used.  RT_MAXFIBS is the hard upper bound because only
 * 4 bits of mbuf flag space are available to tag a packet with a fib.
 */
#ifndef ROUTETABLES
#define	RT_NUMFIBS	1
#define	RT_MAXFIBS	1
#else
/* while we use 4 bits in the mbuf flags, we are limited to 16 */
#define	RT_MAXFIBS	16
#if ROUTETABLES > RT_MAXFIBS
/* define a fallback value, then fail the build with a clear message */
#define	RT_NUMFIBS	RT_MAXFIBS
#error "ROUTETABLES defined too big"
#else
#if ROUTETABLES == 0
/* option present but zero: still use one table */
#define	RT_NUMFIBS	1
#else
#define	RT_NUMFIBS	ROUTETABLES
#endif
#endif
#endif
79
/* Number of fibs actually in use; settable only at boot (see tunable). */
u_int rt_numfibs = RT_NUMFIBS;
SYSCTL_INT(_net, OID_AUTO, fibs, CTLFLAG_RD, &rt_numfibs, 0, "");
/*
 * Allow the boot code to allow LESS than RT_MAXFIBS to be used.
 * We can't do more because storage is statically allocated for now.
 */
TUNABLE_INT("net.fibs", &rt_numfibs);

/*
 * By default add routes to all fibs for new interfaces.
 * Once this is set to 0 then only allocate routes on interface
 * changes for the FIB of the caller when adding a new set of addresses
 * to an interface.  XXX this is a shotgun approach to a problem that needs
 * a more fine grained solution.. that will come.
 */
u_int rt_add_addr_allfibs = 1;
SYSCTL_INT(_net, OID_AUTO, add_addr_allfibs, CTLFLAG_RW,
    &rt_add_addr_allfibs, 0, "");
TUNABLE_INT("net.add_addr_allfibs", &rt_add_addr_allfibs);

/* Routing statistics (misses, dynamic routes, redirects, ...). */
static struct rtstat rtstat;

/* by default only the first 'row' of tables will be accessed. */
/*
 * XXXMRT When we fix netstat, and do this differently,
 * we can allocate this dynamically. As long as we are keeping
 * things backwards compatible we need to allocate this
 * statically.
 */
struct radix_node_head *rt_tables[RT_MAXFIBS][AF_MAX+1];

static int	rttrash;		/* routes not in table but not freed */

static void rt_maskedcopy(struct sockaddr *,
	    struct sockaddr *, struct sockaddr *);

/*
 * Compare two sockaddr structures.
 * NB: only the first a1->sa_len bytes are compared, so the second
 * argument must be at least that long.
 */
#define	sa_equal(a1, a2) (bcmp((a1), (a2), (a1)->sa_len) == 0)
118
/*
 * Convert a 'struct radix_node *' to a 'struct rtentry *'.
 * The operation can be done safely (in this code) because a
 * 'struct rtentry' starts with two 'struct radix_node''s, the first
 * one representing leaf nodes in the routing tree, which is
 * what the code in radix.c passes us as a 'struct radix_node'.
 *
 * But because there are a lot of assumptions in this conversion,
 * do not cast explicitly, but always use the macro below.
 */
#define	RNTORT(p)	((struct rtentry *)(p))

static uma_zone_t rtzone;		/* Routing table UMA zone. */

#if 0
/* default fib for tunnels to use */
u_int tunnel_fib = 0;
SYSCTL_INT(_net, OID_AUTO, tunnelfib, CTLFLAG_RD, &tunnel_fib, 0, "");
#endif
138
139 /*
140 * handler for net.my_fibnum
141 */
142 static int
143 sysctl_my_fibnum(SYSCTL_HANDLER_ARGS)
144 {
145 int fibnum;
146 int error;
147
148 fibnum = curthread->td_proc->p_fibnum;
149 error = sysctl_handle_int(oidp, &fibnum, 0, req);
150 return (error);
151 }
152
153 SYSCTL_PROC(_net, OID_AUTO, my_fibnum, CTLTYPE_INT|CTLFLAG_RD,
154 NULL, 0, &sysctl_my_fibnum, "I", "default FIB of caller");
155
/*
 * Initialize the routing subsystem: clamp the boot-time fib count,
 * create the UMA zone backing rtentry allocations, initialize the
 * radix code, and attach one routing table per (domain, fib) pair.
 * Only AF_INET gets a table in every fib row; all other families get
 * a table in row 0 only.
 */
static void
route_init(void)
{
	int table;
	struct domain *dom;
	int fam;

	/* whack the tunable ints into line. */
	if (rt_numfibs > RT_MAXFIBS)
		rt_numfibs = RT_MAXFIBS;
	if (rt_numfibs == 0)
		rt_numfibs = 1;
	rtzone = uma_zcreate("rtentry", sizeof(struct rtentry), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	rn_init();	/* initialize all zeroes, all ones, mask table */

	for (dom = domains; dom; dom = dom->dom_next) {
		if (dom->dom_rtattach) {
			for (table = 0; table < rt_numfibs; table++) {
				if ( (fam = dom->dom_family) == AF_INET ||
				    table == 0) {
					/* for now only AF_INET has > 1 table */
					/* XXX MRT
					 * rtattach will be also called
					 * from vfs_export.c but the
					 * offset will be 0
					 * (only for AF_INET and AF_INET6
					 * which don't need it anyhow)
					 */
					dom->dom_rtattach(
					    (void **)&rt_tables[table][fam],
					    dom->dom_rtoffset);
				} else {
					/* Non-INET families: row 0 only. */
					break;
				}
			}
		}
	}
}
195
196 #ifndef _SYS_SYSPROTO_H_
197 struct setfib_args {
198 int fibnum;
199 };
200 #endif
201 int
202 setfib(struct thread *td, struct setfib_args *uap)
203 {
204 if (uap->fibnum < 0 || uap->fibnum >= rt_numfibs)
205 return EINVAL;
206 td->td_proc->p_fibnum = uap->fibnum;
207 return (0);
208 }
209
/*
 * Packet routing routines.
 */

/*
 * Resolve ro->ro_dst in the default (fib 0) table, ignoring no
 * rtentry flags.
 */
void
rtalloc(struct route *ro)
{
	rtalloc_ign_fib(ro, 0UL, 0);
}
218
/* As rtalloc(), but resolve in the table of the given fib. */
void
rtalloc_fib(struct route *ro, u_int fibnum)
{
	rtalloc_ign_fib(ro, 0UL, fibnum);
}
224
225 void
226 rtalloc_ign(struct route *ro, u_long ignore)
227 {
228 struct rtentry *rt;
229
230 if ((rt = ro->ro_rt) != NULL) {
231 if (rt->rt_ifp != NULL && rt->rt_flags & RTF_UP)
232 return;
233 RTFREE(rt);
234 ro->ro_rt = NULL;
235 }
236 ro->ro_rt = rtalloc1_fib(&ro->ro_dst, 1, ignore, 0);
237 if (ro->ro_rt)
238 RT_UNLOCK(ro->ro_rt);
239 }
240
/*
 * Resolve ro->ro_dst in the table of the given fib, disregarding the
 * rtentry flags in 'ignore' when deciding whether to clone.  A cached
 * route that is still up is reused; a stale one is freed and replaced.
 * On return ro->ro_rt holds a referenced, unlocked route (or NULL).
 */
void
rtalloc_ign_fib(struct route *ro, u_long ignore, u_int fibnum)
{
	struct rtentry *rt;

	if ((rt = ro->ro_rt) != NULL) {
		/* Cached route still usable?  Then we are done. */
		if (rt->rt_ifp != NULL && rt->rt_flags & RTF_UP)
			return;
		RTFREE(rt);
		ro->ro_rt = NULL;
	}
	ro->ro_rt = rtalloc1_fib(&ro->ro_dst, 1, ignore, fibnum);
	if (ro->ro_rt)
		RT_UNLOCK(ro->ro_rt);	/* rtalloc1_fib() returns it locked */
}
256
/*
 * Look up the route that matches the address given
 * Or, at least try.. Create a cloned route if needed.
 * Operates on the default (fib 0) table.
 *
 * The returned route, if any, is referenced and locked.
 */
struct rtentry *
rtalloc1(struct sockaddr *dst, int report, u_long ignflags)
{
	return (rtalloc1_fib(dst, report, ignflags, 0));
}
268
/*
 * Low-level route lookup in the given fib's table for dst's family.
 * 'report' non-zero requests cloning when the matched entry carries
 * RTF_CLONING and a routing-socket message on a miss; 'ignflags' are
 * rtentry flags to disregard when making the cloning decision.
 * On success the returned rtentry is referenced and locked.
 */
struct rtentry *
rtalloc1_fib(struct sockaddr *dst, int report, u_long ignflags,
		    u_int fibnum)
{
	struct radix_node_head *rnh;
	struct rtentry *rt;
	struct radix_node *rn;
	struct rtentry *newrt;
	struct rt_addrinfo info;
	u_long nflags;
	int err = 0, msgtype = RTM_MISS;

	KASSERT((fibnum < rt_numfibs), ("rtalloc1_fib: bad fibnum"));
	if (dst->sa_family != AF_INET)	/* Only INET supports > 1 fib now */
		fibnum = 0;
	rnh = rt_tables[fibnum][dst->sa_family];
	newrt = NULL;
	/*
	 * Look up the address in the table for that Address Family
	 */
	if (rnh == NULL) {
		/* No table attached for this family. */
		rtstat.rts_unreach++;
		goto miss2;
	}
	RADIX_NODE_HEAD_LOCK(rnh);
	if ((rn = rnh->rnh_matchaddr(dst, rnh)) &&
	    (rn->rn_flags & RNF_ROOT) == 0) {
		/*
		 * If we find it and it's not the root node, then
		 * get a reference on the rtentry associated.
		 */
		newrt = rt = RNTORT(rn);
		nflags = rt->rt_flags & ~ignflags;
		if (report && (nflags & RTF_CLONING)) {
			/*
			 * We are apparently adding (report = 0 in delete).
			 * If it requires that it be cloned, do so.
			 * (This implies it wasn't a HOST route.)
			 */
			err = rtrequest_fib(RTM_RESOLVE, dst, NULL,
			    NULL, 0, &newrt, fibnum);
			if (err) {
				/*
				 * If the cloning didn't succeed, maybe
				 * what we have will do. Return that.
				 */
				newrt = rt;		/* existing route */
				RT_LOCK(newrt);
				RT_ADDREF(newrt);
				goto miss;
			}
			KASSERT(newrt, ("no route and no error"));
			RT_LOCK(newrt);
			if (newrt->rt_flags & RTF_XRESOLVE) {
				/*
				 * If the new route specifies it be
				 * externally resolved, then go do that.
				 */
				msgtype = RTM_RESOLVE;
				goto miss;
			}
			/* Inform listeners of the new route. */
			bzero(&info, sizeof(info));
			info.rti_info[RTAX_DST] = rt_key(newrt);
			info.rti_info[RTAX_NETMASK] = rt_mask(newrt);
			info.rti_info[RTAX_GATEWAY] = newrt->rt_gateway;
			if (newrt->rt_ifp != NULL) {
				info.rti_info[RTAX_IFP] =
				    newrt->rt_ifp->if_addr->ifa_addr;
				info.rti_info[RTAX_IFA] = newrt->rt_ifa->ifa_addr;
			}
			rt_missmsg(RTM_ADD, &info, newrt->rt_flags, 0);
		} else {
			/* No cloning wanted: just take a reference. */
			KASSERT(rt == newrt, ("locking wrong route"));
			RT_LOCK(newrt);
			RT_ADDREF(newrt);
		}
		RADIX_NODE_HEAD_UNLOCK(rnh);
	} else {
		/*
		 * Either we hit the root or couldn't find any match,
		 * which basically means: "can't get there from here".
		 */
		rtstat.rts_unreach++;
	miss:
		RADIX_NODE_HEAD_UNLOCK(rnh);
	miss2:	if (report) {
			/*
			 * If required, report the failure to the supervising
			 * authorities.
			 * For a delete, this is not an error. (report == 0)
			 */
			bzero(&info, sizeof(info));
			info.rti_info[RTAX_DST] = dst;
			rt_missmsg(msgtype, &info, 0, err);
		}
	}
	if (newrt)
		RT_LOCK_ASSERT(newrt);
	return (newrt);
}
371
/*
 * Remove a reference count from an rtentry.
 * If the count gets low enough, take it out of the routing table.
 * The entry must be locked on entry; it is unlocked (or destroyed)
 * on return.
 */
void
rtfree(struct rtentry *rt)
{
	struct radix_node_head *rnh;

	KASSERT(rt != NULL,("%s: NULL rt", __func__));
	rnh = rt_tables[rt->rt_fibnum][rt_key(rt)->sa_family];
	KASSERT(rnh != NULL,("%s: NULL rnh", __func__));

	RT_LOCK_ASSERT(rt);

	/*
	 * The callers should use RTFREE_LOCKED() or RTFREE(), so
	 * we should come here exactly with the last reference.
	 */
	RT_REMREF(rt);
	if (rt->rt_refcnt > 0) {
		/* Someone else still holds a reference; just log it. */
		printf("%s: %p has %lu refs\n", __func__, rt, rt->rt_refcnt);
		goto done;
	}

	/*
	 * On last reference give the "close method" a chance
	 * to cleanup private state.  This also permits (for
	 * IPv4 and IPv6) a chance to decide if the routing table
	 * entry should be purged immediately or at a later time.
	 * When an immediate purge is to happen the close routine
	 * typically calls rtexpunge which clears the RTF_UP flag
	 * on the entry so that the code below reclaims the storage.
	 */
	if (rt->rt_refcnt == 0 && rnh->rnh_close)
		rnh->rnh_close((struct radix_node *)rt, rnh);

	/*
	 * If we are no longer "up" (and ref == 0)
	 * then we can free the resources associated
	 * with the route.
	 */
	if ((rt->rt_flags & RTF_UP) == 0) {
		if (rt->rt_nodes->rn_flags & (RNF_ACTIVE | RNF_ROOT))
			panic("rtfree 2");
		/*
		 * the rtentry must have been removed from the routing table
		 * so it is represented in rttrash.. remove that now.
		 */
		rttrash--;
#ifdef	DIAGNOSTIC
		if (rt->rt_refcnt < 0) {
			printf("rtfree: %p not freed (neg refs)\n", rt);
			goto done;
		}
#endif
		/*
		 * release references on items we hold them on..
		 * e.g other routes and ifaddrs.
		 */
		if (rt->rt_ifa)
			IFAFREE(rt->rt_ifa);
		rt->rt_parent = NULL;		/* NB: no refcnt on parent */

		/*
		 * The key is separately alloc'd so free it (see rt_setgate()).
		 * This also frees the gateway, as they are always malloc'd
		 * together.
		 */
		Free(rt_key(rt));

		/*
		 * and the rtentry itself of course
		 */
		RT_LOCK_DESTROY(rt);
		uma_zfree(rtzone, rt);
		return;
	}
done:
	RT_UNLOCK(rt);
}
453
454
/*
 * Force a routing table entry to the specified
 * destination to go through the given gateway.
 * Normally called as a result of a routing redirect
 * message from the network layer.  Operates on the
 * default (fib 0) table.
 */
void
rtredirect(struct sockaddr *dst,
	struct sockaddr *gateway,
	struct sockaddr *netmask,
	int flags,
	struct sockaddr *src)
{

	rtredirect_fib(dst, gateway, netmask, flags, src, 0);
}
471
/*
 * Process an ICMP-style redirect against the given fib: either create
 * a new dynamic host route to 'dst' via 'gateway', or modify the
 * gateway of the existing route.  'src' must match the current router
 * for dst unless RTF_DONE is set.  Always emits an RTM_REDIRECT
 * routing-socket message describing the outcome.
 */
void
rtredirect_fib(struct sockaddr *dst,
	struct sockaddr *gateway,
	struct sockaddr *netmask,
	int flags,
	struct sockaddr *src,
	u_int fibnum)
{
	struct rtentry *rt, *rt0 = NULL;
	int error = 0;
	short *stat = NULL;
	struct rt_addrinfo info;
	struct ifaddr *ifa;

	/* verify the gateway is directly reachable */
	if ((ifa = ifa_ifwithnet(gateway)) == NULL) {
		error = ENETUNREACH;
		goto out;
	}
	rt = rtalloc1_fib(dst, 0, 0UL, fibnum);	/* NB: rt is locked */
	/*
	 * If the redirect isn't from our current router for this dst,
	 * it's either old or wrong.  If it redirects us to ourselves,
	 * we have a routing loop, perhaps as a result of an interface
	 * going down recently.
	 */
	if (!(flags & RTF_DONE) && rt &&
	     (!sa_equal(src, rt->rt_gateway) || rt->rt_ifa != ifa))
		error = EINVAL;
	else if (ifa_ifwithaddr(gateway))
		error = EHOSTUNREACH;
	if (error)
		goto done;
	/*
	 * Create a new entry if we just got back a wildcard entry
	 * or the lookup failed.  This is necessary for hosts
	 * which use routing redirects generated by smart gateways
	 * to dynamically build the routing tables.
	 */
	if (rt == NULL || (rt_mask(rt) && rt_mask(rt)->sa_len < 2))
		goto create;
	/*
	 * Don't listen to the redirect if it's
	 * for a route to an interface.
	 */
	if (rt->rt_flags & RTF_GATEWAY) {
		if (((rt->rt_flags & RTF_HOST) == 0) && (flags & RTF_HOST)) {
			/*
			 * Changing from route to net => route to host.
			 * Create new route, rather than smashing route to net.
			 */
		create:
			rt0 = rt;
			rt = NULL;

			flags |=  RTF_GATEWAY | RTF_DYNAMIC;
			bzero((caddr_t)&info, sizeof(info));
			info.rti_info[RTAX_DST] = dst;
			info.rti_info[RTAX_GATEWAY] = gateway;
			info.rti_info[RTAX_NETMASK] = netmask;
			info.rti_ifa = ifa;
			info.rti_flags = flags;
			error = rtrequest1_fib(RTM_ADD, &info, &rt, fibnum);
			if (rt != NULL) {
				RT_LOCK(rt);
				EVENTHANDLER_INVOKE(route_redirect_event, rt0, rt, dst);
				flags = rt->rt_flags;
			}
			if (rt0)
				RTFREE_LOCKED(rt0);

			stat = &rtstat.rts_dynamic;
		} else {
			struct rtentry *gwrt;

			/*
			 * Smash the current notion of the gateway to
			 * this destination.  Should check about netmask!!!
			 */
			rt->rt_flags |= RTF_MODIFIED;
			flags |= RTF_MODIFIED;
			stat = &rtstat.rts_newgateway;
			/*
			 * add the key and gateway (in one malloc'd chunk).
			 */
			rt_setgate(rt, rt_key(rt), gateway);
			gwrt = rtalloc1(gateway, 1, 0);
			EVENTHANDLER_INVOKE(route_redirect_event, rt, gwrt, dst);
			/*
			 * NOTE(review): rtalloc1() can return NULL when the
			 * lookup fails, and RTFREE_LOCKED() dereferences its
			 * argument -- confirm gwrt can never be NULL here,
			 * or add a NULL check.
			 */
			RTFREE_LOCKED(gwrt);
		}
	} else
		error = EHOSTUNREACH;
done:
	if (rt)
		RTFREE_LOCKED(rt);
out:
	if (error)
		rtstat.rts_badredirect++;
	else if (stat != NULL)
		(*stat)++;
	/* Always tell the routing socket what happened. */
	bzero((caddr_t)&info, sizeof(info));
	info.rti_info[RTAX_DST] = dst;
	info.rti_info[RTAX_GATEWAY] = gateway;
	info.rti_info[RTAX_NETMASK] = netmask;
	info.rti_info[RTAX_AUTHOR] = src;
	rt_missmsg(RTM_REDIRECT, &info, flags, error);
}
579
/* Routing ioctl against the default (fib 0) table. */
int
rtioctl(u_long req, caddr_t data)
{
	return (rtioctl_fib(req, data, 0));
}
585
/*
 * Routing table ioctl interface.
 * Currently only multicast-routing ioctls are handled (when INET is
 * compiled in and the mrouting code has registered mrt_ioctl).
 */
int
rtioctl_fib(u_long req, caddr_t data, u_int fibnum)
{

	/*
	 * If more ioctl commands are added here, make sure the proper
	 * super-user checks are being performed because it is possible for
	 * prison-root to make it this far if raw sockets have been enabled
	 * in jails.
	 */
#ifdef INET
	/* Multicast goop, grrr... */
	return mrt_ioctl ? mrt_ioctl(req, data, fibnum) : EOPNOTSUPP;
#else /* INET */
	return ENXIO;
#endif /* INET */
}
606
/* As ifa_ifwithroute_fib(), but consult the default (fib 0) table. */
struct ifaddr *
ifa_ifwithroute(int flags, struct sockaddr *dst, struct sockaddr *gateway)
{
	return (ifa_ifwithroute_fib(flags, dst, gateway, 0));
}
612
/*
 * Find the interface address to associate with a route to 'dst' via
 * 'gateway' (interpreting 'flags' as route flags), falling back to a
 * route lookup in the given fib when no interface matches directly.
 * Returns NULL when no suitable ifaddr can be found.
 */
struct ifaddr *
ifa_ifwithroute_fib(int flags, struct sockaddr *dst, struct sockaddr *gateway,
				u_int fibnum)
{
	register struct ifaddr *ifa;
	int not_found = 0;

	if ((flags & RTF_GATEWAY) == 0) {
		/*
		 * If we are adding a route to an interface,
		 * and the interface is a pt to pt link
		 * we should search for the destination
		 * as our clue to the interface.  Otherwise
		 * we can use the local address.
		 */
		ifa = NULL;
		if (flags & RTF_HOST)
			ifa = ifa_ifwithdstaddr(dst);
		if (ifa == NULL)
			ifa = ifa_ifwithaddr(gateway);
	} else {
		/*
		 * If we are adding a route to a remote net
		 * or host, the gateway may still be on the
		 * other end of a pt to pt link.
		 */
		ifa = ifa_ifwithdstaddr(gateway);
	}
	if (ifa == NULL)
		ifa = ifa_ifwithnet(gateway);
	if (ifa == NULL) {
		/* Last resort: route to the gateway itself. */
		struct rtentry *rt = rtalloc1_fib(gateway, 0, 0UL, fibnum);
		if (rt == NULL)
			return (NULL);
		/*
		 * dismiss a gateway that is reachable only
		 * through the default router
		 */
		switch (gateway->sa_family) {
		case AF_INET:
			if (satosin(rt_key(rt))->sin_addr.s_addr == INADDR_ANY)
				not_found = 1;
			break;
		case AF_INET6:
			if (IN6_IS_ADDR_UNSPECIFIED(&satosin6(rt_key(rt))->sin6_addr))
				not_found = 1;
			break;
		default:
			break;
		}
		RT_REMREF(rt);
		RT_UNLOCK(rt);
		/*
		 * NOTE(review): rt->rt_ifa is read below after our
		 * reference was dropped above -- presumably safe in
		 * practice, but confirm the entry cannot be reclaimed
		 * in between.
		 */
		if (not_found)
			return (NULL);
		if ((ifa = rt->rt_ifa) == NULL)
			return (NULL);
	}
	/* Prefer an address of dst's family on the chosen interface. */
	if (ifa->ifa_addr->sa_family != dst->sa_family) {
		struct ifaddr *oifa = ifa;
		ifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp);
		if (ifa == NULL)
			ifa = oifa;
	}
	return (ifa);
}
678
/* Tree-walk callbacks used when deleting/adding network routes. */
static walktree_f_t rt_fixdelete;
static walktree_f_t rt_fixchange;

/* Argument bundle passed to rt_fixchange() via rnh_walktree_from(). */
struct rtfc_arg {
	struct rtentry *rt0;
	struct radix_node_head *rnh;
};
686
/*
 * Do appropriate manipulations of a routing tree given
 * all the bits of info needed.  Operates on the default
 * (fib 0) table.
 */
int
rtrequest(int req,
	struct sockaddr *dst,
	struct sockaddr *gateway,
	struct sockaddr *netmask,
	int flags,
	struct rtentry **ret_nrt)
{
	return (rtrequest_fib(req, dst, gateway, netmask, flags, ret_nrt, 0));
}
701
702 int
703 rtrequest_fib(int req,
704 struct sockaddr *dst,
705 struct sockaddr *gateway,
706 struct sockaddr *netmask,
707 int flags,
708 struct rtentry **ret_nrt,
709 u_int fibnum)
710 {
711 struct rt_addrinfo info;
712
713 if (dst->sa_len == 0)
714 return(EINVAL);
715
716 bzero((caddr_t)&info, sizeof(info));
717 info.rti_flags = flags;
718 info.rti_info[RTAX_DST] = dst;
719 info.rti_info[RTAX_GATEWAY] = gateway;
720 info.rti_info[RTAX_NETMASK] = netmask;
721 return rtrequest1_fib(req, &info, ret_nrt, fibnum);
722 }
723
/*
 * These (questionable) definitions of apparent local variables apply
 * to the next two functions.  XXXXXX!!!
 * They alias fields of the 'struct rt_addrinfo *info' parameter so
 * the bodies below can refer to them by short names; they are
 * #undef'd again right after rtrequest1_fib().
 */
#define	dst	info->rti_info[RTAX_DST]
#define	gateway	info->rti_info[RTAX_GATEWAY]
#define	netmask	info->rti_info[RTAX_NETMASK]
#define	ifaaddr	info->rti_info[RTAX_IFA]
#define	ifpaddr	info->rti_info[RTAX_IFP]
#define	flags	info->rti_flags
734
/* Resolve the interface/address for 'info' using the default fib. */
int
rt_getifa(struct rt_addrinfo *info)
{
	return (rt_getifa_fib(info, 0));
}
740
/*
 * Fill in info->rti_ifa (and rti_ifp when absent) for a routing
 * request.  NB: dst/gateway/ifaaddr/ifpaddr/flags below are the
 * #define aliases for fields of 'info' declared above.  Returns
 * ENETUNREACH if no usable ifaddr could be found, else 0.
 */
int
rt_getifa_fib(struct rt_addrinfo *info, u_int fibnum)
{
	struct ifaddr *ifa;
	int error = 0;

	/*
	 * ifp may be specified by sockaddr_dl
	 * when protocol address is ambiguous.
	 */
	if (info->rti_ifp == NULL && ifpaddr != NULL &&
	    ifpaddr->sa_family == AF_LINK &&
	    (ifa = ifa_ifwithnet(ifpaddr)) != NULL)
		info->rti_ifp = ifa->ifa_ifp;
	if (info->rti_ifa == NULL && ifaaddr != NULL)
		info->rti_ifa = ifa_ifwithaddr(ifaaddr);
	if (info->rti_ifa == NULL) {
		struct sockaddr *sa;

		/* Pick the most specific address available as the clue. */
		sa = ifaaddr != NULL ? ifaaddr :
		    (gateway != NULL ? gateway : dst);
		if (sa != NULL && info->rti_ifp != NULL)
			info->rti_ifa = ifaof_ifpforaddr(sa, info->rti_ifp);
		else if (dst != NULL && gateway != NULL)
			info->rti_ifa = ifa_ifwithroute_fib(flags, dst, gateway,
							fibnum);
		else if (sa != NULL)
			info->rti_ifa = ifa_ifwithroute_fib(flags, sa, sa,
							fibnum);
	}
	if ((ifa = info->rti_ifa) != NULL) {
		if (info->rti_ifp == NULL)
			info->rti_ifp = ifa->ifa_ifp;
	} else
		error = ENETUNREACH;
	return (error);
}
778
/*
 * Expunges references to a route that's about to be reclaimed:
 * deletes it from the radix tree, clears RTF_UP, reaps clones,
 * drops the gwroute reference and notifies the protocol.
 * The route must be locked.  Returns 0 or ESRCH/EAFNOSUPPORT.
 */
int
rtexpunge(struct rtentry *rt)
{
	struct radix_node *rn;
	struct radix_node_head *rnh;
	struct ifaddr *ifa;
	int error = 0;

	RT_LOCK_ASSERT(rt);
#if 0
	/*
	 * We cannot assume anything about the reference count
	 * because protocols call us in many situations; often
	 * before unwinding references to the table entry.
	 */
	KASSERT(rt->rt_refcnt <= 1, ("bogus refcnt %ld", rt->rt_refcnt));
#endif
	/*
	 * Find the correct routing tree to use for this Address Family
	 */
	rnh = rt_tables[rt->rt_fibnum][rt_key(rt)->sa_family];
	if (rnh == NULL)
		return (EAFNOSUPPORT);

	RADIX_NODE_HEAD_LOCK(rnh);

	/*
	 * Remove the item from the tree; it should be there,
	 * but when callers invoke us blindly it may not (sigh).
	 */
	rn = rnh->rnh_deladdr(rt_key(rt), rt_mask(rt), rnh);
	if (rn == NULL) {
		error = ESRCH;
		goto bad;
	}
	KASSERT((rn->rn_flags & (RNF_ACTIVE | RNF_ROOT)) == 0,
	    ("unexpected flags 0x%x", rn->rn_flags));
	KASSERT(rt == RNTORT(rn),
	    ("lookup mismatch, rt %p rn %p", rt, rn));

	rt->rt_flags &= ~RTF_UP;

	/*
	 * Now search what's left of the subtree for any cloned
	 * routes which might have been formed from this node.
	 */
	if ((rt->rt_flags & RTF_CLONING) && rt_mask(rt))
		rnh->rnh_walktree_from(rnh, rt_key(rt), rt_mask(rt),
				       rt_fixdelete, rt);

	/*
	 * Remove any external references we may have.
	 * This might result in another rtentry being freed if
	 * we held its last reference.
	 */
	if (rt->rt_gwroute) {
		RTFREE(rt->rt_gwroute);
		rt->rt_gwroute = NULL;
	}

	/*
	 * Give the protocol a chance to keep things in sync.
	 */
	if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest) {
		struct rt_addrinfo info;

		bzero((caddr_t)&info, sizeof(info));
		info.rti_flags = rt->rt_flags;
		info.rti_info[RTAX_DST] = rt_key(rt);
		info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
		info.rti_info[RTAX_NETMASK] = rt_mask(rt);
		ifa->ifa_rtrequest(RTM_DELETE, rt, &info);
	}

	/*
	 * one more rtentry floating around that is not
	 * linked to the routing table.
	 */
	rttrash++;
bad:
	RADIX_NODE_HEAD_UNLOCK(rnh);
	return (error);
}
866
/* As rtrequest1_fib(), but operate on the default (fib 0) table. */
int
rtrequest1(int req, struct rt_addrinfo *info, struct rtentry **ret_nrt)
{
	return (rtrequest1_fib(req, info, ret_nrt, 0));
}
872
/*
 * The workhorse for routing table changes (RTM_ADD, RTM_DELETE,
 * RTM_RESOLVE) against the given fib.  'info' supplies dst, gateway,
 * netmask and flags via the #define aliases declared above.  On
 * success, *ret_nrt (when non-NULL) receives a referenced rtentry.
 * Returns 0 or an errno.
 */
int
rtrequest1_fib(int req, struct rt_addrinfo *info, struct rtentry **ret_nrt,
				u_int fibnum)
{
	int error = 0;
	register struct rtentry *rt;
	register struct radix_node *rn;
	register struct radix_node_head *rnh;
	struct ifaddr *ifa;
	struct sockaddr *ndst;
#define senderr(x) { error = x ; goto bad; }

	KASSERT((fibnum < rt_numfibs), ("rtrequest1_fib: bad fibnum"));
	if (dst->sa_family != AF_INET)	/* Only INET supports > 1 fib now */
		fibnum = 0;
	/*
	 * Find the correct routing tree to use for this Address Family
	 */
	rnh = rt_tables[fibnum][dst->sa_family];
	if (rnh == NULL)
		return (EAFNOSUPPORT);
	RADIX_NODE_HEAD_LOCK(rnh);
	/*
	 * If we are adding a host route then we don't want to put
	 * a netmask in the tree, nor do we want to clone it.
	 */
	if (flags & RTF_HOST) {
		netmask = NULL;
		flags &= ~RTF_CLONING;
	}
	switch (req) {
	case RTM_DELETE:
		/*
		 * Remove the item from the tree and return it.
		 * Complain if it is not there and do no more processing.
		 */
		rn = rnh->rnh_deladdr(dst, netmask, rnh);
		if (rn == NULL)
			senderr(ESRCH);
		if (rn->rn_flags & (RNF_ACTIVE | RNF_ROOT))
			panic ("rtrequest delete");
		rt = RNTORT(rn);
		RT_LOCK(rt);
		RT_ADDREF(rt);
		rt->rt_flags &= ~RTF_UP;

		/*
		 * Now search what's left of the subtree for any cloned
		 * routes which might have been formed from this node.
		 */
		if ((rt->rt_flags & RTF_CLONING) &&
		    rt_mask(rt)) {
			rnh->rnh_walktree_from(rnh, dst, rt_mask(rt),
					       rt_fixdelete, rt);
		}

		/*
		 * Remove any external references we may have.
		 * This might result in another rtentry being freed if
		 * we held its last reference.
		 */
		if (rt->rt_gwroute) {
			RTFREE(rt->rt_gwroute);
			rt->rt_gwroute = NULL;
		}

		/*
		 * give the protocol a chance to keep things in sync.
		 */
		if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest)
			ifa->ifa_rtrequest(RTM_DELETE, rt, info);

		/*
		 * One more rtentry floating around that is not
		 * linked to the routing table. rttrash will be decremented
		 * when RTFREE(rt) is eventually called.
		 */
		rttrash++;

		/*
		 * If the caller wants it, then it can have it,
		 * but it's up to it to free the rtentry as we won't be
		 * doing it.
		 */
		if (ret_nrt) {
			*ret_nrt = rt;
			RT_UNLOCK(rt);
		} else
			RTFREE_LOCKED(rt);
		break;

	case RTM_RESOLVE:
		/* Clone *ret_nrt (the parent) into a new host route. */
		if (ret_nrt == NULL || (rt = *ret_nrt) == NULL)
			senderr(EINVAL);
		ifa = rt->rt_ifa;
		/* XXX locking? */
		flags = rt->rt_flags &
		    ~(RTF_CLONING | RTF_STATIC);
		flags |= RTF_WASCLONED;
		gateway = rt->rt_gateway;
		if ((netmask = rt->rt_genmask) == NULL)
			flags |= RTF_HOST;
		goto makeroute;

	case RTM_ADD:
		if ((flags & RTF_GATEWAY) && !gateway)
			senderr(EINVAL);
		if (dst && gateway && (dst->sa_family != gateway->sa_family) &&
		    (gateway->sa_family != AF_UNSPEC) && (gateway->sa_family != AF_LINK))
			senderr(EINVAL);

		if (info->rti_ifa == NULL && (error = rt_getifa_fib(info, fibnum)))
			senderr(error);
		ifa = info->rti_ifa;

	makeroute:
		rt = uma_zalloc(rtzone, M_NOWAIT | M_ZERO);
		if (rt == NULL)
			senderr(ENOBUFS);
		RT_LOCK_INIT(rt);
		rt->rt_flags = RTF_UP | flags;
		rt->rt_fibnum = fibnum;
		/*
		 * Add the gateway. Possibly re-malloc-ing the storage for it
		 * also add the rt_gwroute if possible.
		 */
		RT_LOCK(rt);
		if ((error = rt_setgate(rt, dst, gateway)) != 0) {
			RT_LOCK_DESTROY(rt);
			uma_zfree(rtzone, rt);
			senderr(error);
		}

		/*
		 * point to the (possibly newly malloc'd) dest address.
		 */
		ndst = (struct sockaddr *)rt_key(rt);

		/*
		 * make sure it contains the value we want (masked if needed).
		 */
		if (netmask) {
			rt_maskedcopy(dst, ndst, netmask);
		} else
			bcopy(dst, ndst, dst->sa_len);

		/*
		 * Note that we now have a reference to the ifa.
		 * This moved from below so that rnh->rnh_addaddr() can
		 * examine the ifa and ifa->ifa_ifp if it so desires.
		 */
		IFAREF(ifa);
		rt->rt_ifa = ifa;
		rt->rt_ifp = ifa->ifa_ifp;

		/* XXX mtu manipulation will be done in rnh_addaddr -- itojun */
		rn = rnh->rnh_addaddr(ndst, netmask, rnh, rt->rt_nodes);
		if (rn == NULL) {
			struct rtentry *rt2;
			/*
			 * Uh-oh, we already have one of these in the tree.
			 * We do a special hack: if the route that's already
			 * there was generated by the cloning mechanism
			 * then we just blow it away and retry the insertion
			 * of the new one.
			 */
			rt2 = rtalloc1_fib(dst, 0, 0, fibnum);
			if (rt2 && rt2->rt_parent) {
				rtexpunge(rt2);
				RT_UNLOCK(rt2);
				rn = rnh->rnh_addaddr(ndst, netmask,
						      rnh, rt->rt_nodes);
			} else if (rt2) {
				/* undo the extra ref we got */
				RTFREE_LOCKED(rt2);
			}
		}

		/*
		 * If it still failed to go into the tree,
		 * then un-make it (this should be a function)
		 */
		if (rn == NULL) {
			if (rt->rt_gwroute)
				RTFREE(rt->rt_gwroute);
			if (rt->rt_ifa)
				IFAFREE(rt->rt_ifa);
			Free(rt_key(rt));
			RT_LOCK_DESTROY(rt);
			uma_zfree(rtzone, rt);
			senderr(EEXIST);
		}

		rt->rt_parent = NULL;

		/*
		 * If we got here from RESOLVE, then we are cloning
		 * so clone the rest, and note that we
		 * are a clone (and increment the parent's references)
		 */
		if (req == RTM_RESOLVE) {
			KASSERT(ret_nrt && *ret_nrt,
			    ("no route to clone from"));
			rt->rt_rmx = (*ret_nrt)->rt_rmx; /* copy metrics */
			rt->rt_rmx.rmx_pksent = 0; /* reset packet counter */
			if ((*ret_nrt)->rt_flags & RTF_CLONING) {
				/*
				 * NB: We do not bump the refcnt on the parent
				 * entry under the assumption that it will
				 * remain so long as we do. This is
				 * important when deleting the parent route
				 * as this operation requires traversing
				 * the tree to delete all clones and futzing
				 * with refcnts requires us to double-lock
				 * parent through this back reference.
				 */
				rt->rt_parent = *ret_nrt;
			}
		}

		/*
		 * If this protocol has something to add to this then
		 * allow it to do that as well.
		 */
		if (ifa->ifa_rtrequest)
			ifa->ifa_rtrequest(req, rt, info);

		/*
		 * We repeat the same procedure from rt_setgate() here because
		 * it doesn't fire when we call it there because the node
		 * hasn't been added to the tree yet.
		 */
		if (req == RTM_ADD &&
		    !(rt->rt_flags & RTF_HOST) && rt_mask(rt) != NULL) {
			struct rtfc_arg arg;
			arg.rnh = rnh;
			arg.rt0 = rt;
			rnh->rnh_walktree_from(rnh, rt_key(rt), rt_mask(rt),
					       rt_fixchange, &arg);
		}

		/*
		 * actually return a resultant rtentry and
		 * give the caller a single reference.
		 */
		if (ret_nrt) {
			*ret_nrt = rt;
			RT_ADDREF(rt);
		}
		RT_UNLOCK(rt);
		break;
	default:
		error = EOPNOTSUPP;
	}
bad:
	RADIX_NODE_HEAD_UNLOCK(rnh);
	return (error);
#undef senderr
}
1132
1133 #undef dst
1134 #undef gateway
1135 #undef netmask
1136 #undef ifaaddr
1137 #undef ifpaddr
1138 #undef flags
1139
1140 /*
1141 * Called from rtrequest(RTM_DELETE, ...) to fix up the route's ``family''
1142 * (i.e., the routes related to it by the operation of cloning). This
1143 * routine is iterated over all potential former-child-routes by way of
1144 * rnh->rnh_walktree_from() above, and those that actually are children of
1145 * the late parent (passed in as VP here) are themselves deleted.
1146 */
1147 static int
1148 rt_fixdelete(struct radix_node *rn, void *vp)
1149 {
1150 struct rtentry *rt = RNTORT(rn);
1151 struct rtentry *rt0 = vp;
1152
1153 if (rt->rt_parent == rt0 &&
1154 !(rt->rt_flags & (RTF_PINNED | RTF_CLONING))) {
1155 return rtrequest_fib(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
1156 rt->rt_flags, NULL, rt->rt_fibnum);
1157 }
1158 return 0;
1159 }
1160
1161 /*
1162 * This routine is called from rt_setgate() to do the analogous thing for
1163 * adds and changes. There is the added complication in this case of a
1164 * middle insert; i.e., insertion of a new network route between an older
1165 * network route and (cloned) host routes. For this reason, a simple check
1166 * of rt->rt_parent is insufficient; each candidate route must be tested
1167 * against the (mask, value) of the new route (passed as before in vp)
1168 * to see if the new route matches it.
1169 *
1170 * XXX - it may be possible to do fixdelete() for changes and reserve this
1171 * routine just for adds. I'm not sure why I thought it was necessary to do
1172 * changes this way.
1173 */
1174
static int
rt_fixchange(struct radix_node *rn, void *vp)
{
	struct rtentry *rt = RNTORT(rn);
	struct rtfc_arg *ap = vp;
	struct rtentry *rt0 = ap->rt0;		/* the route just added/changed */
	struct radix_node_head *rnh = ap->rnh;
	u_char *xk1, *xm1, *xk2, *xmp;
	int i, len, mlen;

	/* make sure we have a parent, and route is not pinned or cloning */
	if (!rt->rt_parent ||
	    (rt->rt_flags & (RTF_PINNED | RTF_CLONING)))
		return 0;

	if (rt->rt_parent == rt0)	/* parent match */
		goto delete_rt;
	/*
	 * There probably is a function somewhere which does this...
	 * if not, there should be.
	 */
	len = imin(rt_key(rt0)->sa_len, rt_key(rt)->sa_len);

	xk1 = (u_char *)rt_key(rt0);	/* new route's key */
	xm1 = (u_char *)rt_mask(rt0);	/* new route's mask */
	xk2 = (u_char *)rt_key(rt);	/* candidate clone's key */

	/* avoid applying a less specific route */
	xmp = (u_char *)rt_mask(rt->rt_parent);
	mlen = rt_key(rt->rt_parent)->sa_len;
	if (mlen > rt_key(rt0)->sa_len)		/* less specific route */
		return 0;
	/*
	 * (xmp[i] & ~(xmp[i] ^ xm1[i])) == xmp[i] & xm1[i]; requiring it
	 * to equal xmp[i] checks that every bit set in the parent's mask
	 * is also set in the new route's mask, i.e. that the new route is
	 * at least as specific as the clone's current parent.
	 */
	for (i = rnh->rnh_treetop->rn_offset; i < mlen; i++)
		if ((xmp[i] & ~(xmp[i] ^ xm1[i])) != xmp[i])
			return 0;	/* less specific route */

	/* Does the clone's key fall under the new route's (key, mask)? */
	for (i = rnh->rnh_treetop->rn_offset; i < len; i++)
		if ((xk2[i] & xm1[i]) != xk1[i])
			return 0;	/* no match */

	/*
	 * OK, this node is a clone, and matches the node currently being
	 * changed/added under the node's mask.  So, get rid of it.
	 */
delete_rt:
	return rtrequest_fib(RTM_DELETE, rt_key(rt), NULL,
	    rt_mask(rt), rt->rt_flags, NULL, rt->rt_fibnum);
}
1223
/*
 * Set the gateway of a route.  Allocates (or reuses) the combined
 * dst+gateway sockaddr storage hung off the rtentry, resolves and
 * caches the route to the gateway itself (rt_gwroute), and sweeps
 * away any stale cloned children.  Called with rt locked; returns 0
 * or an errno.
 */
int
rt_setgate(struct rtentry *rt, struct sockaddr *dst, struct sockaddr *gate)
{
	/* XXX dst may be overwritten, can we move this to below */
	struct radix_node_head *rnh = rt_tables[rt->rt_fibnum][dst->sa_family];
	int dlen = SA_SIZE(dst), glen = SA_SIZE(gate);

again:
	RT_LOCK_ASSERT(rt);

	/*
	 * A host route with the destination equal to the gateway
	 * will interfere with keeping LLINFO in the routing
	 * table, so disallow it.
	 */
	if (((rt->rt_flags & (RTF_HOST|RTF_GATEWAY|RTF_LLINFO)) ==
	    (RTF_HOST|RTF_GATEWAY)) &&
	    dst->sa_len == gate->sa_len &&
	    bcmp(dst, gate, dst->sa_len) == 0) {
		/*
		 * The route might already exist if this is an RTM_CHANGE
		 * or a routing redirect, so try to delete it.
		 */
		if (rt_key(rt))
			rtexpunge(rt);
		return EADDRNOTAVAIL;
	}

	/*
	 * Cloning loop avoidance in case of bad configuration.
	 */
	if (rt->rt_flags & RTF_GATEWAY) {
		struct rtentry *gwrt;

		RT_UNLOCK(rt);		/* XXX workaround LOR */
		gwrt = rtalloc1_fib(gate, 1, 0, rt->rt_fibnum);
		if (gwrt == rt) {
			/*
			 * The route to the gateway is the route itself:
			 * refuse, or we would chase our own tail.
			 * NOTE(review): rt is unlocked at this point, yet
			 * RT_REMREF is applied to it — confirm this is
			 * consistent with RT_REMREF's locking contract.
			 */
			RT_REMREF(rt);
			return (EADDRINUSE); /* failure */
		}
		/*
		 * Try to reacquire the lock on rt, and if it fails,
		 * clean state and restart from scratch.
		 */
		if (!RT_TRYLOCK(rt)) {
			RTFREE_LOCKED(gwrt);
			RT_LOCK(rt);
			goto again;
		}
		/*
		 * If there is already a gwroute, then drop it. If we
		 * are asked to replace route with itself, then do
		 * not leak its refcounter.
		 */
		if (rt->rt_gwroute != NULL) {
			if (rt->rt_gwroute == gwrt) {
				RT_REMREF(rt->rt_gwroute);
			} else
				RTFREE(rt->rt_gwroute);
		}

		/* gwrt arrives locked from rtalloc1_fib(); release it. */
		if ((rt->rt_gwroute = gwrt) != NULL)
			RT_UNLOCK(rt->rt_gwroute);
	}

	/*
	 * Prepare to store the gateway in rt->rt_gateway.
	 * Both dst and gateway are stored one after the other in the same
	 * malloc'd chunk. If we have room, we can reuse the old buffer,
	 * rt_gateway already points to the right place.
	 * Otherwise, malloc a new block and update the 'dst' address.
	 */
	if (rt->rt_gateway == NULL || glen > SA_SIZE(rt->rt_gateway)) {
		caddr_t new;

		R_Malloc(new, caddr_t, dlen + glen);
		if (new == NULL)
			return ENOBUFS;
		/*
		 * XXX note, we copy from *dst and not *rt_key(rt) because
		 * rt_setgate() can be called to initialize a newly
		 * allocated route entry, in which case rt_key(rt) == NULL
		 * (and also rt->rt_gateway == NULL).
		 * Free()/free() handle a NULL argument just fine.
		 */
		bcopy(dst, new, dlen);
		Free(rt_key(rt));	/* free old block, if any */
		rt_key(rt) = (struct sockaddr *)new;
		rt->rt_gateway = (struct sockaddr *)(new + dlen);
	}

	/*
	 * Copy the new gateway value into the memory chunk.
	 */
	bcopy(gate, rt->rt_gateway, glen);

	/*
	 * This isn't going to do anything useful for host routes, so
	 * don't bother.  Also make sure we have a reasonable mask
	 * (we don't yet have one during adds).
	 */
	if (!(rt->rt_flags & RTF_HOST) && rt_mask(rt) != 0) {
		struct rtfc_arg arg;

		arg.rnh = rnh;
		arg.rt0 = rt;
		RT_UNLOCK(rt);		/* XXX workaround LOR */
		RADIX_NODE_HEAD_LOCK(rnh);
		RT_LOCK(rt);
		/* Delete any cloned children now shadowed by this route. */
		rnh->rnh_walktree_from(rnh, rt_key(rt), rt_mask(rt),
		    rt_fixchange, &arg);
		RADIX_NODE_HEAD_UNLOCK(rnh);
	}

	return 0;
}
1340
/*
 * Copy 'src' into 'dst' with the host bits cleared: the sa_len and
 * sa_family header bytes are copied verbatim, every following byte is
 * ANDed with the corresponding byte of 'netmask', and any bytes of
 * 'src' beyond the end of a shorter netmask are zeroed.  The first
 * byte of each sockaddr is taken as its length (BSD sa_len layout).
 */
static void
rt_maskedcopy(struct sockaddr *src, struct sockaddr *dst, struct sockaddr *netmask)
{
	unsigned char *in = (unsigned char *)src;
	unsigned char *out = (unsigned char *)dst;
	unsigned char *mask = (unsigned char *)netmask;
	unsigned char *outend = out + in[0];	/* end of the copy */
	unsigned char *maskend = out + mask[0];	/* end of masked bytes */
	size_t i;

	if (maskend > outend)
		maskend = outend;
	/* sa_len and sa_family pass through unmasked. */
	out[0] = in[0];
	out[1] = in[1];
	for (i = 2; out + i < maskend; i++)
		out[i] = in[i] & mask[i];
	if (out + i < outend)
		memset(out + i, 0, (size_t)(outend - (out + i)));
}
1359
1360 /*
1361 * Set up a routing table entry, normally
1362 * for an interface.
1363 */
1364 #define _SOCKADDR_TMPSIZE 128 /* Not too big.. kernel stack size is limited */
/*
 * Common worker for rtinit()/rtinit_fib(): add or delete the route
 * associated with interface address 'ifa' in one fib, or in all fibs
 * when fibnum == -1 (only meaningful for AF_INET here).  'cmd' is
 * RTM_ADD or RTM_DELETE; 'flags' are RTF_* route flags.  Returns 0 on
 * success or an errno; for deletes an error is only reported if the
 * route was absent from every table examined.
 */
static inline int
rtinit1(struct ifaddr *ifa, int cmd, int flags, int fibnum)
{
	struct sockaddr *dst;
	struct sockaddr *netmask;
	struct rtentry *rt = NULL;
	struct rt_addrinfo info;
	int error = 0;
	int startfib, endfib;
	char tempbuf[_SOCKADDR_TMPSIZE];	/* masked-dst scratch space */
	int didwork = 0;	/* at least one table acted on the request */
	int a_failure = 0;	/* last error seen across the tables */

	/* Host routes point at the peer; network routes at our address. */
	if (flags & RTF_HOST) {
		dst = ifa->ifa_dstaddr;
		netmask = NULL;
	} else {
		dst = ifa->ifa_addr;
		netmask = ifa->ifa_netmask;
	}
	/* Only AF_INET supports multiple fibs in this version. */
	if ( dst->sa_family != AF_INET)
		fibnum = 0;
	if (fibnum == -1) {
		if (rt_add_addr_allfibs == 0 && cmd == (int)RTM_ADD) {
			/* Add only to the process's own fib. */
			startfib = endfib = curthread->td_proc->p_fibnum;
		} else {
			startfib = 0;
			endfib = rt_numfibs - 1;
		}
	} else {
		KASSERT((fibnum < rt_numfibs), ("rtinit1: bad fibnum"));
		startfib = fibnum;
		endfib = fibnum;
	}
	if (dst->sa_len == 0)
		return(EINVAL);

	/*
	 * If it's a delete, check that if it exists,
	 * it's on the correct interface or we might scrub
	 * a route to another ifa which would
	 * be confusing at best and possibly worse.
	 */
	if (cmd == RTM_DELETE) {
		/*
		 * It's a delete, so it should already exist..
		 * If it's a net, mask off the host bits
		 * (Assuming we have a mask)
		 * XXX this is kinda inet specific..
		 */
		if (netmask != NULL) {
			rt_maskedcopy(dst, (struct sockaddr *)tempbuf, netmask);
			dst = (struct sockaddr *)tempbuf;
		}
	}
	/*
	 * Now go through all the requested tables (fibs) and do the
	 * requested action. Realistically, this will either be fib 0
	 * for protocols that don't do multiple tables or all the
	 * tables for those that do. XXX For this version only AF_INET.
	 * When that changes code should be refactored to protocol
	 * independent parts and protocol dependent parts.
	 */
	for ( fibnum = startfib; fibnum <= endfib; fibnum++) {
		if (cmd == RTM_DELETE) {
			struct radix_node_head *rnh;
			struct radix_node *rn;
			/*
			 * Look up an rtentry that is in the routing tree and
			 * contains the correct info.
			 */
			if ((rnh = rt_tables[fibnum][dst->sa_family]) == NULL)
				/* this table doesn't exist but others might */
				continue;
			RADIX_NODE_HEAD_LOCK(rnh);
			/*
			 * 'error' here is just a boolean: nonzero if the
			 * entry is missing, is the radix root, or belongs
			 * to a different ifa.  It is overwritten by the
			 * rtrequest1_fib() result below.
			 */
			rn = rnh->rnh_lookup(dst, netmask, rnh);
			error = (rn == NULL ||
			    (rn->rn_flags & RNF_ROOT) ||
			    RNTORT(rn)->rt_ifa != ifa ||
			    !sa_equal((struct sockaddr *)rn->rn_key, dst));
			RADIX_NODE_HEAD_UNLOCK(rnh);
			if (error) {
				/* this is only an error if bad on ALL tables */
				continue;
			}
		}
		/*
		 * Do the actual request
		 */
		bzero((caddr_t)&info, sizeof(info));
		info.rti_ifa = ifa;
		info.rti_flags = flags | ifa->ifa_flags;
		info.rti_info[RTAX_DST] = dst;
		/* The interface address itself serves as the gateway. */
		info.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
		info.rti_info[RTAX_NETMASK] = netmask;
		error = rtrequest1_fib(cmd, &info, &rt, fibnum);
		if (error == 0 && rt != NULL) {
			/*
			 * notify any listening routing agents of the change
			 */
			RT_LOCK(rt);
			rt_newaddrmsg(cmd, ifa, error, rt);
			if (cmd == RTM_DELETE) {
				/*
				 * If we are deleting, and we found an entry, then
				 * it's been removed from the tree.. now throw it away.
				 */
				RTFREE_LOCKED(rt);
			} else {
				if (cmd == RTM_ADD) {
					/*
					 * We just wanted to add it.. we don't actually
					 * need a reference.
					 */
					RT_REMREF(rt);
				}
				RT_UNLOCK(rt);
			}
			didwork = 1;
		}
		if (error)
			a_failure = error;
	}
	if (cmd == RTM_DELETE) {
		if (didwork) {
			error = 0;
		} else {
			/* we only give an error if it wasn't in any table */
			error = ((flags & RTF_HOST) ?
			    EHOSTUNREACH : ENETUNREACH);
		}
	} else {
		if (a_failure) {
			/* return an error if any of them failed */
			error = a_failure;
		}
	}
	return (error);
}
1504
/*
 * Special entry point for inet internal use: like rtinit() but always
 * operates on all fibs (fibnum == -1).  May not be used elsewhere.
 */
int
rtinit_fib(struct ifaddr *ifa, int cmd, int flags)
{
	return (rtinit1(ifa, cmd, flags, -1));
}
1511
1512 /*
1513 * Set up a routing table entry, normally
1514 * for an interface.
1515 */
1516 int
1517 rtinit(struct ifaddr *ifa, int cmd, int flags)
1518 {
1519 struct sockaddr *dst;
1520 int fib = 0;
1521
1522 if (flags & RTF_HOST) {
1523 dst = ifa->ifa_dstaddr;
1524 } else {
1525 dst = ifa->ifa_addr;
1526 }
1527
1528 if (dst->sa_family == AF_INET)
1529 fib = -1;
1530 return (rtinit1(ifa, cmd, flags, fib));
1531 }
1532
1533 /*
1534 * rt_check() is invoked on each layer 2 output path, prior to
1535 * encapsulating outbound packets.
1536 *
1537 * The function is mostly used to find a routing entry for the gateway,
1538 * which in some protocol families could also point to the link-level
1539 * address for the gateway itself (the side effect of revalidating the
1540 * route to the destination is rather pointless at this stage, we did it
1541 * already a moment before in the pr_output() routine to locate the ifp
1542 * and gateway to use).
1543 *
1544 * When we remove the layer-3 to layer-2 mapping tables from the
1545 * routing table, this function can be removed.
1546 *
1547 * === On input ===
1548 * *dst is the address of the NEXT HOP (which coincides with the
1549 * final destination if directly reachable);
1550 * *lrt0 points to the cached route to the final destination;
1551 * *lrt is not meaningful;
1552 * (*lrt0 has no ref held on it by us so REMREF is not needed.
1553 * Refs only account for major structural references and not usages,
1554 * which is actually a bit of a problem.)
1555 *
1556 * === Operation ===
1557 * If the route is marked down try to find a new route. If the route
1558 * to the gateway is gone, try to setup a new route. Otherwise,
1559 * if the route is marked for packets to be rejected, enforce that.
1560 * Note that rtalloc returns an rtentry with an extra REF that we may
1561 * need to lose.
1562 *
1563 * === On return ===
1564 * *dst is unchanged;
1565 * *lrt0 points to the (possibly new) route to the final destination
1566 * *lrt points to the route to the next hop [LOCKED]
1567 *
1568 * Their values are meaningful ONLY if no error is returned.
1569 *
1570 * To follow this you have to remember that:
1571 * RT_REMREF reduces the reference count by 1 but doesn't check it for 0 (!)
1572 * RTFREE_LOCKED includes an RT_REMREF (or an rtfree if refs == 1)
1573 * and an RT_UNLOCK
1574 * RTFREE does an RT_LOCK and an RTFREE_LOCKED
1575 * The gwroute pointer counts as a reference on the rtentry to which it points.
1576 * so when we add it we use the ref that rtalloc gives us and when we lose it
1577 * we need to remove the reference.
1578 * RT_TEMP_UNLOCK does an RT_ADDREF before freeing the lock, and
1579 * RT_RELOCK locks it (it can't have gone away due to the ref) and
1580 * drops the ref, possibly freeing it and zeroing the pointer if
1581 * the ref goes to 0 (unlocking in the process).
1582 */
int
rt_check(struct rtentry **lrt, struct rtentry **lrt0, struct sockaddr *dst)
{
	struct rtentry *rt;	/* route to the next hop (result) */
	struct rtentry *rt0;	/* route to the final destination */
	u_int fibnum;

	KASSERT(*lrt0 != NULL, ("rt_check"));
	rt0 = *lrt0;
	rt = NULL;
	fibnum = rt0->rt_fibnum;

	/* NB: the locking here is tortuous... */
	RT_LOCK(rt0);
retry:
	if (rt0 && (rt0->rt_flags & RTF_UP) == 0) {
		/* Current rt0 is useless, try get a replacement. */
		RT_UNLOCK(rt0);
		rt0 = NULL;
	}
	if (rt0 == NULL) {
		rt0 = rtalloc1_fib(dst, 1, 0UL, fibnum);
		if (rt0 == NULL) {
			return (EHOSTUNREACH);
		}
		RT_REMREF(rt0);		/* don't need the reference. */
	}

	if (rt0->rt_flags & RTF_GATEWAY) {
		/* First try the cached route to the gateway, if any. */
		if ((rt = rt0->rt_gwroute) != NULL) {
			RT_LOCK(rt);		/* NB: gwroute */
			if ((rt->rt_flags & RTF_UP) == 0) {
				/* gw route is dud. ignore/lose it */
				RTFREE_LOCKED(rt); /* unref (&unlock) gwroute */
				rt = rt0->rt_gwroute = NULL;
			}
		}

		if (rt == NULL) {  /* NOT AN ELSE CLAUSE */
			/*
			 * No usable cached gwroute: drop the lock on rt0
			 * (keeping a temp ref) and look one up.
			 */
			RT_TEMP_UNLOCK(rt0); /* MUST return to undo this */
			rt = rtalloc1_fib(rt0->rt_gateway, 1, 0UL, fibnum);
			if ((rt == rt0) || (rt == NULL)) {
				/* the best we can do is not good enough */
				if (rt) {
					RT_REMREF(rt);	/* assumes ref > 0 */
					RT_UNLOCK(rt);
				}
				RTFREE(rt0); /* lock, unref, (unlock) */
				return (ENETUNREACH);
			}
			/*
			 * Relock it and lose the added reference.
			 * All sorts of things could have happened while we
			 * had no lock on it, so check for them.
			 */
			RT_RELOCK(rt0);
			if (rt0 == NULL || ((rt0->rt_flags & RTF_UP) == 0))
				/* Ru-roh.. what we had is no longer any good */
				goto retry;
			/*
			 * While we were away, someone replaced the gateway.
			 * Since a reference count is involved we can't just
			 * overwrite it.
			 */
			if (rt0->rt_gwroute) {
				if (rt0->rt_gwroute != rt) {
					RTFREE_LOCKED(rt);
					goto retry;
				}
			} else {
				/* Cache it; rt's ref is consumed by gwroute. */
				rt0->rt_gwroute = rt;
			}
		}
		RT_LOCK_ASSERT(rt);
		RT_UNLOCK(rt0);
	} else {
		/* Next hop is the destination itself. */
		/* think of rt as having the lock from now on.. */
		rt = rt0;
	}
	/* XXX why are we inspecting rmx_expire? */
	if ((rt->rt_flags & RTF_REJECT) &&
	    (rt->rt_rmx.rmx_expire == 0 ||
	    time_uptime < rt->rt_rmx.rmx_expire)) {
		RT_UNLOCK(rt);
		return (rt == rt0 ? EHOSTDOWN : EHOSTUNREACH);
	}

	/* Success: *lrt is returned LOCKED, per the contract above. */
	*lrt = rt;
	*lrt0 = rt0;
	return (0);
}
1674
/*
 * Register route_init() to run at boot.
 * This must be before ip6_init2(), which is now SI_ORDER_MIDDLE.
 */
SYSINIT(route, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, route_init, 0);
Cache object: 020a7d44d260608c18c5219fe8475fc3
|