FreeBSD/Linux Kernel Cross Reference
sys/netinet/in_rmx.c
1 /*-
2 * Copyright 1994, 1995 Massachusetts Institute of Technology
3 *
4 * Permission to use, copy, modify, and distribute this software and
5 * its documentation for any purpose and without fee is hereby
6 * granted, provided that both the above copyright notice and this
7 * permission notice appear in all copies, that both the above
8 * copyright notice and this permission notice appear in all
9 * supporting documentation, and that the name of M.I.T. not be used
10 * in advertising or publicity pertaining to distribution of the
11 * software without specific, written prior permission. M.I.T. makes
12 * no representations about the suitability of this software for any
13 * purpose. It is provided "as is" without express or implied
14 * warranty.
15 *
16 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
17 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
18 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
20 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
23 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
25 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
26 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD: releng/10.2/sys/netinet/in_rmx.c 263478 2014-03-21 15:15:30Z glebius $");
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/kernel.h>
36 #include <sys/sysctl.h>
37 #include <sys/socket.h>
38 #include <sys/mbuf.h>
39 #include <sys/syslog.h>
40 #include <sys/callout.h>
41
42 #include <net/if.h>
43 #include <net/route.h>
44 #include <net/vnet.h>
45
46 #include <netinet/in.h>
47 #include <netinet/in_var.h>
48 #include <netinet/ip.h>
49 #include <netinet/ip_icmp.h>
50 #include <netinet/ip_var.h>
51
52 extern int in_inithead(void **head, int off);
53 #ifdef VIMAGE
54 extern int in_detachhead(void **head, int off);
55 #endif
56
57 #define RTPRF_OURS RTF_PROTO3 /* set on routes we manage */
58
59 /*
60 * Do what we need to do when inserting a route.
61 */
62 static struct radix_node *
63 in_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
64 struct radix_node *treenodes)
65 {
66 struct rtentry *rt = (struct rtentry *)treenodes;
67 struct sockaddr_in *sin = (struct sockaddr_in *)rt_key(rt);
68
69 RADIX_NODE_HEAD_WLOCK_ASSERT(head);
70 /*
71 * A little bit of help for both IP output and input:
72 * For host routes, we make sure that RTF_BROADCAST
73 * is set for anything that looks like a broadcast address.
74 * This way, we can avoid an expensive call to in_broadcast()
75 * in ip_output() most of the time (because the route passed
76 * to ip_output() is almost always a host route).
77 *
78 * We also do the same for local addresses, with the thought
79 * that this might one day be used to speed up ip_input().
80 *
81 * We also mark routes to multicast addresses as such, because
82 * it's easy to do and might be useful (but this is much more
83 * dubious since it's so easy to inspect the address).
84 */
85 if (rt->rt_flags & RTF_HOST) {
86 if (in_broadcast(sin->sin_addr, rt->rt_ifp)) {
87 rt->rt_flags |= RTF_BROADCAST;
88 } else if (satosin(rt->rt_ifa->ifa_addr)->sin_addr.s_addr ==
89 sin->sin_addr.s_addr) {
90 rt->rt_flags |= RTF_LOCAL;
91 }
92 }
93 if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
94 rt->rt_flags |= RTF_MULTICAST;
95
96 if (rt->rt_mtu == 0 && rt->rt_ifp != NULL)
97 rt->rt_mtu = rt->rt_ifp->if_mtu;
98
99 return (rn_addroute(v_arg, n_arg, head, treenodes));
100 }
101
102 /*
103 * This code is the inverse of in_clsroute: on first reference, if we
104 * were managing the route, stop doing so and turn the expiration timer
105 * off again.
106 */
107 static struct radix_node *
108 in_matroute(void *v_arg, struct radix_node_head *head)
109 {
110 struct radix_node *rn = rn_match(v_arg, head);
111 struct rtentry *rt = (struct rtentry *)rn;
112
113 if (rt) {
114 RT_LOCK(rt);
115 if (rt->rt_flags & RTPRF_OURS) {
116 rt->rt_flags &= ~RTPRF_OURS;
117 rt->rt_expire = 0;
118 }
119 RT_UNLOCK(rt);
120 }
121 return rn;
122 }
123
124 static VNET_DEFINE(int, rtq_reallyold) = 60*60; /* one hour is "really old" */
125 #define V_rtq_reallyold VNET(rtq_reallyold)
126 SYSCTL_VNET_INT(_net_inet_ip, IPCTL_RTEXPIRE, rtexpire, CTLFLAG_RW,
127 &VNET_NAME(rtq_reallyold), 0,
128 "Default expiration time on dynamically learned routes");
129
130 /* never automatically crank down to less */
131 static VNET_DEFINE(int, rtq_minreallyold) = 10;
132 #define V_rtq_minreallyold VNET(rtq_minreallyold)
133 SYSCTL_VNET_INT(_net_inet_ip, IPCTL_RTMINEXPIRE, rtminexpire, CTLFLAG_RW,
134 &VNET_NAME(rtq_minreallyold), 0,
135 "Minimum time to attempt to hold onto dynamically learned routes");
136
137 /* 128 cached routes is "too many" */
138 static VNET_DEFINE(int, rtq_toomany) = 128;
139 #define V_rtq_toomany VNET(rtq_toomany)
140 SYSCTL_VNET_INT(_net_inet_ip, IPCTL_RTMAXCACHE, rtmaxcache, CTLFLAG_RW,
141 &VNET_NAME(rtq_toomany), 0,
142 "Upper limit on dynamically learned routes");
143
144 /*
145 * On last reference drop, mark the route as belonging to us so that it can be
146 * timed out.
147 */
148 static void
149 in_clsroute(struct radix_node *rn, struct radix_node_head *head)
150 {
151 struct rtentry *rt = (struct rtentry *)rn;
152
153 RT_LOCK_ASSERT(rt);
154
155 if (!(rt->rt_flags & RTF_UP))
156 return; /* prophylactic measures */
157
158 if (rt->rt_flags & RTPRF_OURS)
159 return;
160
161 if (!(rt->rt_flags & RTF_DYNAMIC))
162 return;
163
164 /*
165 * If rtq_reallyold is 0, just delete the route without
166 * waiting for a timeout cycle to kill it.
167 */
168 if (V_rtq_reallyold != 0) {
169 rt->rt_flags |= RTPRF_OURS;
170 rt->rt_expire = time_uptime + V_rtq_reallyold;
171 } else {
172 rtexpunge(rt);
173 }
174 }
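
/*
 * Taken together, the pieces above implement the cached-route expiry
 * cycle: in_clsroute() arms RTPRF_OURS and an expiry time when the last
 * reference to a dynamically learned (RTF_DYNAMIC) route is dropped,
 * in_matroute() disarms both again the next time the route is looked up,
 * and in_rtqkill() below, driven by the in_rtqtimo() callout, reaps
 * whatever is still marked once its expiry time has passed.
 */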
175
176 struct rtqk_arg {
177 struct radix_node_head *rnh;
178 int draining;
179 int killed;
180 int found;
181 int updating;
182 time_t nextstop;
183 };
184
185 /*
186 * Get rid of old routes. When draining, this deletes everything, even when
187 * the timeout has not expired yet. When updating, this makes sure that
188 * nothing has a timeout longer than the current value of rtq_reallyold.
189 */
190 static int
191 in_rtqkill(struct radix_node *rn, void *rock)
192 {
193 struct rtqk_arg *ap = rock;
194 struct rtentry *rt = (struct rtentry *)rn;
195 int err;
196
197 RADIX_NODE_HEAD_WLOCK_ASSERT(ap->rnh);
198
199 if (rt->rt_flags & RTPRF_OURS) {
200 ap->found++;
201
202 if (ap->draining || rt->rt_expire <= time_uptime) {
203 if (rt->rt_refcnt > 0)
204 panic("rtqkill route really not free");
205
206 err = in_rtrequest(RTM_DELETE,
207 (struct sockaddr *)rt_key(rt),
208 rt->rt_gateway, rt_mask(rt),
209 rt->rt_flags | RTF_RNH_LOCKED, 0,
210 rt->rt_fibnum);
211 if (err) {
212 log(LOG_WARNING, "in_rtqkill: error %d\n", err);
213 } else {
214 ap->killed++;
215 }
216 } else {
217 if (ap->updating &&
218 (rt->rt_expire - time_uptime > V_rtq_reallyold))
219 rt->rt_expire = time_uptime + V_rtq_reallyold;
220 ap->nextstop = lmin(ap->nextstop, rt->rt_expire);
221 }
222 }
223
224 return 0;
225 }
226
227 #define RTQ_TIMEOUT 60*10 /* run no less than once every ten minutes */
228 static VNET_DEFINE(int, rtq_timeout) = RTQ_TIMEOUT;
229 static VNET_DEFINE(struct callout, rtq_timer);
230
231 #define V_rtq_timeout VNET(rtq_timeout)
232 #define V_rtq_timer VNET(rtq_timer)
233
234 static void in_rtqtimo_one(void *rock);
235
236 static void
237 in_rtqtimo(void *rock)
238 {
239 CURVNET_SET((struct vnet *) rock);
240 int fibnum;
241 void *newrock;
242 struct timeval atv;
243
244 for (fibnum = 0; fibnum < rt_numfibs; fibnum++) {
245 newrock = rt_tables_get_rnh(fibnum, AF_INET);
246 if (newrock != NULL)
247 in_rtqtimo_one(newrock);
248 }
249 atv.tv_usec = 0;
250 atv.tv_sec = V_rtq_timeout;
251 callout_reset(&V_rtq_timer, tvtohz(&atv), in_rtqtimo, rock);
252 CURVNET_RESTORE();
253 }
254
255 static void
256 in_rtqtimo_one(void *rock)
257 {
258 struct radix_node_head *rnh = rock;
259 struct rtqk_arg arg;
260 static time_t last_adjusted_timeout = 0;
261
262 arg.found = arg.killed = 0;
263 arg.rnh = rnh;
264 arg.nextstop = time_uptime + V_rtq_timeout;
265 arg.draining = arg.updating = 0;
266 RADIX_NODE_HEAD_LOCK(rnh);
267 rnh->rnh_walktree(rnh, in_rtqkill, &arg);
268 RADIX_NODE_HEAD_UNLOCK(rnh);
269
270 /*
271 * Attempt to be somewhat dynamic about this:
272 * If there are ``too many'' routes sitting around taking up space,
273 * then crank down the timeout, and see if we can't make some more
274 * go away. However, we make sure that we will never adjust more
275 * than once in rtq_timeout seconds, to keep from cranking down too
276 * hard.
277 */
278 if ((arg.found - arg.killed > V_rtq_toomany) &&
279 (time_uptime - last_adjusted_timeout >= V_rtq_timeout) &&
280 V_rtq_reallyold > V_rtq_minreallyold) {
281 V_rtq_reallyold = 2 * V_rtq_reallyold / 3;
282 if (V_rtq_reallyold < V_rtq_minreallyold) {
283 V_rtq_reallyold = V_rtq_minreallyold;
284 }
285
286 last_adjusted_timeout = time_uptime;
287 #ifdef DIAGNOSTIC
288 log(LOG_DEBUG, "in_rtqtimo: adjusted rtq_reallyold to %d\n",
289 V_rtq_reallyold);
290 #endif
291 arg.found = arg.killed = 0;
292 arg.updating = 1;
293 RADIX_NODE_HEAD_LOCK(rnh);
294 rnh->rnh_walktree(rnh, in_rtqkill, &arg);
295 RADIX_NODE_HEAD_UNLOCK(rnh);
296 }
297
298 }
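
/*
 * A worked example of the back-off above, assuming the default settings
 * (rtq_reallyold = 3600, rtq_minreallyold = 10, rtq_timeout = 600):
 * each trigger multiplies the expiry by 2/3 with integer truncation, so
 * successive adjustments give 3600 -> 2400 -> 1600 -> 1066 -> 710 -> ...
 * until the value is clamped at rtq_minreallyold.  Since
 * last_adjusted_timeout is checked first, this happens at most once per
 * rtq_timeout seconds.
 */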
299
300 void
301 in_rtqdrain(void)
302 {
303 VNET_ITERATOR_DECL(vnet_iter);
304 struct radix_node_head *rnh;
305 struct rtqk_arg arg;
306 int fibnum;
307
308 VNET_LIST_RLOCK_NOSLEEP();
309 VNET_FOREACH(vnet_iter) {
310 CURVNET_SET(vnet_iter);
311
312 for (fibnum = 0; fibnum < rt_numfibs; fibnum++) {
313 rnh = rt_tables_get_rnh(fibnum, AF_INET);
314 arg.found = arg.killed = 0;
315 arg.rnh = rnh;
316 arg.nextstop = 0;
317 arg.draining = 1;
318 arg.updating = 0;
319 RADIX_NODE_HEAD_LOCK(rnh);
320 rnh->rnh_walktree(rnh, in_rtqkill, &arg);
321 RADIX_NODE_HEAD_UNLOCK(rnh);
322 }
323 CURVNET_RESTORE();
324 }
325 VNET_LIST_RUNLOCK_NOSLEEP();
326 }
327
328 void
329 in_setmatchfunc(struct radix_node_head *rnh, int val)
330 {
331
332 rnh->rnh_matchaddr = (val != 0) ? rn_match : in_matroute;
333 }
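
/*
 * in_setmatchfunc() lets the ICMP drop_redirect handling switch the
 * lookup function at run time: when redirects are being dropped
 * (val != 0) no RTF_DYNAMIC routes should be created, so the plain
 * rn_match() suffices and the RTPRF_OURS bookkeeping in in_matroute()
 * can be skipped.
 */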
334
335 static int _in_rt_was_here;
336 /*
337 * Initialize our routing tree.
338 */
339 int
340 in_inithead(void **head, int off)
341 {
342 struct radix_node_head *rnh;
343
344 /* XXX MRT
345 * This can be called from vfs_export.c too in which case 'off'
346 * will be 0. We know the correct value so just use that and
347 * return directly if it was 0.
348 * This is a hack that replaces an even worse hack on a bad hack
349 * on a bad design. After RELENG_7 this should be fixed but that
350 * will change the ABI, so for now do it this way.
351 */
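
	/*
	 * The hard-coded 32 below is the offset, in bits, of sin_addr
	 * within struct sockaddr_in (4 bytes in): the point at which
	 * the radix tree starts comparing IPv4 keys.
	 */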
352 if (!rn_inithead(head, 32))
353 return 0;
354
355 if (off == 0) /* XXX MRT see above */
356 return 1; /* only do the rest for a real routing table */
357
358 rnh = *head;
359 rnh->rnh_addaddr = in_addroute;
360 in_setmatchfunc(rnh, V_drop_redirect);
361 rnh->rnh_close = in_clsroute;
362 if (_in_rt_was_here == 0) {
363 callout_init(&V_rtq_timer, CALLOUT_MPSAFE);
364 callout_reset(&V_rtq_timer, 1, in_rtqtimo, curvnet);
365 _in_rt_was_here = 1;
366 }
367 return 1;
368 }
369
370 #ifdef VIMAGE
371 int
372 in_detachhead(void **head, int off)
373 {
374
375 callout_drain(&V_rtq_timer);
376 return (1);
377 }
378 #endif
379
380 /*
381 * This zaps old routes when the interface goes down or interface
382 * address is deleted. In the latter case, it deletes static routes
383 * that point to this address. If we don't do this, we may end up
384 * using the old address in the future. The ones we always want to
385 * get rid of are things like ARP entries, since the user might down
386 * the interface, walk over to a completely different network, and
387 * plug back in.
388 */
389 struct in_ifadown_arg {
390 struct ifaddr *ifa;
391 int del;
392 };
393
394 static int
395 in_ifadownkill(struct radix_node *rn, void *xap)
396 {
397 struct in_ifadown_arg *ap = xap;
398 struct rtentry *rt = (struct rtentry *)rn;
399
400 RT_LOCK(rt);
401 if (rt->rt_ifa == ap->ifa &&
402 (ap->del || !(rt->rt_flags & RTF_STATIC))) {
403 /*
404 * Acquire a reference so that it can later be freed,
405 * as the refcount would be 0 here, at least in the
406 * ap->del case.
407 */
408 RT_ADDREF(rt);
409 /*
410 * Disconnect it from the tree and permit protocols
411 * to cleanup.
412 */
413 rtexpunge(rt);
414 /*
415 * At this point it is an rttrash node, and in case
416 * the above is the only reference we must free it.
417 * If we do not, no one will have a pointer and the
418 * rtentry will be leaked forever.
419 * In case someone else holds a reference, we are
420 * fine as we only decrement the refcount. In that
421 * case if the other entity calls RT_REMREF, we
422 * will still be leaking but at least we tried.
423 */
424 RTFREE_LOCKED(rt);
425 return (0);
426 }
427 RT_UNLOCK(rt);
428 return 0;
429 }
430
431 int
432 in_ifadown(struct ifaddr *ifa, int delete)
433 {
434 struct in_ifadown_arg arg;
435 struct radix_node_head *rnh;
436 int fibnum;
437
438 if (ifa->ifa_addr->sa_family != AF_INET)
439 return 1;
440
441 for (fibnum = 0; fibnum < rt_numfibs; fibnum++) {
442 rnh = rt_tables_get_rnh(fibnum, AF_INET);
443 arg.ifa = ifa;
444 arg.del = delete;
445 RADIX_NODE_HEAD_LOCK(rnh);
446 rnh->rnh_walktree(rnh, in_ifadownkill, &arg);
447 RADIX_NODE_HEAD_UNLOCK(rnh);
448 ifa->ifa_flags &= ~IFA_ROUTE; /* XXXlocking? */
449 }
450 return 0;
451 }
452
453 /*
454 * inet versions of rt functions. These have fib extensions and
455 * for now will just reference the _fib variants.
456 * Eventually this order will be reversed.
457 */
458 void
459 in_rtalloc_ign(struct route *ro, u_long ignflags, u_int fibnum)
460 {
461 rtalloc_ign_fib(ro, ignflags, fibnum);
462 }
463
464 int
465 in_rtrequest( int req,
466 struct sockaddr *dst,
467 struct sockaddr *gateway,
468 struct sockaddr *netmask,
469 int flags,
470 struct rtentry **ret_nrt,
471 u_int fibnum)
472 {
473 return (rtrequest_fib(req, dst, gateway, netmask,
474 flags, ret_nrt, fibnum));
475 }
476
477 struct rtentry *
478 in_rtalloc1(struct sockaddr *dst, int report, u_long ignflags, u_int fibnum)
479 {
480 return (rtalloc1_fib(dst, report, ignflags, fibnum));
481 }
482
483 void
484 in_rtredirect(struct sockaddr *dst,
485 struct sockaddr *gateway,
486 struct sockaddr *netmask,
487 int flags,
488 struct sockaddr *src,
489 u_int fibnum)
490 {
491 rtredirect_fib(dst, gateway, netmask, flags, src, fibnum);
492 }
493
494 void
495 in_rtalloc(struct route *ro, u_int fibnum)
496 {
497 rtalloc_ign_fib(ro, 0UL, fibnum);
498 }
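
/*
 * A minimal sketch of how callers typically use the FIB-aware lookup
 * wrappers above (illustrative only; "m" and "ip" stand for the caller's
 * mbuf and IP header, and error handling is trimmed):
 *
 *	struct route ro;
 *	struct sockaddr_in *dst = (struct sockaddr_in *)&ro.ro_dst;
 *
 *	bzero(&ro, sizeof(ro));
 *	dst->sin_family = AF_INET;
 *	dst->sin_len = sizeof(*dst);
 *	dst->sin_addr = ip->ip_dst;
 *	in_rtalloc_ign(&ro, 0, M_GETFIB(m));
 *	if (ro.ro_rt != NULL) {
 *		// ... use ro.ro_rt, e.g. ro.ro_rt->rt_ifp ...
 *		RTFREE(ro.ro_rt);
 *	}
 */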
499
500 #if 0
501 int in_rt_getifa(struct rt_addrinfo *, u_int fibnum);
502 int in_rtioctl(u_long, caddr_t, u_int);
503 int in_rtrequest1(int, struct rt_addrinfo *, struct rtentry **, u_int);
504 #endif
505
506