sys/netinet/in_rmx.c
/*-
 * Copyright 1994, 1995 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This code does two things necessary for the enhanced TCP metrics to
 * function in a useful manner:
 *  1) It marks all non-host routes as `cloning', thus ensuring that
 *     every actual reference to such a route actually gets turned
 *     into a reference to a host route to the specific destination
 *     requested.
 *  2) When such routes lose all their references, it arranges for them
 *     to be deleted in some random collection of circumstances, so that
 *     a large quantity of stale routing data is not kept in kernel memory
 *     indefinitely.  See in_rtqtimo() below for the exact mechanism.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.0/sys/netinet/in_rmx.c 196019 2009-08-01 19:26:27Z rwatson $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/callout.h>

#include <net/if.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>

extern int in_inithead(void **head, int off);
#ifdef VIMAGE
extern int in_detachhead(void **head, int off);
#endif
#define RTPRF_OURS              RTF_PROTO3      /* set on routes we manage */

/*
 * Do what we need to do when inserting a route.
 */
static struct radix_node *
in_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
    struct radix_node *treenodes)
{
        struct rtentry *rt = (struct rtentry *)treenodes;
        struct sockaddr_in *sin = (struct sockaddr_in *)rt_key(rt);

        RADIX_NODE_HEAD_WLOCK_ASSERT(head);
        /*
         * A little bit of help for both IP output and input:
         * For host routes, we make sure that RTF_BROADCAST
         * is set for anything that looks like a broadcast address.
         * This way, we can avoid an expensive call to in_broadcast()
         * in ip_output() most of the time (because the route passed
         * to ip_output() is almost always a host route).
         *
         * We also do the same for local addresses, with the thought
         * that this might one day be used to speed up ip_input().
         *
         * We also mark routes to multicast addresses as such, because
         * it's easy to do and might be useful (but this is much more
         * dubious since it's so easy to inspect the address).
         */
        if (rt->rt_flags & RTF_HOST) {
                if (in_broadcast(sin->sin_addr, rt->rt_ifp)) {
                        rt->rt_flags |= RTF_BROADCAST;
                } else if (satosin(rt->rt_ifa->ifa_addr)->sin_addr.s_addr ==
                    sin->sin_addr.s_addr) {
                        rt->rt_flags |= RTF_LOCAL;
                }
        }
        if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
                rt->rt_flags |= RTF_MULTICAST;

        if (!rt->rt_rmx.rmx_mtu && rt->rt_ifp)
                rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;

        return (rn_addroute(v_arg, n_arg, head, treenodes));
}
/*
 * This code is the inverse of in_clsroute(): on first reference, if we
 * were managing the route, stop doing so and turn the expiration timer
 * back off again.
 */
static struct radix_node *
in_matroute(void *v_arg, struct radix_node_head *head)
{
        struct radix_node *rn = rn_match(v_arg, head);
        struct rtentry *rt = (struct rtentry *)rn;

        /* XXX locking? */
        if (rt && rt->rt_refcnt == 0) {         /* this is the first reference */
                if (rt->rt_flags & RTPRF_OURS) {
                        rt->rt_flags &= ~RTPRF_OURS;
                        rt->rt_rmx.rmx_expire = 0;
                }
        }
        return rn;
}

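/*
 * Per-vnet tunables controlling how long dynamically learned routes are
 * kept around and how many may accumulate before the expiration time is
 * cranked down.  They are exported through the net.inet.ip sysctls
 * declared below.
 */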
static VNET_DEFINE(int, rtq_reallyold);
static VNET_DEFINE(int, rtq_minreallyold);
static VNET_DEFINE(int, rtq_toomany);

#define V_rtq_reallyold         VNET(rtq_reallyold)
#define V_rtq_minreallyold      VNET(rtq_minreallyold)
#define V_rtq_toomany           VNET(rtq_toomany)

SYSCTL_VNET_INT(_net_inet_ip, IPCTL_RTEXPIRE, rtexpire, CTLFLAG_RW,
    &VNET_NAME(rtq_reallyold), 0,
    "Default expiration time on dynamically learned routes");

SYSCTL_VNET_INT(_net_inet_ip, IPCTL_RTMINEXPIRE, rtminexpire, CTLFLAG_RW,
    &VNET_NAME(rtq_minreallyold), 0,
    "Minimum time to attempt to hold onto dynamically learned routes");

SYSCTL_VNET_INT(_net_inet_ip, IPCTL_RTMAXCACHE, rtmaxcache, CTLFLAG_RW,
    &VNET_NAME(rtq_toomany), 0,
    "Upper limit on dynamically learned routes");

/*
 * On last reference drop, mark the route as belonging to us so that it
 * can be timed out.
 */
static void
in_clsroute(struct radix_node *rn, struct radix_node_head *head)
{
        struct rtentry *rt = (struct rtentry *)rn;

        RT_LOCK_ASSERT(rt);

        if (!(rt->rt_flags & RTF_UP))
                return;                 /* prophylactic measures */

        if (rt->rt_flags & RTPRF_OURS)
                return;

        if (!(rt->rt_flags & RTF_DYNAMIC))
                return;

        /*
         * If rtq_reallyold is 0, just delete the route without
         * waiting for a timeout cycle to kill it.
         */
        if (V_rtq_reallyold != 0) {
                rt->rt_flags |= RTPRF_OURS;
                rt->rt_rmx.rmx_expire = time_uptime + V_rtq_reallyold;
        } else {
                rtexpunge(rt);
        }
}

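/*
 * Argument block handed to in_rtqkill() by the timer and drain paths
 * below; it tracks how many managed routes were seen and killed and
 * when the next expiration is due.
 */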
struct rtqk_arg {
        struct radix_node_head *rnh;
        int draining;
        int killed;
        int found;
        int updating;
        time_t nextstop;
};

/*
 * Get rid of old routes.  When draining, this deletes everything, even when
 * the timeout is not expired yet.  When updating, this makes sure that
 * nothing has a timeout longer than the current value of rtq_reallyold.
 */
static int
in_rtqkill(struct radix_node *rn, void *rock)
{
        struct rtqk_arg *ap = rock;
        struct rtentry *rt = (struct rtentry *)rn;
        int err;

        RADIX_NODE_HEAD_WLOCK_ASSERT(ap->rnh);

        if (rt->rt_flags & RTPRF_OURS) {
                ap->found++;

                if (ap->draining || rt->rt_rmx.rmx_expire <= time_uptime) {
                        if (rt->rt_refcnt > 0)
                                panic("rtqkill route really not free");

                        err = in_rtrequest(RTM_DELETE,
                            (struct sockaddr *)rt_key(rt),
                            rt->rt_gateway, rt_mask(rt),
                            rt->rt_flags | RTF_RNH_LOCKED, 0,
                            rt->rt_fibnum);
                        if (err) {
                                log(LOG_WARNING, "in_rtqkill: error %d\n", err);
                        } else {
                                ap->killed++;
                        }
                } else {
                        if (ap->updating &&
                            (rt->rt_rmx.rmx_expire - time_uptime >
                             V_rtq_reallyold)) {
                                rt->rt_rmx.rmx_expire =
                                    time_uptime + V_rtq_reallyold;
                        }
                        ap->nextstop = lmin(ap->nextstop,
                            rt->rt_rmx.rmx_expire);
                }
        }

        return 0;
}

#define RTQ_TIMEOUT     60*10   /* run no less than once every ten minutes */
static VNET_DEFINE(int, rtq_timeout);
static VNET_DEFINE(struct callout, rtq_timer);

#define V_rtq_timeout           VNET(rtq_timeout)
#define V_rtq_timer             VNET(rtq_timer)

static void in_rtqtimo_one(void *rock);

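/*
 * Timer callback: run an expiration pass over the IPv4 routing table of
 * every FIB in this vnet, then reschedule ourselves rtq_timeout seconds
 * from now.
 */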
static void
in_rtqtimo(void *rock)
{
        CURVNET_SET((struct vnet *) rock);
        int fibnum;
        void *newrock;
        struct timeval atv;

        for (fibnum = 0; fibnum < rt_numfibs; fibnum++) {
                newrock = rt_tables_get_rnh(fibnum, AF_INET);
                if (newrock != NULL)
                        in_rtqtimo_one(newrock);
        }
        atv.tv_usec = 0;
        atv.tv_sec = V_rtq_timeout;
        callout_reset(&V_rtq_timer, tvtohz(&atv), in_rtqtimo, rock);
        CURVNET_RESTORE();
}

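/*
 * Do one expiration pass over a single routing table.  If too many of
 * our routes survive the pass, lower rtq_reallyold (no more than once
 * per rtq_timeout interval) and walk the tree again to apply the new,
 * shorter timeout.
 */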
static void
in_rtqtimo_one(void *rock)
{
        struct radix_node_head *rnh = rock;
        struct rtqk_arg arg;
        static time_t last_adjusted_timeout = 0;

        arg.found = arg.killed = 0;
        arg.rnh = rnh;
        arg.nextstop = time_uptime + V_rtq_timeout;
        arg.draining = arg.updating = 0;
        RADIX_NODE_HEAD_LOCK(rnh);
        rnh->rnh_walktree(rnh, in_rtqkill, &arg);
        RADIX_NODE_HEAD_UNLOCK(rnh);

        /*
         * Attempt to be somewhat dynamic about this:
         * If there are ``too many'' routes sitting around taking up space,
         * then crank down the timeout, and see if we can't make some more
         * go away.  However, we make sure that we will never adjust more
         * than once in rtq_timeout seconds, to keep from cranking down too
         * hard.
         */
        if ((arg.found - arg.killed > V_rtq_toomany) &&
            (time_uptime - last_adjusted_timeout >= V_rtq_timeout) &&
            V_rtq_reallyold > V_rtq_minreallyold) {
                V_rtq_reallyold = 2 * V_rtq_reallyold / 3;
                if (V_rtq_reallyold < V_rtq_minreallyold) {
                        V_rtq_reallyold = V_rtq_minreallyold;
                }

                last_adjusted_timeout = time_uptime;
#ifdef DIAGNOSTIC
                log(LOG_DEBUG, "in_rtqtimo: adjusted rtq_reallyold to %d\n",
                    V_rtq_reallyold);
#endif
                arg.found = arg.killed = 0;
                arg.updating = 1;
                RADIX_NODE_HEAD_LOCK(rnh);
                rnh->rnh_walktree(rnh, in_rtqkill, &arg);
                RADIX_NODE_HEAD_UNLOCK(rnh);
        }
}

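/*
 * Forcibly remove every route we are managing, in all vnets and FIBs,
 * without waiting for their timeouts to expire.
 */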
void
in_rtqdrain(void)
{
        VNET_ITERATOR_DECL(vnet_iter);
        struct radix_node_head *rnh;
        struct rtqk_arg arg;
        int fibnum;

        VNET_LIST_RLOCK_NOSLEEP();
        VNET_FOREACH(vnet_iter) {
                CURVNET_SET(vnet_iter);

                for (fibnum = 0; fibnum < rt_numfibs; fibnum++) {
                        rnh = rt_tables_get_rnh(fibnum, AF_INET);
                        arg.found = arg.killed = 0;
                        arg.rnh = rnh;
                        arg.nextstop = 0;
                        arg.draining = 1;
                        arg.updating = 0;
                        RADIX_NODE_HEAD_LOCK(rnh);
                        rnh->rnh_walktree(rnh, in_rtqkill, &arg);
                        RADIX_NODE_HEAD_UNLOCK(rnh);
                }
                CURVNET_RESTORE();
        }
        VNET_LIST_RUNLOCK_NOSLEEP();
}

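/*
 * in_inithead() may be called more than once (it runs for each FIB, and
 * with VIMAGE for each vnet); this flag ensures the expiration callout
 * below is only initialized and started the first time through.
 */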
static int _in_rt_was_here;
/*
 * Initialize our routing tree.
 */
int
in_inithead(void **head, int off)
{
        struct radix_node_head *rnh;

        /* XXX MRT
         * This can be called from vfs_export.c too in which case 'off'
         * will be 0. We know the correct value so just use that and
         * return directly if it was 0.
         * This is a hack that replaces an even worse hack on a bad hack
         * on a bad design. After RELENG_7 this should be fixed but that
         * will change the ABI, so for now do it this way.
         */
        if (!rn_inithead(head, 32))
                return 0;

        if (off == 0)           /* XXX MRT  see above */
                return 1;       /* only do the rest for a real routing table */

        V_rtq_reallyold = 60*60;        /* one hour is "really old" */
        V_rtq_minreallyold = 10;        /* never automatically crank down to less */
        V_rtq_toomany = 128;            /* 128 cached routes is "too many" */
        V_rtq_timeout = RTQ_TIMEOUT;

        rnh = *head;
        rnh->rnh_addaddr = in_addroute;
        rnh->rnh_matchaddr = in_matroute;
        rnh->rnh_close = in_clsroute;
        if (_in_rt_was_here == 0) {
                callout_init(&V_rtq_timer, CALLOUT_MPSAFE);
                callout_reset(&V_rtq_timer, 1, in_rtqtimo, curvnet);
                _in_rt_was_here = 1;
        }
        return 1;
}

#ifdef VIMAGE
int
in_detachhead(void **head, int off)
{

        callout_drain(&V_rtq_timer);
        return (1);
}
#endif

/*
 * This zaps old routes when the interface goes down or the interface
 * address is deleted.  In the latter case, it deletes static routes
 * that point to this address.  If we don't do this, we may end up
 * using the old address in the future.  The ones we always want to
 * get rid of are things like ARP entries, since the user might down
 * the interface, walk over to a completely different network, and
 * plug back in.
 */
struct in_ifadown_arg {
        struct ifaddr *ifa;
        int del;
};

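/*
 * rn_walktree() callback: expunge any route whose ifa matches the one
 * being taken down, skipping static routes unless their deletion was
 * explicitly requested.
 */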
static int
in_ifadownkill(struct radix_node *rn, void *xap)
{
        struct in_ifadown_arg *ap = xap;
        struct rtentry *rt = (struct rtentry *)rn;

        RT_LOCK(rt);
        if (rt->rt_ifa == ap->ifa &&
            (ap->del || !(rt->rt_flags & RTF_STATIC))) {
                /*
                 * We need to disable the automatic prune that happens
                 * in this case in rtrequest() because it will blow
                 * away the pointers that rn_walktree() needs in order
                 * to continue our descent.  We will end up deleting all
                 * the routes that rtrequest() would have in any case,
                 * so that behavior is not needed there.
                 */
                rtexpunge(rt);
        }
        RT_UNLOCK(rt);
        return 0;
}

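/*
 * Walk the IPv4 routing table of every FIB and remove routes that use
 * the given interface address.  Returns 1 if the address is not an
 * AF_INET address, 0 otherwise.
 */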
int
in_ifadown(struct ifaddr *ifa, int delete)
{
        struct in_ifadown_arg arg;
        struct radix_node_head *rnh;
        int fibnum;

        if (ifa->ifa_addr->sa_family != AF_INET)
                return 1;

        for (fibnum = 0; fibnum < rt_numfibs; fibnum++) {
                rnh = rt_tables_get_rnh(fibnum, AF_INET);
                arg.ifa = ifa;
                arg.del = delete;
                RADIX_NODE_HEAD_LOCK(rnh);
                rnh->rnh_walktree(rnh, in_ifadownkill, &arg);
                RADIX_NODE_HEAD_UNLOCK(rnh);
                ifa->ifa_flags &= ~IFA_ROUTE;           /* XXXlocking? */
        }
        return 0;
}

/*
 * inet versions of rt functions.  These have fib extensions and for now
 * just reference the _fib variants; eventually this order will be
 * reversed.
 */
void
in_rtalloc_ign(struct route *ro, u_long ignflags, u_int fibnum)
{
        rtalloc_ign_fib(ro, ignflags, fibnum);
}

int
in_rtrequest(int req,
        struct sockaddr *dst,
        struct sockaddr *gateway,
        struct sockaddr *netmask,
        int flags,
        struct rtentry **ret_nrt,
        u_int fibnum)
{
        return (rtrequest_fib(req, dst, gateway, netmask,
            flags, ret_nrt, fibnum));
}

struct rtentry *
in_rtalloc1(struct sockaddr *dst, int report, u_long ignflags, u_int fibnum)
{
        return (rtalloc1_fib(dst, report, ignflags, fibnum));
}

void
in_rtredirect(struct sockaddr *dst,
        struct sockaddr *gateway,
        struct sockaddr *netmask,
        int flags,
        struct sockaddr *src,
        u_int fibnum)
{
        rtredirect_fib(dst, gateway, netmask, flags, src, fibnum);
}

void
in_rtalloc(struct route *ro, u_int fibnum)
{
        rtalloc_ign_fib(ro, 0UL, fibnum);
}

#if 0
int      in_rt_getifa(struct rt_addrinfo *, u_int fibnum);
int      in_rtioctl(u_long, caddr_t, u_int);
int      in_rtrequest1(int, struct rt_addrinfo *, struct rtentry **, u_int);
#endif