/*-
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.0/sys/netinet/ip_input.c 255523 2013-09-13 18:45:10Z trociny $");

#include "opt_bootp.h"
#include "opt_ipfw.h"
#include "opt_ipstealth.h"
#include "opt_ipsec.h"
#include "opt_kdtrace.h"
#include "opt_route.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/sdt.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>

#include <net/pfil.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/netisr.h>
#include <net/vnet.h>
#include <net/flowtable.h>

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_fw.h>
#include <netinet/ip_icmp.h>
#include <netinet/ip_options.h>
#include <machine/in_cksum.h>
#include <netinet/ip_carp.h>
#ifdef IPSEC
#include <netinet/ip_ipsec.h>
#endif /* IPSEC */

#include <sys/socketvar.h>

#include <security/mac/mac_framework.h>

#ifdef CTASSERT
CTASSERT(sizeof(struct ip) == 20);
#endif

struct rwlock in_ifaddr_lock;
RW_SYSINIT(in_ifaddr_lock, &in_ifaddr_lock, "in_ifaddr_lock");

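/*
 * Most of the tunables below are virtualized: VNET_DEFINE() gives each
 * network stack instance (vnet) its own copy of the variable, which the
 * corresponding V_ macro resolves to for the current vnet.
 */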
VNET_DEFINE(int, rsvp_on);

VNET_DEFINE(int, ipforwarding);
SYSCTL_VNET_INT(_net_inet_ip, IPCTL_FORWARDING, forwarding, CTLFLAG_RW,
    &VNET_NAME(ipforwarding), 0,
    "Enable IP forwarding between interfaces");

static VNET_DEFINE(int, ipsendredirects) = 1;	/* XXX */
#define	V_ipsendredirects	VNET(ipsendredirects)
SYSCTL_VNET_INT(_net_inet_ip, IPCTL_SENDREDIRECTS, redirect, CTLFLAG_RW,
    &VNET_NAME(ipsendredirects), 0,
    "Enable sending IP redirects");

static VNET_DEFINE(int, ip_keepfaith);
#define	V_ip_keepfaith	VNET(ip_keepfaith)
SYSCTL_VNET_INT(_net_inet_ip, IPCTL_KEEPFAITH, keepfaith, CTLFLAG_RW,
    &VNET_NAME(ip_keepfaith), 0,
111 "Enable packet capture for FAITH IPv4->IPv6 translater daemon");

static VNET_DEFINE(int, ip_sendsourcequench);
#define	V_ip_sendsourcequench	VNET(ip_sendsourcequench)
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, sendsourcequench, CTLFLAG_RW,
    &VNET_NAME(ip_sendsourcequench), 0,
    "Enable the transmission of source quench packets");

VNET_DEFINE(int, ip_do_randomid);
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, random_id, CTLFLAG_RW,
    &VNET_NAME(ip_do_randomid), 0,
    "Assign random ip_id values");

/*
 * XXX - Setting ip_checkinterface mostly implements the receive side of
 * the Strong ES model described in RFC 1122, but since the routing table
 * and transmit implementation do not implement the Strong ES model,
 * setting this to 1 results in an odd hybrid.
 *
 * XXX - ip_checkinterface currently must be disabled if you use ipnat
 * to translate the destination address to another local interface.
 *
 * XXX - ip_checkinterface must be disabled if you add IP aliases
 * to the loopback interface instead of the interface where the
 * packets for those addresses are received.
 */
static VNET_DEFINE(int, ip_checkinterface);
#define	V_ip_checkinterface	VNET(ip_checkinterface)
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, check_interface, CTLFLAG_RW,
    &VNET_NAME(ip_checkinterface), 0,
    "Verify packet arrives on correct interface");

VNET_DEFINE(struct pfil_head, inet_pfil_hook);	/* Packet filter hooks */

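/*
 * The netisr handler through which all IP input enters the stack.
 * NETISR_POLICY_FLOW allows the dispatcher to use the mbuf flow ID to
 * spread work across threads while keeping packets of one flow in order.
 */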
static struct netisr_handler ip_nh = {
	.nh_name = "ip",
	.nh_handler = ip_input,
	.nh_proto = NETISR_IP,
	.nh_policy = NETISR_POLICY_FLOW,
};

extern	struct domain inetdomain;
extern	struct protosw inetsw[];
u_char	ip_protox[IPPROTO_MAX];
VNET_DEFINE(struct in_ifaddrhead, in_ifaddrhead);	/* first inet address */
VNET_DEFINE(struct in_ifaddrhashhead *, in_ifaddrhashtbl); /* inet addr hash table */
VNET_DEFINE(u_long, in_ifaddrhmask);		/* mask for hash table */

static VNET_DEFINE(uma_zone_t, ipq_zone);
static VNET_DEFINE(TAILQ_HEAD(ipqhead, ipq), ipq[IPREASS_NHASH]);
static struct mtx ipqlock;

#define	V_ipq_zone		VNET(ipq_zone)
#define	V_ipq			VNET(ipq)

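/*
 * A single global mutex protects all fragment reassembly queues; note
 * that it covers the V_ipq hash buckets of every vnet at once.
 */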
#define	IPQ_LOCK()	mtx_lock(&ipqlock)
#define	IPQ_UNLOCK()	mtx_unlock(&ipqlock)
#define	IPQ_LOCK_INIT()	mtx_init(&ipqlock, "ipqlock", NULL, MTX_DEF)
#define	IPQ_LOCK_ASSERT()	mtx_assert(&ipqlock, MA_OWNED)

static void	maxnipq_update(void);
static void	ipq_zone_change(void *);
static void	ip_drain_locked(void);

static VNET_DEFINE(int, maxnipq);  /* Administrative limit on # reass queues. */
static VNET_DEFINE(int, nipq);		/* Total # of reass queues */
#define	V_maxnipq		VNET(maxnipq)
#define	V_nipq			VNET(nipq)
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, fragpackets, CTLFLAG_RD,
    &VNET_NAME(nipq), 0,
    "Current number of IPv4 fragment reassembly queue entries");

static VNET_DEFINE(int, maxfragsperpacket);
#define	V_maxfragsperpacket	VNET(maxfragsperpacket)
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, maxfragsperpacket, CTLFLAG_RW,
    &VNET_NAME(maxfragsperpacket), 0,
    "Maximum number of IPv4 fragments allowed per packet");

#ifdef IPCTL_DEFMTU
SYSCTL_INT(_net_inet_ip, IPCTL_DEFMTU, mtu, CTLFLAG_RW,
    &ip_mtu, 0, "Default MTU");
#endif

#ifdef IPSTEALTH
VNET_DEFINE(int, ipstealth);
SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, stealth, CTLFLAG_RW,
    &VNET_NAME(ipstealth), 0,
198 "IP stealth mode, no TTL decrementation on forwarding");
#endif

#ifdef FLOWTABLE
static VNET_DEFINE(int, ip_output_flowtable_size) = 2048;
VNET_DEFINE(struct flowtable *, ip_ft);
#define	V_ip_output_flowtable_size	VNET(ip_output_flowtable_size)

SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, output_flowtable_size, CTLFLAG_RDTUN,
    &VNET_NAME(ip_output_flowtable_size), 2048,
    "number of entries in the per-cpu output flow caches");
#endif

static void	ip_freef(struct ipqhead *, struct ipq *);

/*
 * IP statistics are stored in the "array" of counter(9)s.
 */
VNET_PCPUSTAT_DEFINE(struct ipstat, ipstat);
VNET_PCPUSTAT_SYSINIT(ipstat);
SYSCTL_VNET_PCPUSTAT(_net_inet_ip, IPCTL_STATS, stats, struct ipstat, ipstat,
    "IP statistics (struct ipstat, netinet/ip_var.h)");

#ifdef VIMAGE
VNET_PCPUSTAT_SYSUNINIT(ipstat);
#endif /* VIMAGE */

/*
 * Kernel module interface for updating ipstat.  The argument is an index
 * into ipstat treated as an array.
 */
void
kmod_ipstat_inc(int statnum)
{

	counter_u64_add(VNET(ipstat)[statnum], 1);
}

void
kmod_ipstat_dec(int statnum)
{

	counter_u64_add(VNET(ipstat)[statnum], -1);
}

static int
sysctl_netinet_intr_queue_maxlen(SYSCTL_HANDLER_ARGS)
{
	int error, qlimit;

	netisr_getqlimit(&ip_nh, &qlimit);
	error = sysctl_handle_int(oidp, &qlimit, 0, req);
	if (error || !req->newptr)
		return (error);
	if (qlimit < 1)
		return (EINVAL);
	return (netisr_setqlimit(&ip_nh, qlimit));
}
SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQMAXLEN, intr_queue_maxlen,
    CTLTYPE_INT|CTLFLAG_RW, 0, 0, sysctl_netinet_intr_queue_maxlen, "I",
    "Maximum size of the IP input queue");

static int
sysctl_netinet_intr_queue_drops(SYSCTL_HANDLER_ARGS)
{
	u_int64_t qdrops_long;
	int error, qdrops;

	netisr_getqdrops(&ip_nh, &qdrops_long);
	qdrops = qdrops_long;
	error = sysctl_handle_int(oidp, &qdrops, 0, req);
	if (error || !req->newptr)
		return (error);
	if (qdrops != 0)
		return (EINVAL);
	netisr_clearqdrops(&ip_nh);
	return (0);
}

SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQDROPS, intr_queue_drops,
    CTLTYPE_INT|CTLFLAG_RD, 0, 0, sysctl_netinet_intr_queue_drops, "I",
    "Number of packets dropped from the IP input queue");

/*
 * IP initialization: fill in IP protocol switch table.
 * All protocols not implemented in kernel go to raw IP protocol handler.
 */
void
ip_init(void)
{
	struct protosw *pr;
	int i;

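	/* Seed the IP datagram ID counter from the wall clock. */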
	V_ip_id = time_second & 0xffff;

	TAILQ_INIT(&V_in_ifaddrhead);
	V_in_ifaddrhashtbl = hashinit(INADDR_NHASH, M_IFADDR, &V_in_ifaddrhmask);

	/* Initialize IP reassembly queue. */
	for (i = 0; i < IPREASS_NHASH; i++)
		TAILQ_INIT(&V_ipq[i]);
	V_maxnipq = nmbclusters / 32;
	V_maxfragsperpacket = 16;
	V_ipq_zone = uma_zcreate("ipq", sizeof(struct ipq), NULL, NULL, NULL,
	    NULL, UMA_ALIGN_PTR, 0);
	maxnipq_update();

	/* Initialize packet filter hooks. */
	V_inet_pfil_hook.ph_type = PFIL_TYPE_AF;
	V_inet_pfil_hook.ph_af = AF_INET;
	if ((i = pfil_head_register(&V_inet_pfil_hook)) != 0)
		printf("%s: WARNING: unable to register pfil hook, "
		    "error %d\n", __func__, i);

#ifdef FLOWTABLE
	if (TUNABLE_INT_FETCH("net.inet.ip.output_flowtable_size",
	    &V_ip_output_flowtable_size)) {
		if (V_ip_output_flowtable_size < 256)
			V_ip_output_flowtable_size = 256;
		if (!powerof2(V_ip_output_flowtable_size)) {
			printf("flowtable must be power of 2 size\n");
			V_ip_output_flowtable_size = 2048;
		}
	} else {
		/*
		 * round up to the next power of 2
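		 * e.g. (1 << fls(n - 1)) rounds n up to a power of
		 * two, so n = 1200 yields 2048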
		 */
		V_ip_output_flowtable_size = 1 << fls((1024 + maxusers * 64) - 1);
	}
	V_ip_ft = flowtable_alloc("ipv4", V_ip_output_flowtable_size, FL_PCPU);
#endif

	/* Skip initialization of globals for non-default instances. */
	if (!IS_DEFAULT_VNET(curvnet))
		return;

	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		panic("ip_init: PF_INET not found");

	/* Initialize the entire ip_protox[] array to IPPROTO_RAW. */
	for (i = 0; i < IPPROTO_MAX; i++)
		ip_protox[i] = pr - inetsw;
	/*
	 * Cycle through IP protocols and put them into the appropriate place
	 * in ip_protox[].
	 */
	for (pr = inetdomain.dom_protosw;
	    pr < inetdomain.dom_protoswNPROTOSW; pr++)
		if (pr->pr_domain->dom_family == PF_INET &&
		    pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW) {
			/* Be careful to only index valid IP protocols. */
			if (pr->pr_protocol < IPPROTO_MAX)
				ip_protox[pr->pr_protocol] = pr - inetsw;
		}

	EVENTHANDLER_REGISTER(nmbclusters_change, ipq_zone_change,
	    NULL, EVENTHANDLER_PRI_ANY);

	/* Initialize various other remaining things. */
	IPQ_LOCK_INIT();
	netisr_register(&ip_nh);
}

#ifdef VIMAGE
void
ip_destroy(void)
{
	int i;

	if ((i = pfil_head_unregister(&V_inet_pfil_hook)) != 0)
		printf("%s: WARNING: unable to unregister pfil hook, "
		    "error %d\n", __func__, i);

	/* Cleanup in_ifaddr hash table; should be empty. */
	hashdestroy(V_in_ifaddrhashtbl, M_IFADDR, V_in_ifaddrhmask);

	IPQ_LOCK();
	ip_drain_locked();
	IPQ_UNLOCK();

	uma_zdestroy(V_ipq_zone);
}
#endif

/*
 * Ip input routine.  Checksum and byte swap header.  If fragmented
 * try to reassemble.  Process options.  Pass to next level.
 */
void
ip_input(struct mbuf *m)
{
	struct ip *ip = NULL;
	struct in_ifaddr *ia = NULL;
	struct ifaddr *ifa;
	struct ifnet *ifp;
	int checkif, hlen = 0;
	uint16_t sum, ip_len;
	int dchg = 0;				/* dest changed after fw */
	struct in_addr odst;			/* original dst address */

	M_ASSERTPKTHDR(m);

	if (m->m_flags & M_FASTFWD_OURS) {
		m->m_flags &= ~M_FASTFWD_OURS;
		/* Set up some basics that will be used later. */
		ip = mtod(m, struct ip *);
		hlen = ip->ip_hl << 2;
		ip_len = ntohs(ip->ip_len);
		goto ours;
	}

	IPSTAT_INC(ips_total);

	if (m->m_pkthdr.len < sizeof(struct ip))
		goto tooshort;

	if (m->m_len < sizeof (struct ip) &&
	    (m = m_pullup(m, sizeof (struct ip))) == NULL) {
		IPSTAT_INC(ips_toosmall);
		return;
	}
	ip = mtod(m, struct ip *);

	if (ip->ip_v != IPVERSION) {
		IPSTAT_INC(ips_badvers);
		goto bad;
	}

	hlen = ip->ip_hl << 2;
	if (hlen < sizeof(struct ip)) {	/* minimum header length */
		IPSTAT_INC(ips_badhlen);
		goto bad;
	}
	if (hlen > m->m_len) {
		if ((m = m_pullup(m, hlen)) == NULL) {
			IPSTAT_INC(ips_badhlen);
			return;
		}
		ip = mtod(m, struct ip *);
	}

	IP_PROBE(receive, NULL, NULL, ip, m->m_pkthdr.rcvif, ip, NULL);

	/* 127/8 must not appear on wire - RFC1122 */
	ifp = m->m_pkthdr.rcvif;
	if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
	    (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
		if ((ifp->if_flags & IFF_LOOPBACK) == 0) {
			IPSTAT_INC(ips_badaddr);
			goto bad;
		}
	}

	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
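		/*
		 * The NIC already checked the header checksum
		 * (CSUM_IP_CHECKED); sum is zero only if it also
		 * reported the checksum valid (CSUM_IP_VALID).
		 */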
		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
	} else {
		if (hlen == sizeof(struct ip)) {
			sum = in_cksum_hdr(ip);
		} else {
			sum = in_cksum(m, hlen);
		}
	}
	if (sum) {
		IPSTAT_INC(ips_badsum);
		goto bad;
	}

#ifdef ALTQ
	if (altq_input != NULL && (*altq_input)(m, AF_INET) == 0)
		/* packet is dropped by traffic conditioner */
		return;
#endif

	ip_len = ntohs(ip->ip_len);
	if (ip_len < hlen) {
		IPSTAT_INC(ips_badlen);
		goto bad;
	}

	/*
	 * Check that the amount of data in the buffers
	 * is at least as much as the IP header would have us expect.
	 * Trim mbufs if longer than we expect.
	 * Drop packet if shorter than we expect.
	 */
	if (m->m_pkthdr.len < ip_len) {
tooshort:
		IPSTAT_INC(ips_tooshort);
		goto bad;
	}
	if (m->m_pkthdr.len > ip_len) {
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = ip_len;
			m->m_pkthdr.len = ip_len;
		} else
			m_adj(m, ip_len - m->m_pkthdr.len);
	}
#ifdef IPSEC
	/*
	 * Bypass packet filtering for packets previously handled by IPsec.
	 */
	if (ip_ipsec_filtertunnel(m))
		goto passin;
#endif /* IPSEC */

	/*
	 * Run through list of hooks for input packets.
	 *
	 * NB: Beware of the destination address changing (e.g.
	 *     by NAT rewriting).  When this happens, tell
	 *     ip_forward to do the right thing.
	 */

	/* Jump over all PFIL processing if hooks are not active. */
	if (!PFIL_HOOKED(&V_inet_pfil_hook))
		goto passin;

	odst = ip->ip_dst;
	if (pfil_run_hooks(&V_inet_pfil_hook, &m, ifp, PFIL_IN, NULL) != 0)
		return;
	if (m == NULL)			/* consumed by filter */
		return;

	ip = mtod(m, struct ip *);
	dchg = (odst.s_addr != ip->ip_dst.s_addr);
	ifp = m->m_pkthdr.rcvif;

	if (m->m_flags & M_FASTFWD_OURS) {
		m->m_flags &= ~M_FASTFWD_OURS;
		goto ours;
	}
	if (m->m_flags & M_IP_NEXTHOP) {
		dchg = (m_tag_find(m, PACKET_TAG_IPFORWARD, NULL) != NULL);
		if (dchg != 0) {
			/*
			 * Directly ship the packet on.  This allows
			 * forwarding packets originally destined to us
			 * to some other directly connected host.
			 */
			ip_forward(m, 1);
			return;
		}
	}
passin:

	/*
	 * Process options and, if not destined for us,
	 * ship it on.  ip_dooptions returns 1 when an
	 * error was detected (causing an icmp message
	 * to be sent and the original packet to be freed).
	 */
	if (hlen > sizeof (struct ip) && ip_dooptions(m, 0))
		return;

	/*
	 * Greedy RSVP: snatch any PATH packet of the RSVP protocol, no
	 * matter whether it is destined for another node or is a
	 * multicast packet; RSVP wants it and prevents it from being
	 * forwarded anywhere else.  Also check that the RSVP daemon is
	 * running before grabbing the packet.
	 */
	if (V_rsvp_on && ip->ip_p == IPPROTO_RSVP)
		goto ours;

	/*
	 * Check our list of addresses, to see if the packet is for us.
	 * If we don't have any addresses, assume any unicast packet
	 * we receive might be for us (and let the upper layers deal
	 * with it).
	 */
	if (TAILQ_EMPTY(&V_in_ifaddrhead) &&
	    (m->m_flags & (M_MCAST|M_BCAST)) == 0)
		goto ours;

	/*
	 * Enable a consistency check between the destination address
	 * and the arrival interface for a unicast packet (the RFC 1122
	 * strong ES model) if IP forwarding is disabled and the packet
	 * is not locally generated and the packet is not subject to
	 * 'ipfw fwd'.
	 *
	 * XXX - Checking also should be disabled if the destination
	 * address is ipnat'ed to a different interface.
	 *
	 * XXX - Checking is incompatible with IP aliases added
	 * to the loopback interface instead of the interface where
	 * the packets are received.
	 *
	 * XXX - This is the case for carp vhost IPs as well so we
	 * insert a workaround.  If the packet got here, we already
	 * checked with carp_iamatch() and carp_forus().
	 */
	checkif = V_ip_checkinterface && (V_ipforwarding == 0) &&
	    ifp != NULL && ((ifp->if_flags & IFF_LOOPBACK) == 0) &&
	    ifp->if_carp == NULL && (dchg == 0);

	/*
	 * Check for exact addresses in the hash bucket.
	 */
	/* IN_IFADDR_RLOCK(); */
	LIST_FOREACH(ia, INADDR_HASH(ip->ip_dst.s_addr), ia_hash) {
		/*
		 * If the address matches, verify that the packet
		 * arrived via the correct interface if checking is
		 * enabled.
		 */
		if (IA_SIN(ia)->sin_addr.s_addr == ip->ip_dst.s_addr &&
		    (!checkif || ia->ia_ifp == ifp)) {
			ifa_ref(&ia->ia_ifa);
			/* IN_IFADDR_RUNLOCK(); */
			goto ours;
		}
	}
	/* IN_IFADDR_RUNLOCK(); */

	/*
	 * Check for broadcast addresses.
	 *
	 * Only accept broadcast packets that arrive via the matching
	 * interface.  Reception of forwarded directed broadcasts would
	 * be handled via ip_forward() and ether_output() with the loopback
	 * into the stack for SIMPLEX interfaces handled by ether_output().
	 */
	if (ifp != NULL && ifp->if_flags & IFF_BROADCAST) {
		IF_ADDR_RLOCK(ifp);
		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
			if (ifa->ifa_addr->sa_family != AF_INET)
				continue;
			ia = ifatoia(ifa);
			if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr ==
			    ip->ip_dst.s_addr) {
				ifa_ref(ifa);
				IF_ADDR_RUNLOCK(ifp);
				goto ours;
			}
#ifdef BOOTP_COMPAT
			if (IA_SIN(ia)->sin_addr.s_addr == INADDR_ANY) {
				ifa_ref(ifa);
				IF_ADDR_RUNLOCK(ifp);
				goto ours;
			}
#endif
		}
		IF_ADDR_RUNLOCK(ifp);
		ia = NULL;
	}
	/* RFC 3927 2.7: Do not forward datagrams for 169.254.0.0/16. */
	if (IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr))) {
		IPSTAT_INC(ips_cantforward);
		m_freem(m);
		return;
	}
	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
		if (V_ip_mrouter) {
			/*
			 * If we are acting as a multicast router, all
			 * incoming multicast packets are passed to the
			 * kernel-level multicast forwarding function.
			 * The packet is returned (relatively) intact; if
			 * ip_mforward() returns a non-zero value, the packet
			 * must be discarded, else it may be accepted below.
			 */
			if (ip_mforward && ip_mforward(ip, ifp, m, 0) != 0) {
				IPSTAT_INC(ips_cantforward);
				m_freem(m);
				return;
			}

			/*
			 * The process-level routing daemon needs to receive
			 * all multicast IGMP packets, whether or not this
			 * host belongs to their destination groups.
			 */
			if (ip->ip_p == IPPROTO_IGMP)
				goto ours;
			IPSTAT_INC(ips_forward);
		}
		/*
		 * Assume the packet is for us, to avoid prematurely taking
		 * a lock on the in_multi hash.  Protocols must perform
		 * their own filtering and update statistics accordingly.
		 */
		goto ours;
	}
	if (ip->ip_dst.s_addr == (u_long)INADDR_BROADCAST)
		goto ours;
	if (ip->ip_dst.s_addr == INADDR_ANY)
		goto ours;

	/*
	 * FAITH (Firewall Aided Internet Translator)
	 */
	if (ifp && ifp->if_type == IFT_FAITH) {
		if (V_ip_keepfaith) {
			if (ip->ip_p == IPPROTO_TCP || ip->ip_p == IPPROTO_ICMP)
				goto ours;
		}
		m_freem(m);
		return;
	}

	/*
	 * Not for us; forward if possible and desirable.
	 */
	if (V_ipforwarding == 0) {
		IPSTAT_INC(ips_cantforward);
		m_freem(m);
	} else {
#ifdef IPSEC
		if (ip_ipsec_fwd(m))
			goto bad;
#endif /* IPSEC */
		ip_forward(m, dchg);
	}
	return;

ours:
#ifdef IPSTEALTH
	/*
	 * IPSTEALTH: Process non-routing options only
	 * if the packet is destined for us.
	 */
	if (V_ipstealth && hlen > sizeof (struct ip) && ip_dooptions(m, 1)) {
		if (ia != NULL)
			ifa_free(&ia->ia_ifa);
		return;
	}
#endif /* IPSTEALTH */

	/* Count the packet in the ip address stats */
	if (ia != NULL) {
		ia->ia_ifa.if_ipackets++;
		ia->ia_ifa.if_ibytes += m->m_pkthdr.len;
		ifa_free(&ia->ia_ifa);
	}

	/*
	 * Attempt reassembly; if it succeeds, proceed.
	 * ip_reass() will return a different mbuf.
	 */
	if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) {
		m = ip_reass(m);
		if (m == NULL)
			return;
		ip = mtod(m, struct ip *);
		/* Get the header length of the reassembled packet */
		hlen = ip->ip_hl << 2;
	}

#ifdef IPSEC
	/*
	 * Enforce IPsec policy checking if we are seeing the last header.
	 * Note that we do not visit this for protocols with pcb-layer
	 * code, like UDP/TCP/raw IP.
	 */
	if (ip_ipsec_input(m))
		goto bad;
#endif /* IPSEC */

	/*
	 * Switch out to protocol's input routine.
	 */
	IPSTAT_INC(ips_delivered);

	(*inetsw[ip_protox[ip->ip_p]].pr_input)(m, hlen);
	return;
bad:
	m_freem(m);
}

/*
 * After maxnipq has been updated, propagate the change to UMA.  The UMA zone
 * max has slightly different semantics than the sysctl, for historical
 * reasons.
 */
static void
maxnipq_update(void)
{

	/*
	 * -1 for unlimited allocation.
	 */
	if (V_maxnipq < 0)
		uma_zone_set_max(V_ipq_zone, 0);
	/*
	 * Positive number for specific bound.
	 */
	if (V_maxnipq > 0)
		uma_zone_set_max(V_ipq_zone, V_maxnipq);
	/*
	 * Zero specifies no further fragment queue allocation -- set the
	 * bound very low, but rely on implementation elsewhere to actually
	 * prevent allocation and reclaim current queues.
	 */
	if (V_maxnipq == 0)
		uma_zone_set_max(V_ipq_zone, 1);
}

static void
ipq_zone_change(void *tag)
{

	if (V_maxnipq > 0 && V_maxnipq < (nmbclusters / 32)) {
		V_maxnipq = nmbclusters / 32;
		maxnipq_update();
	}
}

static int
sysctl_maxnipq(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	i = V_maxnipq;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error || !req->newptr)
		return (error);

	/*
	 * XXXRW: Might be a good idea to sanity check the argument and place
	 * an extreme upper bound.
	 */
	if (i < -1)
		return (EINVAL);
	V_maxnipq = i;
	maxnipq_update();
	return (0);
}

SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets, CTLTYPE_INT|CTLFLAG_RW,
    NULL, 0, sysctl_maxnipq, "I",
    "Maximum number of IPv4 fragment reassembly queue entries");

/*
 * Take incoming datagram fragment and try to reassemble it into
 * whole datagram.  If the argument is the first fragment or one
 * in between the function will return NULL and store the mbuf
 * in the fragment chain.  If the argument is the last fragment
 * the packet will be reassembled and the pointer to the new
 * mbuf returned for further processing.  Only m_tags attached
 * to the first packet/fragment are preserved.
 * The IP header is *NOT* adjusted out of iplen.
 */
struct mbuf *
ip_reass(struct mbuf *m)
{
	struct ip *ip;
	struct mbuf *p, *q, *nq, *t;
	struct ipq *fp = NULL;
	struct ipqhead *head;
	int i, hlen, next;
	u_int8_t ecn, ecn0;
	u_short hash;

	/* If maxnipq or maxfragsperpacket are 0, never accept fragments. */
	if (V_maxnipq == 0 || V_maxfragsperpacket == 0) {
		IPSTAT_INC(ips_fragments);
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}

	ip = mtod(m, struct ip *);
	hlen = ip->ip_hl << 2;

	hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
	head = &V_ipq[hash];
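	/*
	 * The bucket is picked by hashing the source address and IP id;
	 * the lookup below still matches on id, src, dst and protocol.
	 */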
	IPQ_LOCK();

	/*
	 * Look for queue of fragments
	 * of this datagram.
	 */
	TAILQ_FOREACH(fp, head, ipq_list)
		if (ip->ip_id == fp->ipq_id &&
		    ip->ip_src.s_addr == fp->ipq_src.s_addr &&
		    ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
#ifdef MAC
		    mac_ipq_match(m, fp) &&
#endif
		    ip->ip_p == fp->ipq_p)
			goto found;

	fp = NULL;

	/*
	 * Attempt to trim the number of allocated fragment queues if it
	 * exceeds the administrative limit.
	 */
	if ((V_nipq > V_maxnipq) && (V_maxnipq > 0)) {
		/*
		 * drop something from the tail of the current queue
		 * before proceeding further
		 */
		struct ipq *q = TAILQ_LAST(head, ipqhead);
		if (q == NULL) {   /* gak */
			for (i = 0; i < IPREASS_NHASH; i++) {
				struct ipq *r = TAILQ_LAST(&V_ipq[i], ipqhead);
				if (r) {
					IPSTAT_ADD(ips_fragtimeout,
					    r->ipq_nfrags);
					ip_freef(&V_ipq[i], r);
					break;
				}
			}
		} else {
			IPSTAT_ADD(ips_fragtimeout, q->ipq_nfrags);
			ip_freef(head, q);
		}
	}

found:
	/*
	 * Adjust ip_len to not reflect header,
	 * convert offset of this to bytes.
	 */
	ip->ip_len = htons(ntohs(ip->ip_len) - hlen);
	if (ip->ip_off & htons(IP_MF)) {
		/*
		 * Make sure that fragments have a data length
		 * that's a non-zero multiple of 8 bytes.
		 */
		if (ip->ip_len == htons(0) || (ntohs(ip->ip_len) & 0x7) != 0) {
			IPSTAT_INC(ips_toosmall);		/* XXX */
			goto dropfrag;
		}
		m->m_flags |= M_IP_FRAG;
	} else
		m->m_flags &= ~M_IP_FRAG;
	ip->ip_off = htons(ntohs(ip->ip_off) << 3);

	/*
	 * Attempt reassembly; if it succeeds, proceed.
	 * ip_reass() will return a different mbuf.
	 */
	IPSTAT_INC(ips_fragments);
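	/*
	 * Stash a pointer to this fragment's IP header in the pkthdr
	 * scratch space; the GETIP() macro below recovers it even after
	 * m_data has been advanced past the header.
	 */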
	m->m_pkthdr.PH_loc.ptr = ip;

	/* Previous ip_reass() started here. */
	/*
	 * Presence of header sizes in mbufs
	 * would confuse code below.
	 */
	m->m_data += hlen;
	m->m_len -= hlen;

	/*
	 * If first fragment to arrive, create a reassembly queue.
	 */
	if (fp == NULL) {
		fp = uma_zalloc(V_ipq_zone, M_NOWAIT);
		if (fp == NULL)
			goto dropfrag;
#ifdef MAC
		if (mac_ipq_init(fp, M_NOWAIT) != 0) {
			uma_zfree(V_ipq_zone, fp);
			fp = NULL;
			goto dropfrag;
		}
		mac_ipq_create(m, fp);
#endif
		TAILQ_INSERT_HEAD(head, fp, ipq_list);
		V_nipq++;
		fp->ipq_nfrags = 1;
		fp->ipq_ttl = IPFRAGTTL;
		fp->ipq_p = ip->ip_p;
		fp->ipq_id = ip->ip_id;
		fp->ipq_src = ip->ip_src;
		fp->ipq_dst = ip->ip_dst;
		fp->ipq_frags = m;
		m->m_nextpkt = NULL;
		goto done;
	} else {
		fp->ipq_nfrags++;
#ifdef MAC
		mac_ipq_update(m, fp);
#endif
	}

#define	GETIP(m)	((struct ip*)((m)->m_pkthdr.PH_loc.ptr))

	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * drop if CE and not-ECT are mixed for the same packet.
	 */
	ecn = ip->ip_tos & IPTOS_ECN_MASK;
	ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT)
			goto dropfrag;
		if (ecn0 != IPTOS_ECN_CE)
			GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
		goto dropfrag;

	/*
	 * Find a segment which begins after this one does.
	 */
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
		if (ntohs(GETIP(q)->ip_off) > ntohs(ip->ip_off))
			break;

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us, otherwise
	 * stick new segment in the proper place.
	 *
	 * If some of the data is dropped from the preceding
	 * segment, then its checksum is invalidated.
	 */
	if (p) {
		i = ntohs(GETIP(p)->ip_off) + ntohs(GETIP(p)->ip_len) -
		    ntohs(ip->ip_off);
		if (i > 0) {
			if (i >= ntohs(ip->ip_len))
				goto dropfrag;
			m_adj(m, i);
			m->m_pkthdr.csum_flags = 0;
			ip->ip_off = htons(ntohs(ip->ip_off) + i);
			ip->ip_len = htons(ntohs(ip->ip_len) - i);
		}
		m->m_nextpkt = p->m_nextpkt;
		p->m_nextpkt = m;
	} else {
		m->m_nextpkt = fp->ipq_frags;
		fp->ipq_frags = m;
	}

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	for (; q != NULL && ntohs(ip->ip_off) + ntohs(ip->ip_len) >
	    ntohs(GETIP(q)->ip_off); q = nq) {
		i = (ntohs(ip->ip_off) + ntohs(ip->ip_len)) -
		    ntohs(GETIP(q)->ip_off);
		if (i < ntohs(GETIP(q)->ip_len)) {
			GETIP(q)->ip_len = htons(ntohs(GETIP(q)->ip_len) - i);
			GETIP(q)->ip_off = htons(ntohs(GETIP(q)->ip_off) + i);
			m_adj(q, i);
			q->m_pkthdr.csum_flags = 0;
			break;
		}
		nq = q->m_nextpkt;
		m->m_nextpkt = nq;
		IPSTAT_INC(ips_fragdropped);
		fp->ipq_nfrags--;
		m_freem(q);
	}

	/*
	 * Check for complete reassembly and perform frag per packet
	 * limiting.
	 *
	 * Frag limiting is performed here so that the nth frag has
	 * a chance to complete the packet before we drop the packet.
	 * As a result, n+1 frags are actually allowed per packet, but
	 * only n will ever be stored. (n = maxfragsperpacket.)
	 *
	 */
	next = 0;
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
		if (ntohs(GETIP(q)->ip_off) != next) {
			if (fp->ipq_nfrags > V_maxfragsperpacket) {
				IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
				ip_freef(head, fp);
			}
			goto done;
		}
		next += ntohs(GETIP(q)->ip_len);
	}
	/* Make sure the last packet didn't have the IP_MF flag */
	if (p->m_flags & M_IP_FRAG) {
		if (fp->ipq_nfrags > V_maxfragsperpacket) {
			IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
			ip_freef(head, fp);
		}
		goto done;
	}

	/*
	 * Reassembly is complete.  Make sure the packet is a sane size.
	 */
	q = fp->ipq_frags;
	ip = GETIP(q);
	if (next + (ip->ip_hl << 2) > IP_MAXPACKET) {
		IPSTAT_INC(ips_toolong);
		IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
		ip_freef(head, fp);
		goto done;
	}

	/*
	 * Concatenate fragments.
	 */
	m = q;
	t = m->m_next;
	m->m_next = NULL;
	m_cat(m, t);
	nq = q->m_nextpkt;
	q->m_nextpkt = NULL;
	for (q = nq; q != NULL; q = nq) {
		nq = q->m_nextpkt;
		q->m_nextpkt = NULL;
		m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data += q->m_pkthdr.csum_data;
		m_cat(m, q);
	}
	/*
	 * In order to do checksumming faster we do 'end-around carry' here
	 * (and not in for{} loop), though it implies we are not going to
	 * reassemble more than 64k fragments.
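	 * E.g., a csum_data of 0x1fffe folds to 0xfffe + 0x1 = 0xffff.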
	 */
	m->m_pkthdr.csum_data =
	    (m->m_pkthdr.csum_data & 0xffff) + (m->m_pkthdr.csum_data >> 16);
#ifdef MAC
	mac_ipq_reassemble(fp, m);
	mac_ipq_destroy(fp);
#endif

	/*
	 * Create header for new ip packet by modifying header of first
	 * packet;  dequeue and discard fragment reassembly header.
	 * Make header visible.
	 */
	ip->ip_len = htons((ip->ip_hl << 2) + next);
	ip->ip_src = fp->ipq_src;
	ip->ip_dst = fp->ipq_dst;
	TAILQ_REMOVE(head, fp, ipq_list);
	V_nipq--;
	uma_zfree(V_ipq_zone, fp);
	m->m_len += (ip->ip_hl << 2);
	m->m_data -= (ip->ip_hl << 2);
	/* some debugging cruft by sklower, below, will go away soon */
	if (m->m_flags & M_PKTHDR)	/* XXX this should be done elsewhere */
		m_fixhdr(m);
	IPSTAT_INC(ips_reassembled);
	IPQ_UNLOCK();
	return (m);

dropfrag:
	IPSTAT_INC(ips_fragdropped);
	if (fp != NULL)
		fp->ipq_nfrags--;
	m_freem(m);
done:
	IPQ_UNLOCK();
	return (NULL);

#undef GETIP
}

/*
 * Free a fragment reassembly header and all
 * associated datagrams.
 */
static void
ip_freef(struct ipqhead *fhp, struct ipq *fp)
{
	struct mbuf *q;

	IPQ_LOCK_ASSERT();

	while (fp->ipq_frags) {
		q = fp->ipq_frags;
		fp->ipq_frags = q->m_nextpkt;
		m_freem(q);
	}
	TAILQ_REMOVE(fhp, fp, ipq_list);
	uma_zfree(V_ipq_zone, fp);
	V_nipq--;
}

/*
 * IP timer processing;
 * if a timer expires on a reassembly
 * queue, discard it.
 */
void
ip_slowtimo(void)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct ipq *fp;
	int i;

	VNET_LIST_RLOCK_NOSLEEP();
	IPQ_LOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		for (i = 0; i < IPREASS_NHASH; i++) {
			for (fp = TAILQ_FIRST(&V_ipq[i]); fp;) {
				struct ipq *fpp;

				fpp = fp;
				fp = TAILQ_NEXT(fp, ipq_list);
				if (--fpp->ipq_ttl == 0) {
					IPSTAT_ADD(ips_fragtimeout,
					    fpp->ipq_nfrags);
					ip_freef(&V_ipq[i], fpp);
				}
			}
		}
		/*
		 * If we are over the maximum number of fragments
		 * (due to the limit being lowered), drain off
		 * enough to get down to the new limit.
		 */
		if (V_maxnipq >= 0 && V_nipq > V_maxnipq) {
			for (i = 0; i < IPREASS_NHASH; i++) {
				while (V_nipq > V_maxnipq &&
				    !TAILQ_EMPTY(&V_ipq[i])) {
					IPSTAT_ADD(ips_fragdropped,
					    TAILQ_FIRST(&V_ipq[i])->ipq_nfrags);
					ip_freef(&V_ipq[i],
					    TAILQ_FIRST(&V_ipq[i]));
				}
			}
		}
		CURVNET_RESTORE();
	}
	IPQ_UNLOCK();
	VNET_LIST_RUNLOCK_NOSLEEP();
}

/*
 * Drain off all datagram fragments.
 */
static void
ip_drain_locked(void)
{
	int i;

	IPQ_LOCK_ASSERT();

	for (i = 0; i < IPREASS_NHASH; i++) {
		while (!TAILQ_EMPTY(&V_ipq[i])) {
			IPSTAT_ADD(ips_fragdropped,
			    TAILQ_FIRST(&V_ipq[i])->ipq_nfrags);
			ip_freef(&V_ipq[i], TAILQ_FIRST(&V_ipq[i]));
		}
	}
}

void
ip_drain(void)
{
	VNET_ITERATOR_DECL(vnet_iter);

	VNET_LIST_RLOCK_NOSLEEP();
	IPQ_LOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		ip_drain_locked();
		CURVNET_RESTORE();
	}
	IPQ_UNLOCK();
	VNET_LIST_RUNLOCK_NOSLEEP();
	in_rtqdrain();
}

/*
 * The protocol to be inserted into ip_protox[] must be already registered
 * in inetsw[], either statically or through pf_proto_register().
 */
int
ipproto_register(short ipproto)
{
	struct protosw *pr;

	/* Sanity checks. */
	if (ipproto <= 0 || ipproto >= IPPROTO_MAX)
		return (EPROTONOSUPPORT);

	/*
	 * The protocol slot must not be occupied by another protocol
	 * already.  An index pointing to IPPROTO_RAW is unused.
	 */
	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		return (EPFNOSUPPORT);
	if (ip_protox[ipproto] != pr - inetsw)	/* IPPROTO_RAW */
		return (EEXIST);

	/* Find the protocol position in inetsw[] and set the index. */
	for (pr = inetdomain.dom_protosw;
	    pr < inetdomain.dom_protoswNPROTOSW; pr++) {
		if (pr->pr_domain->dom_family == PF_INET &&
		    pr->pr_protocol && pr->pr_protocol == ipproto) {
			ip_protox[pr->pr_protocol] = pr - inetsw;
			return (0);
		}
	}
	return (EPROTONOSUPPORT);
}

int
ipproto_unregister(short ipproto)
{
	struct protosw *pr;

	/* Sanity checks. */
	if (ipproto <= 0 || ipproto >= IPPROTO_MAX)
		return (EPROTONOSUPPORT);

	/* Check if the protocol was indeed registered. */
	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		return (EPFNOSUPPORT);
	if (ip_protox[ipproto] == pr - inetsw)	/* IPPROTO_RAW */
		return (ENOENT);

	/* Reset the protocol slot to IPPROTO_RAW. */
	ip_protox[ipproto] = pr - inetsw;
	return (0);
}
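
/*
 * A sketch (not part of this file) of how a protocol module would use the
 * registration interface above; IPPROTO_FOO stands for a hypothetical
 * protocol number whose protosw entry was first added with
 * pf_proto_register():
 *
 *	error = ipproto_register(IPPROTO_FOO);
 *	if (error != 0)
 *		printf("ipproto_register: error %d\n", error);
 */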

/*
 * Given address of next destination (final or next hop), return (referenced)
 * internet address info of interface to be used to get there.
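 * The caller is responsible for releasing the reference with ifa_free().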
 */
struct in_ifaddr *
ip_rtaddr(struct in_addr dst, u_int fibnum)
{
	struct route sro;
	struct sockaddr_in *sin;
	struct in_ifaddr *ia;

	bzero(&sro, sizeof(sro));
	sin = (struct sockaddr_in *)&sro.ro_dst;
	sin->sin_family = AF_INET;
	sin->sin_len = sizeof(*sin);
	sin->sin_addr = dst;
	in_rtalloc_ign(&sro, 0, fibnum);

	if (sro.ro_rt == NULL)
		return (NULL);

	ia = ifatoia(sro.ro_rt->rt_ifa);
	ifa_ref(&ia->ia_ifa);
	RTFREE(sro.ro_rt);
	return (ia);
}

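/*
 * Map PRC_* control codes (derived from incoming ICMP messages) to the
 * errno values reported to transport protocols; zero entries are ignored.
 */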
u_char inetctlerrmap[PRC_NCMDS] = {
	0,		0,		0,		0,
	0,		EMSGSIZE,	EHOSTDOWN,	EHOSTUNREACH,
	EHOSTUNREACH,	EHOSTUNREACH,	ECONNREFUSED,	ECONNREFUSED,
	EMSGSIZE,	EHOSTUNREACH,	0,		0,
	0,		0,		EHOSTUNREACH,	0,
	ENOPROTOOPT,	ECONNREFUSED
};

/*
 * Forward a packet.  If some error occurs return the sender
 * an icmp packet.  Note we can't always generate a meaningful
 * icmp message because icmp doesn't have a large enough repertoire
 * of codes and types.
 *
 * If not forwarding, just drop the packet.  This could be confusing
 * if ipforwarding was zero but some routing protocol was advancing
 * us as a gateway to somewhere.  However, we must let the routing
 * protocol deal with that.
 *
 * The srcrt parameter indicates whether the packet is being forwarded
 * via a source route.
 */
void
ip_forward(struct mbuf *m, int srcrt)
{
	struct ip *ip = mtod(m, struct ip *);
	struct in_ifaddr *ia;
	struct mbuf *mcopy;
	struct in_addr dest;
	struct route ro;
	int error, type = 0, code = 0, mtu = 0;

	if (m->m_flags & (M_BCAST|M_MCAST) || in_canforward(ip->ip_dst) == 0) {
		IPSTAT_INC(ips_cantforward);
		m_freem(m);
		return;
	}
#ifdef IPSTEALTH
	if (!V_ipstealth) {
#endif
		if (ip->ip_ttl <= IPTTLDEC) {
			icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS,
			    0, 0);
			return;
		}
#ifdef IPSTEALTH
	}
#endif

	ia = ip_rtaddr(ip->ip_dst, M_GETFIB(m));
#ifndef IPSEC
	/*
	 * 'ia' may be NULL if there is no route for this destination.
	 * In the IPsec case, don't discard it just yet; pass it to
	 * ip_output in case an outgoing IPsec policy applies.
	 */
	if (!srcrt && ia == NULL) {
		icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, 0, 0);
		return;
	}
#endif

	/*
	 * Save the IP header and at most 8 bytes of the payload,
	 * in case we need to generate an ICMP message to the src.
	 *
	 * XXX this can be optimized a lot by saving the data in a local
	 * buffer on the stack (72 bytes at most), and only allocating the
	 * mbuf if really necessary.  The vast majority of the packets
	 * are forwarded without having to send an ICMP back (either
	 * because unnecessary, or because rate limited), so we really
	 * are wasting a lot of work here.
	 *
	 * We don't use m_copy() because it might return a reference
	 * to a shared cluster.  Both this function and ip_output()
	 * assume exclusive access to the IP header in `m', so any
	 * data in a cluster may change before we reach icmp_error().
	 */
	mcopy = m_gethdr(M_NOWAIT, m->m_type);
	if (mcopy != NULL && !m_dup_pkthdr(mcopy, m, M_NOWAIT)) {
		/*
		 * It's probably ok if the pkthdr dup fails (because
		 * the deep copy of the tag chain failed), but for now
		 * be conservative and just discard the copy since
		 * code below may some day want the tags.
		 */
		m_free(mcopy);
		mcopy = NULL;
	}
	if (mcopy != NULL) {
		mcopy->m_len = min(ntohs(ip->ip_len), M_TRAILINGSPACE(mcopy));
		mcopy->m_pkthdr.len = mcopy->m_len;
		m_copydata(m, 0, mcopy->m_len, mtod(mcopy, caddr_t));
	}

#ifdef IPSTEALTH
	if (!V_ipstealth) {
#endif
		ip->ip_ttl -= IPTTLDEC;
#ifdef IPSTEALTH
	}
#endif

	/*
	 * If forwarding packet using same interface that it came in on,
	 * perhaps should send a redirect to sender to shortcut a hop.
	 * Only send redirect if source is sending directly to us,
	 * and if packet was not source routed (or has any options).
	 * Also, don't send redirect if forwarding using a default route
	 * or a route modified by a redirect.
	 */
	dest.s_addr = 0;
	if (!srcrt && V_ipsendredirects &&
	    ia != NULL && ia->ia_ifp == m->m_pkthdr.rcvif) {
		struct sockaddr_in *sin;
		struct rtentry *rt;

		bzero(&ro, sizeof(ro));
		sin = (struct sockaddr_in *)&ro.ro_dst;
		sin->sin_family = AF_INET;
		sin->sin_len = sizeof(*sin);
		sin->sin_addr = ip->ip_dst;
		in_rtalloc_ign(&ro, 0, M_GETFIB(m));

		rt = ro.ro_rt;

		if (rt && (rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) == 0 &&
		    satosin(rt_key(rt))->sin_addr.s_addr != 0) {
#define	RTA(rt)	((struct in_ifaddr *)(rt->rt_ifa))
			u_long src = ntohl(ip->ip_src.s_addr);

			if (RTA(rt) &&
			    (src & RTA(rt)->ia_subnetmask) == RTA(rt)->ia_subnet) {
				if (rt->rt_flags & RTF_GATEWAY)
					dest.s_addr = satosin(rt->rt_gateway)->sin_addr.s_addr;
				else
					dest.s_addr = ip->ip_dst.s_addr;
				/* Router requirements says to only send host redirects */
				type = ICMP_REDIRECT;
				code = ICMP_REDIRECT_HOST;
			}
		}
		if (rt)
			RTFREE(rt);
	}

	/*
	 * Try to cache the route MTU from ip_output so we can consider it for
	 * the ICMP_UNREACH_NEEDFRAG "Next-Hop MTU" field described in RFC1191.
	 */
	bzero(&ro, sizeof(ro));

	error = ip_output(m, NULL, &ro, IP_FORWARDING, NULL, NULL);

	if (error == EMSGSIZE && ro.ro_rt)
		mtu = ro.ro_rt->rt_rmx.rmx_mtu;
	RO_RTFREE(&ro);

	if (error)
		IPSTAT_INC(ips_cantforward);
	else {
		IPSTAT_INC(ips_forward);
		if (type)
			IPSTAT_INC(ips_redirectsent);
		else {
			if (mcopy)
				m_freem(mcopy);
			if (ia != NULL)
				ifa_free(&ia->ia_ifa);
			return;
		}
	}
	if (mcopy == NULL) {
		if (ia != NULL)
			ifa_free(&ia->ia_ifa);
		return;
	}

	switch (error) {

	case 0:				/* forwarded, but need redirect */
		/* type, code set above */
		break;

	case ENETUNREACH:
	case EHOSTUNREACH:
	case ENETDOWN:
	case EHOSTDOWN:
	default:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_HOST;
		break;

	case EMSGSIZE:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_NEEDFRAG;

#ifdef IPSEC
		/*
		 * If IPsec is configured for this path,
		 * override any possible MTU value set by ip_output.
		 */
		mtu = ip_ipsec_mtu(mcopy, mtu);
#endif /* IPSEC */
		/*
		 * If the MTU was set before, make sure we are below the
		 * interface MTU.
		 * If the MTU wasn't set before, use the interface MTU or
		 * fall back to the next smaller MTU step compared to the
		 * current packet size.
		 */
		if (mtu != 0) {
			if (ia != NULL)
				mtu = min(mtu, ia->ia_ifp->if_mtu);
		} else {
			if (ia != NULL)
				mtu = ia->ia_ifp->if_mtu;
			else
				mtu = ip_next_mtu(ntohs(ip->ip_len), 0);
		}
		IPSTAT_INC(ips_cantfrag);
		break;

	case ENOBUFS:
		/*
		 * A router should not generate ICMP_SOURCEQUENCH, as
		 * RFC 1812 (Requirements for IP Version 4 Routers) requires.
		 * Source quench could be a big problem under DoS attacks,
		 * or if the underlying interface is rate-limited.
		 * Those who need source quench packets may re-enable them
		 * via the net.inet.ip.sendsourcequench sysctl.
		 */
		if (V_ip_sendsourcequench == 0) {
			m_freem(mcopy);
			if (ia != NULL)
				ifa_free(&ia->ia_ifa);
			return;
		} else {
			type = ICMP_SOURCEQUENCH;
			code = 0;
		}
		break;

	case EACCES:			/* ipfw denied packet */
		m_freem(mcopy);
		if (ia != NULL)
			ifa_free(&ia->ia_ifa);
		return;
	}
	if (ia != NULL)
		ifa_free(&ia->ia_ifa);
	icmp_error(mcopy, type, code, dest.s_addr, mtu);
}

void
ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
    struct mbuf *m)
{

	if (inp->inp_socket->so_options & (SO_BINTIME | SO_TIMESTAMP)) {
		struct bintime bt;

		bintime(&bt);
		if (inp->inp_socket->so_options & SO_BINTIME) {
			*mp = sbcreatecontrol((caddr_t)&bt, sizeof(bt),
			    SCM_BINTIME, SOL_SOCKET);
			if (*mp)
				mp = &(*mp)->m_next;
		}
		if (inp->inp_socket->so_options & SO_TIMESTAMP) {
			struct timeval tv;

			bintime2timeval(&bt, &tv);
			*mp = sbcreatecontrol((caddr_t)&tv, sizeof(tv),
			    SCM_TIMESTAMP, SOL_SOCKET);
			if (*mp)
				mp = &(*mp)->m_next;
		}
	}
	if (inp->inp_flags & INP_RECVDSTADDR) {
		*mp = sbcreatecontrol((caddr_t)&ip->ip_dst,
		    sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	if (inp->inp_flags & INP_RECVTTL) {
		*mp = sbcreatecontrol((caddr_t)&ip->ip_ttl,
		    sizeof(u_char), IP_RECVTTL, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#ifdef notyet
	/* XXX
	 * Moving these out of udp_input() made them even more broken
	 * than they already were.
	 */
	/* options were tossed already */
	if (inp->inp_flags & INP_RECVOPTS) {
		*mp = sbcreatecontrol((caddr_t)opts_deleted_above,
		    sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	/* ip_srcroute doesn't do what we want here, need to fix */
	if (inp->inp_flags & INP_RECVRETOPTS) {
		*mp = sbcreatecontrol((caddr_t)ip_srcroute(m),
		    sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#endif
	if (inp->inp_flags & INP_RECVIF) {
		struct ifnet *ifp;
		struct sdlbuf {
			struct sockaddr_dl sdl;
			u_char pad[32];
		} sdlbuf;
		struct sockaddr_dl *sdp;
		struct sockaddr_dl *sdl2 = &sdlbuf.sdl;

		if ((ifp = m->m_pkthdr.rcvif) &&
		    ifp->if_index && ifp->if_index <= V_if_index) {
			sdp = (struct sockaddr_dl *)ifp->if_addr->ifa_addr;
			/*
			 * Change our mind and don't try copy.
			 */
			if (sdp->sdl_family != AF_LINK ||
			    sdp->sdl_len > sizeof(sdlbuf)) {
				goto makedummy;
			}
			bcopy(sdp, sdl2, sdp->sdl_len);
		} else {
makedummy:
			sdl2->sdl_len =
			    offsetof(struct sockaddr_dl, sdl_data[0]);
			sdl2->sdl_family = AF_LINK;
			sdl2->sdl_index = 0;
			sdl2->sdl_nlen = sdl2->sdl_alen = sdl2->sdl_slen = 0;
		}
		*mp = sbcreatecontrol((caddr_t)sdl2, sdl2->sdl_len,
		    IP_RECVIF, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	if (inp->inp_flags & INP_RECVTOS) {
		*mp = sbcreatecontrol((caddr_t)&ip->ip_tos,
		    sizeof(u_char), IP_RECVTOS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
}

/*
 * XXXRW: Multicast routing code in ip_mroute.c is generally MPSAFE, but the
 * ip_rsvp and ip_rsvp_on variables need to be interlocked with rsvp_on
 * locking.  This code remains in ip_input.c as ip_mroute.c is optionally
 * compiled.
 */
static VNET_DEFINE(int, ip_rsvp_on);
VNET_DEFINE(struct socket *, ip_rsvpd);

#define	V_ip_rsvp_on		VNET(ip_rsvp_on)

int
ip_rsvp_init(struct socket *so)
{

	if (so->so_type != SOCK_RAW ||
	    so->so_proto->pr_protocol != IPPROTO_RSVP)
		return EOPNOTSUPP;

	if (V_ip_rsvpd != NULL)
		return EADDRINUSE;

	V_ip_rsvpd = so;
	/*
	 * This may seem silly, but we need to be sure we don't over-increment
	 * the RSVP counter, in case something slips up.
	 */
	if (!V_ip_rsvp_on) {
		V_ip_rsvp_on = 1;
		V_rsvp_on++;
	}

	return 0;
}

int
ip_rsvp_done(void)
{

	V_ip_rsvpd = NULL;
	/*
	 * This may seem silly, but we need to be sure we don't over-decrement
	 * the RSVP counter, in case something slips up.
	 */
	if (V_ip_rsvp_on) {
		V_ip_rsvp_on = 0;
		V_rsvp_on--;
	}
	return 0;
}

void
rsvp_input(struct mbuf *m, int off)	/* XXX must fixup manually */
{

	if (rsvp_input_p) {	/* call the real one if loaded */
		rsvp_input_p(m, off);
		return;
	}

	/* Can still get packets with rsvp_on = 0 if there is a local member
	 * of the group to which the RSVP packet is addressed.  But in this
	 * case we want to throw the packet away.
	 */

	if (!V_rsvp_on) {
		m_freem(m);
		return;
	}

	if (V_ip_rsvpd != NULL) {
		rip_input(m, off);
		return;
	}
	/* Drop the packet */
	m_freem(m);
}