FreeBSD/Linux Kernel Cross Reference
sys/netinet/raw_ip.c
1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1982, 1986, 1988, 1993
5 * The Regents of the University of California.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * @(#)raw_ip.c 8.7 (Berkeley) 5/15/95
33 */
34
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37
38 #include "opt_inet.h"
39 #include "opt_inet6.h"
40 #include "opt_ipsec.h"
41
42 #include <sys/param.h>
43 #include <sys/jail.h>
44 #include <sys/kernel.h>
45 #include <sys/eventhandler.h>
46 #include <sys/lock.h>
47 #include <sys/malloc.h>
48 #include <sys/mbuf.h>
49 #include <sys/priv.h>
50 #include <sys/proc.h>
51 #include <sys/protosw.h>
52 #include <sys/rmlock.h>
53 #include <sys/rwlock.h>
54 #include <sys/signalvar.h>
55 #include <sys/socket.h>
56 #include <sys/socketvar.h>
57 #include <sys/sx.h>
58 #include <sys/sysctl.h>
59 #include <sys/systm.h>
60
61 #include <vm/uma.h>
62
63 #include <net/if.h>
64 #include <net/if_var.h>
65 #include <net/route.h>
66 #include <net/vnet.h>
67
68 #include <netinet/in.h>
69 #include <netinet/in_systm.h>
70 #include <netinet/in_pcb.h>
71 #include <netinet/in_var.h>
72 #include <netinet/if_ether.h>
73 #include <netinet/ip.h>
74 #include <netinet/ip_var.h>
75 #include <netinet/ip_mroute.h>
76 #include <netinet/ip_icmp.h>
77
78 #include <netipsec/ipsec_support.h>
79
80 #include <machine/stdarg.h>
81 #include <security/mac/mac_framework.h>
82
/* Default TTL stamped into outgoing IP headers (per-vnet). */
VNET_DEFINE(int, ip_defttl) = IPDEFTTL;
/* NOTE(review): description string says "Maximum" but the value is the default TTL. */
SYSCTL_INT(_net_inet_ip, IPCTL_DEFTTL, ttl, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(ip_defttl), 0,
    "Maximum TTL on IP packets");

/* Per-vnet list head and pcbinfo covering all raw IPv4 pcbs. */
VNET_DEFINE(struct inpcbhead, ripcb);
VNET_DEFINE(struct inpcbinfo, ripcbinfo);

#define	V_ripcb			VNET(ripcb)
#define	V_ripcbinfo		VNET(ripcbinfo)

/*
 * Control and data hooks for ipfw, dummynet, divert and so on.
 * The data hooks are not used here but it is convenient
 * to keep them all in one place.  All default to NULL and are filled
 * in when the corresponding module loads.
 */
VNET_DEFINE(ip_fw_chk_ptr_t, ip_fw_chk_ptr) = NULL;
VNET_DEFINE(ip_fw_ctl_ptr_t, ip_fw_ctl_ptr) = NULL;

int	(*ip_dn_ctl_ptr)(struct sockopt *);	/* dummynet control */
int	(*ip_dn_io_ptr)(struct mbuf **, int, struct ip_fw_args *);
void	(*ip_divert_ptr)(struct mbuf *, int);	/* divert(4) delivery */
int	(*ng_ipfw_input_p)(struct mbuf **, int,
	    struct ip_fw_args *, int);		/* ng_ipfw(4) delivery */

#ifdef INET
/*
 * Hooks for multicast routing. They all default to NULL, so leave them not
 * initialized and rely on BSS being set to 0.
 */

/*
 * The socket used to communicate with the multicast routing daemon.
 */
VNET_DEFINE(struct socket *, ip_mrouter);

/*
 * The various mrouter and rsvp functions.
 */
int (*ip_mrouter_set)(struct socket *, struct sockopt *);
int (*ip_mrouter_get)(struct socket *, struct sockopt *);
int (*ip_mrouter_done)(void);
int (*ip_mforward)(struct ip *, struct ifnet *, struct mbuf *,
    struct ip_moptions *);
int (*mrt_ioctl)(u_long, caddr_t, int);
int (*legal_vif_num)(int);
u_long (*ip_mcast_src)(int);

int (*rsvp_input_p)(struct mbuf **, int *, int);
int (*ip_rsvp_vif)(struct socket *, struct sockopt *);
void (*ip_rsvp_force_done)(struct socket *);
#endif /* INET */

extern struct protosw inetsw[];

/* Default raw-socket buffer limits; runtime tunable via sysctl. */
u_long	rip_sendspace = 9216;
SYSCTL_ULONG(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW,
    &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");

u_long	rip_recvspace = 9216;
SYSCTL_ULONG(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW,
    &rip_recvspace, 0, "Maximum space for incoming raw IP datagrams");

/*
 * Hash functions
 */

#define INP_PCBHASH_RAW_SIZE	256
/* Yields buckets 1..mask; bucket 0 is reserved for wildcard-bound pcbs. */
#define INP_PCBHASH_RAW(proto, laddr, faddr, mask) \
    (((proto) + (laddr) + (faddr)) % (mask) + 1)
153
154 #ifdef INET
155 static void
156 rip_inshash(struct inpcb *inp)
157 {
158 struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
159 struct inpcbhead *pcbhash;
160 int hash;
161
162 INP_INFO_WLOCK_ASSERT(pcbinfo);
163 INP_WLOCK_ASSERT(inp);
164
165 if (inp->inp_ip_p != 0 &&
166 inp->inp_laddr.s_addr != INADDR_ANY &&
167 inp->inp_faddr.s_addr != INADDR_ANY) {
168 hash = INP_PCBHASH_RAW(inp->inp_ip_p, inp->inp_laddr.s_addr,
169 inp->inp_faddr.s_addr, pcbinfo->ipi_hashmask);
170 } else
171 hash = 0;
172 pcbhash = &pcbinfo->ipi_hashbase[hash];
173 CK_LIST_INSERT_HEAD(pcbhash, inp, inp_hash);
174 }
175
/*
 * Remove a raw pcb from whichever hash chain rip_inshash() put it on.
 */
static void
rip_delhash(struct inpcb *inp)
{

	INP_INFO_WLOCK_ASSERT(inp->inp_pcbinfo);
	INP_WLOCK_ASSERT(inp);

	CK_LIST_REMOVE(inp, inp_hash);
}
185 #endif /* INET */
186
187 /*
188 * Raw interface to IP protocol.
189 */
190
/*
 * Event handler: keep the raw inpcb zone limit in sync with the
 * system-wide maxsockets tunable when it changes.
 */
static void
rip_zone_change(void *tag)
{

	uma_zone_set_max(V_ripcbinfo.ipi_zone, maxsockets);
}
200
/*
 * UMA zone constructor for raw inpcbs: set up the per-pcb lock.
 * Always succeeds.
 */
static int
rip_inpcb_init(void *mem, int size, int flags)
{
	struct inpcb *inp = mem;

	INP_LOCK_INIT(inp, "inp", "rawinp");
	return (0);
}
209
/*
 * Initialize the raw connection block queue (pcbinfo/hash) and register
 * for maxsockets changes.
 */
void
rip_init(void)
{

	in_pcbinfo_init(&V_ripcbinfo, "rip", &V_ripcb, INP_PCBHASH_RAW_SIZE,
	    1, "ripcb", rip_inpcb_init, IPI_HASHFIELDS_NONE);
	EVENTHANDLER_REGISTER(maxsockets_change, rip_zone_change, NULL,
	    EVENTHANDLER_PRI_ANY);
}
219
#ifdef VIMAGE
/*
 * Tear down the per-vnet raw pcbinfo when a virtual network stack is
 * destroyed (registered below as a VNET_SYSUNINIT).
 */
static void
rip_destroy(void *unused __unused)
{

	in_pcbinfo_destroy(&V_ripcbinfo);
}
VNET_SYSUNINIT(raw_ip, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH, rip_destroy, NULL);
#endif
229
230 #ifdef INET
/*
 * Deliver mbuf 'n' (an IP datagram whose header is 'ip', from source
 * 'ripsrc') to the raw socket owning pcb 'last'.  The datagram is freed
 * if IPsec policy, MAC policy, or the socket's minimum-TTL check rejects
 * it, or if the receive buffer is full.  Returns non-zero when a policy
 * check rejected the packet, 0 otherwise.  Always consumes 'n'.
 */
static int
rip_append(struct inpcb *last, struct ip *ip, struct mbuf *n,
    struct sockaddr_in *ripsrc)
{
	int policyfail = 0;

	INP_LOCK_ASSERT(last);

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
	/* check AH/ESP integrity. */
	if (IPSEC_ENABLED(ipv4)) {
		if (IPSEC_CHECK_POLICY(ipv4, n, last) != 0)
			policyfail = 1;
	}
#endif /* IPSEC */
#ifdef MAC
	if (!policyfail && mac_inpcb_check_deliver(last, n) != 0)
		policyfail = 1;
#endif
	/* Check the minimum TTL for socket. */
	if (last->inp_ip_minttl && last->inp_ip_minttl > ip->ip_ttl)
		policyfail = 1;
	if (!policyfail) {
		struct mbuf *opts = NULL;
		struct socket *so;

		so = last->inp_socket;
		/* Build control mbufs (timestamps etc.) if requested. */
		if ((last->inp_flags & INP_CONTROLOPTS) ||
		    (so->so_options & (SO_TIMESTAMP | SO_BINTIME)))
			ip_savecontrol(last, &opts, ip, n);
		SOCKBUF_LOCK(&so->so_rcv);
		if (sbappendaddr_locked(&so->so_rcv,
		    (struct sockaddr *)ripsrc, n, opts) == 0) {
			/* Receive buffer full: record overflow, drop. */
			soroverflow_locked(so);
			m_freem(n);
			if (opts)
				m_freem(opts);
		} else
			sorwakeup_locked(so);
	} else
		m_freem(n);
	return (policyfail);
}
274
/*
 * Setup generic address and protocol structures for raw_input routine, then
 * pass them along with mbuf chain.
 *
 * Delivery strategy: the pcb hash is walked twice — first the bucket for
 * fully-bound sockets (protocol + local + foreign all set), then bucket 0,
 * which holds wildcard sockets.  Every matching socket except the final
 * one receives a copy of the packet; the final match consumes the
 * original mbuf.  Always returns IPPROTO_DONE (the mbuf is consumed).
 */
int
rip_input(struct mbuf **mp, int *offp, int proto)
{
	struct ifnet *ifp;
	struct mbuf *m = *mp;
	struct ip *ip = mtod(m, struct ip *);
	struct inpcb *inp, *last;
	struct sockaddr_in ripsrc;
	struct epoch_tracker et;
	int hash;

	*mp = NULL;

	/* Source address handed to the receiving socket(s). */
	bzero(&ripsrc, sizeof(ripsrc));
	ripsrc.sin_len = sizeof(ripsrc);
	ripsrc.sin_family = AF_INET;
	ripsrc.sin_addr = ip->ip_src;
	last = NULL;

	ifp = m->m_pkthdr.rcvif;

	hash = INP_PCBHASH_RAW(proto, ip->ip_src.s_addr,
	    ip->ip_dst.s_addr, V_ripcbinfo.ipi_hashmask);
	INP_INFO_RLOCK_ET(&V_ripcbinfo, et);
	/* Pass 1: exact-match sockets. */
	CK_LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[hash], inp_hash) {
		if (inp->inp_ip_p != proto)
			continue;
#ifdef INET6
		/* XXX inp locking */
		if ((inp->inp_vflag & INP_IPV4) == 0)
			continue;
#endif
		if (inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
			continue;
		if (inp->inp_faddr.s_addr != ip->ip_src.s_addr)
			continue;
		if (last != NULL) {
			struct mbuf *n;

			/* Give the previous match its own copy. */
			n = m_copym(m, 0, M_COPYALL, M_NOWAIT);
			if (n != NULL)
				(void) rip_append(last, ip, n, &ripsrc);
			/* XXX count dropped packet */
			INP_RUNLOCK(last);
			last = NULL;
		}
		INP_RLOCK(inp);
		/* Skip pcbs freed while we held only the epoch. */
		if (__predict_false(inp->inp_flags2 & INP_FREED))
			goto skip_1;
		if (jailed_without_vnet(inp->inp_cred)) {
			/*
			 * XXX: If faddr was bound to multicast group,
			 * jailed raw socket will drop datagram.
			 */
			if (prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
				goto skip_1;
		}
		last = inp;
		continue;
	skip_1:
		INP_RUNLOCK(inp);
	}
	/* Pass 2: wildcard sockets live in bucket 0. */
	CK_LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[0], inp_hash) {
		if (inp->inp_ip_p && inp->inp_ip_p != proto)
			continue;
#ifdef INET6
		/* XXX inp locking */
		if ((inp->inp_vflag & INP_IPV4) == 0)
			continue;
#endif
		if (!in_nullhost(inp->inp_laddr) &&
		    !in_hosteq(inp->inp_laddr, ip->ip_dst))
			continue;
		if (!in_nullhost(inp->inp_faddr) &&
		    !in_hosteq(inp->inp_faddr, ip->ip_src))
			continue;
		if (last != NULL) {
			struct mbuf *n;

			n = m_copym(m, 0, M_COPYALL, M_NOWAIT);
			if (n != NULL)
				(void) rip_append(last, ip, n, &ripsrc);
			/* XXX count dropped packet */
			INP_RUNLOCK(last);
			last = NULL;
		}
		INP_RLOCK(inp);
		if (__predict_false(inp->inp_flags2 & INP_FREED))
			goto skip_2;
		if (jailed_without_vnet(inp->inp_cred)) {
			/*
			 * Allow raw socket in jail to receive multicast;
			 * assume process had PRIV_NETINET_RAW at attach,
			 * and fall through into normal filter path if so.
			 */
			if (!IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) &&
			    prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
				goto skip_2;
		}
		/*
		 * If this raw socket has multicast state, and we
		 * have received a multicast, check if this socket
		 * should receive it, as multicast filtering is now
		 * the responsibility of the transport layer.
		 */
		if (inp->inp_moptions != NULL &&
		    IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
			/*
			 * If the incoming datagram is for IGMP, allow it
			 * through unconditionally to the raw socket.
			 *
			 * In the case of IGMPv2, we may not have explicitly
			 * joined the group, and may have set IFF_ALLMULTI
			 * on the interface. imo_multi_filter() may discard
			 * control traffic we actually need to see.
			 *
			 * Userland multicast routing daemons should continue
			 * filter the control traffic appropriately.
			 */
			int blocked;

			blocked = MCAST_PASS;
			if (proto != IPPROTO_IGMP) {
				struct sockaddr_in group;

				bzero(&group, sizeof(struct sockaddr_in));
				group.sin_len = sizeof(struct sockaddr_in);
				group.sin_family = AF_INET;
				group.sin_addr = ip->ip_dst;

				blocked = imo_multi_filter(inp->inp_moptions,
				    ifp,
				    (struct sockaddr *)&group,
				    (struct sockaddr *)&ripsrc);
			}

			if (blocked != MCAST_PASS) {
				IPSTAT_INC(ips_notmember);
				goto skip_2;
			}
		}
		last = inp;
		continue;
	skip_2:
		INP_RUNLOCK(inp);
	}
	INP_INFO_RUNLOCK_ET(&V_ripcbinfo, et);
	if (last != NULL) {
		/*
		 * NOTE(review): rip_append() returns non-zero on policy
		 * failure, so ips_delivered is bumped on the failure path
		 * here — looks inverted; confirm intended stats semantics.
		 */
		if (rip_append(last, ip, m, &ripsrc) != 0)
			IPSTAT_INC(ips_delivered);
		INP_RUNLOCK(last);
	} else {
		if (inetsw[ip_protox[ip->ip_p]].pr_input == rip_input) {
			/*
			 * No listener and no other protocol handler: undo
			 * ip_input()'s delivered count and answer with an
			 * ICMP protocol-unreachable.
			 */
			IPSTAT_INC(ips_noproto);
			IPSTAT_DEC(ips_delivered);
			icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PROTOCOL, 0, 0);
		} else {
			m_freem(m);
		}
	}
	return (IPPROTO_DONE);
}
441
/*
 * Generate IP header and pass packet to ip_output. Tack on options user may
 * have setup with control call.
 *
 * The single variadic argument is the destination address (u_long,
 * network byte order) supplied by rip_send().  With INP_HDRINCL the
 * caller provides a complete IP header, which is validated here;
 * otherwise a header is prepended and filled in from pcb state.
 * Consumes 'm' on every path; returns 0 or an errno value.
 */
int
rip_output(struct mbuf *m, struct socket *so, ...)
{
	struct ip *ip;
	int error;
	struct inpcb *inp = sotoinpcb(so);
	va_list ap;
	u_long dst;
	int flags = ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0) |
	    IP_ALLOWBROADCAST;
	int cnt, hlen;
	u_char opttype, optlen, *cp;

	va_start(ap, so);
	dst = va_arg(ap, u_long);
	va_end(ap);

	/*
	 * If the user handed us a complete IP packet, use it. Otherwise,
	 * allocate an mbuf for a header and fill it in.
	 */
	if ((inp->inp_flags & INP_HDRINCL) == 0) {
		if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) {
			m_freem(m);
			return(EMSGSIZE);
		}
		M_PREPEND(m, sizeof(struct ip), M_NOWAIT);
		if (m == NULL)
			return(ENOBUFS);

		INP_RLOCK(inp);
		ip = mtod(m, struct ip *);
		ip->ip_tos = inp->inp_ip_tos;
		if (inp->inp_flags & INP_DONTFRAG)
			ip->ip_off = htons(IP_DF);
		else
			ip->ip_off = htons(0);
		ip->ip_p = inp->inp_ip_p;
		ip->ip_len = htons(m->m_pkthdr.len);
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst.s_addr = dst;
		if (jailed(inp->inp_cred)) {
			/*
			 * prison_local_ip4() would be good enough but would
			 * let a source of INADDR_ANY pass, which we do not
			 * want to see from jails.
			 */
			if (ip->ip_src.s_addr == INADDR_ANY) {
				/* Select a jail-legal source for this dst. */
				error = in_pcbladdr(inp, &ip->ip_dst, &ip->ip_src,
				    inp->inp_cred);
			} else {
				error = prison_local_ip4(inp->inp_cred,
				    &ip->ip_src);
			}
			if (error != 0) {
				INP_RUNLOCK(inp);
				m_freem(m);
				return (error);
			}
		}
		ip->ip_ttl = inp->inp_ip_ttl;
	} else {
		/* INP_HDRINCL: caller supplied the header; validate it. */
		if (m->m_pkthdr.len > IP_MAXPACKET) {
			m_freem(m);
			return (EMSGSIZE);
		}
		if (m->m_pkthdr.len < sizeof(*ip)) {
			m_freem(m);
			return (EINVAL);
		}
		m = m_pullup(m, sizeof(*ip));
		if (m == NULL)
			return (ENOMEM);
		ip = mtod(m, struct ip *);
		hlen = ip->ip_hl << 2;
		if (m->m_len < hlen) {
			m = m_pullup(m, hlen);
			if (m == NULL)
				return (EINVAL);
			ip = mtod(m, struct ip *);
		}

		INP_RLOCK(inp);
		/*
		 * Don't allow both user specified and setsockopt options,
		 * and don't allow packet length sizes that will crash.
		 */
		if ((hlen < sizeof (*ip))
		    || ((hlen > sizeof (*ip)) && inp->inp_options)
		    || (ntohs(ip->ip_len) != m->m_pkthdr.len)) {
			INP_RUNLOCK(inp);
			m_freem(m);
			return (EINVAL);
		}
		error = prison_check_ip4(inp->inp_cred, &ip->ip_src);
		if (error != 0) {
			INP_RUNLOCK(inp);
			m_freem(m);
			return (error);
		}
		/*
		 * Don't allow IP options which do not have the required
		 * structure as specified in section 3.1 of RFC 791 on
		 * pages 15-23.
		 */
		cp = (u_char *)(ip + 1);
		cnt = hlen - sizeof (struct ip);
		for (; cnt > 0; cnt -= optlen, cp += optlen) {
			opttype = cp[IPOPT_OPTVAL];
			if (opttype == IPOPT_EOL)
				break;
			if (opttype == IPOPT_NOP) {
				/* NOP is the only other 1-byte option. */
				optlen = 1;
				continue;
			}
			/* All remaining options carry a length octet. */
			if (cnt < IPOPT_OLEN + sizeof(u_char)) {
				INP_RUNLOCK(inp);
				m_freem(m);
				return (EINVAL);
			}
			optlen = cp[IPOPT_OLEN];
			if (optlen < IPOPT_OLEN + sizeof(u_char) ||
			    optlen > cnt) {
				INP_RUNLOCK(inp);
				m_freem(m);
				return (EINVAL);
			}
		}
		/*
		 * This doesn't allow application to specify ID of zero,
		 * but we got this limitation from the beginning of history.
		 */
		if (ip->ip_id == 0)
			ip_fillid(ip);

		/*
		 * XXX prevent ip_output from overwriting header fields.
		 */
		flags |= IP_RAWOUTPUT;
		IPSTAT_INC(ips_rawout);
	}

	if (inp->inp_flags & INP_ONESBCAST)
		flags |= IP_SENDONES;

#ifdef MAC
	mac_inpcb_create_mbuf(inp, m);
#endif

	error = ip_output(m, inp->inp_options, NULL, flags,
	    inp->inp_moptions, inp);
	INP_RUNLOCK(inp);
	return (error);
}
600
/*
 * Raw IP socket option processing.
 *
 * IMPORTANT NOTE regarding access control: Traditionally, raw sockets could
 * only be created by a privileged process, and as such, socket option
 * operations to manage system properties on any raw socket were allowed to
 * take place without explicit additional access control checks. However,
 * raw sockets can now also be created in jail(), and therefore explicit
 * checks are now required. Likewise, raw sockets can be used by a process
 * after it gives up privilege, so some caution is required. For options
 * passed down to the IP layer via ip_ctloutput(), checks are assumed to be
 * performed in ip_ctloutput() and therefore no check occurs here.
 * Unilaterally checking priv_check() here breaks normal IP socket option
 * operations on raw sockets.
 *
 * When adding new socket options here, make sure to add access control
 * checks here as necessary.
 *
 * XXX-BZ inp locking?
 */
int
rip_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct inpcb *inp = sotoinpcb(so);
	int error, optval;

	/* SO_SETFIB is the only non-IPPROTO_IP option handled here. */
	if (sopt->sopt_level != IPPROTO_IP) {
		if ((sopt->sopt_level == SOL_SOCKET) &&
		    (sopt->sopt_name == SO_SETFIB)) {
			inp->inp_inc.inc_fibnum = so->so_fibnum;
			return (0);
		}
		return (EINVAL);
	}

	error = 0;
	switch (sopt->sopt_dir) {
	case SOPT_GET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			optval = inp->inp_flags & INP_HDRINCL;
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		/* ipfw get-side options are forwarded if ipfw is loaded. */
		case IP_FW3:	/* generic ipfw v.3 functions */
		case IP_FW_ADD:	/* ADD actually returns the body... */
		case IP_FW_GET:
		case IP_FW_TABLE_GETSIZE:
		case IP_FW_TABLE_LIST:
		case IP_FW_NAT_GET_CONFIG:
		case IP_FW_NAT_GET_LOG:
			if (V_ip_fw_ctl_ptr != NULL)
				error = V_ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_DUMMYNET3:	/* generic dummynet v.3 functions */
		case IP_DUMMYNET_GET:
			if (ip_dn_ctl_ptr != NULL)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break ;

		/* Multicast routing options require PRIV_NETINET_MROUTE. */
		case MRT_INIT:
		case MRT_DONE:
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
		case MRT_VERSION:
		case MRT_ASSERT:
		case MRT_API_SUPPORT:
		case MRT_API_CONFIG:
		case MRT_ADD_BW_UPCALL:
		case MRT_DEL_BW_UPCALL:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_mrouter_get ? ip_mrouter_get(so, sopt) :
			    EOPNOTSUPP;
			break;

		default:
			/* Everything else is handled at the IP layer. */
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;

	case SOPT_SET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				break;
			if (optval)
				inp->inp_flags |= INP_HDRINCL;
			else
				inp->inp_flags &= ~INP_HDRINCL;
			break;

		/* ipfw set-side options are forwarded if ipfw is loaded. */
		case IP_FW3:	/* generic ipfw v.3 functions */
		case IP_FW_ADD:
		case IP_FW_DEL:
		case IP_FW_FLUSH:
		case IP_FW_ZERO:
		case IP_FW_RESETLOG:
		case IP_FW_TABLE_ADD:
		case IP_FW_TABLE_DEL:
		case IP_FW_TABLE_FLUSH:
		case IP_FW_NAT_CFG:
		case IP_FW_NAT_DEL:
			if (V_ip_fw_ctl_ptr != NULL)
				error = V_ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_DUMMYNET3:	/* generic dummynet v.3 functions */
		case IP_DUMMYNET_CONFIGURE:
		case IP_DUMMYNET_DEL:
		case IP_DUMMYNET_FLUSH:
			if (ip_dn_ctl_ptr != NULL)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT ;
			break ;

		/* RSVP options also require PRIV_NETINET_MROUTE. */
		case IP_RSVP_ON:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_init(so);
			break;

		case IP_RSVP_OFF:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_done();
			break;

		case IP_RSVP_VIF_ON:
		case IP_RSVP_VIF_OFF:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_vif ?
			    ip_rsvp_vif(so, sopt) : EINVAL;
			break;

		case MRT_INIT:
		case MRT_DONE:
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
		case MRT_VERSION:
		case MRT_ASSERT:
		case MRT_API_SUPPORT:
		case MRT_API_CONFIG:
		case MRT_ADD_BW_UPCALL:
		case MRT_DEL_BW_UPCALL:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_mrouter_set ? ip_mrouter_set(so, sopt) :
			    EOPNOTSUPP;
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;
	}

	return (error);
}
782
/*
 * This function exists solely to receive the PRC_IFDOWN messages which are
 * sent by if_down(). It looks for an ifaddr whose ifa_addr is sa, and calls
 * in_ifadown() to remove all routes corresponding to that address. It also
 * receives the PRC_IFUP messages from if_up() and reinstalls the interface
 * routes.
 */
void
rip_ctlinput(int cmd, struct sockaddr *sa, void *vip)
{
	struct rm_priotracker in_ifa_tracker;
	struct in_ifaddr *ia;
	struct ifnet *ifp;
	int err;
	int flags;

	switch (cmd) {
	case PRC_IFDOWN:
		IN_IFADDR_RLOCK(&in_ifa_tracker);
		CK_STAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
			if (ia->ia_ifa.ifa_addr == sa
			    && (ia->ia_flags & IFA_ROUTE)) {
				/* Hold the ifa while we drop the lock. */
				ifa_ref(&ia->ia_ifa);
				IN_IFADDR_RUNLOCK(&in_ifa_tracker);
				/*
				 * in_scrubprefix() kills the interface route.
				 */
				in_scrubprefix(ia, 0);
				/*
				 * in_ifadown gets rid of all the rest of the
				 * routes. This is not quite the right thing
				 * to do, but at least if we are running a
				 * routing process they will come back.
				 */
				in_ifadown(&ia->ia_ifa, 0);
				ifa_free(&ia->ia_ifa);
				break;
			}
		}
		if (ia == NULL)		/* If ia matched, already unlocked. */
			IN_IFADDR_RUNLOCK(&in_ifa_tracker);
		break;

	case PRC_IFUP:
		IN_IFADDR_RLOCK(&in_ifa_tracker);
		CK_STAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
			if (ia->ia_ifa.ifa_addr == sa)
				break;
		}
		/* Nothing to do without a match or if a route exists. */
		if (ia == NULL || (ia->ia_flags & IFA_ROUTE)) {
			IN_IFADDR_RUNLOCK(&in_ifa_tracker);
			return;
		}
		ifa_ref(&ia->ia_ifa);
		IN_IFADDR_RUNLOCK(&in_ifa_tracker);
		flags = RTF_UP;
		ifp = ia->ia_ifa.ifa_ifp;

		if ((ifp->if_flags & IFF_LOOPBACK)
		    || (ifp->if_flags & IFF_POINTOPOINT))
			flags |= RTF_HOST;

		/* Re-plumb the loopback route around the new prefix route. */
		err = ifa_del_loopback_route((struct ifaddr *)ia, sa);

		err = rtinit(&ia->ia_ifa, RTM_ADD, flags);
		if (err == 0)
			ia->ia_flags |= IFA_ROUTE;

		err = ifa_add_loopback_route((struct ifaddr *)ia, sa);

		ifa_free(&ia->ia_ifa);
		break;
	}
}
857
/*
 * Create the pcb for a new raw socket.  Requires PRIV_NETINET_RAW and a
 * protocol number in [0, IPPROTO_MAX); reserves socket buffer space per
 * the rip_sendspace/rip_recvspace tunables.
 */
static int
rip_attach(struct socket *so, int proto, struct thread *td)
{
	struct inpcb *inp;
	int error;

	inp = sotoinpcb(so);
	KASSERT(inp == NULL, ("rip_attach: inp != NULL"));

	error = priv_check(td, PRIV_NETINET_RAW);
	if (error)
		return (error);
	if (proto >= IPPROTO_MAX || proto < 0)
		return EPROTONOSUPPORT;
	error = soreserve(so, rip_sendspace, rip_recvspace);
	if (error)
		return (error);
	INP_INFO_WLOCK(&V_ripcbinfo);
	error = in_pcballoc(so, &V_ripcbinfo);
	if (error) {
		INP_INFO_WUNLOCK(&V_ripcbinfo);
		return (error);
	}
	inp = (struct inpcb *)so->so_pcb;
	inp->inp_vflag |= INP_IPV4;
	inp->inp_ip_p = proto;		/* Protocol this socket speaks. */
	inp->inp_ip_ttl = V_ip_defttl;
	rip_inshash(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	INP_WUNLOCK(inp);
	return (0);
}
890
/*
 * Detach a raw socket: unhook it from the hash, shut down any multicast
 * routing / RSVP state owned by this socket, and free the pcb.
 */
static void
rip_detach(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_detach: inp == NULL"));
	KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
	    ("rip_detach: not closed"));

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	/* Tear down mrouter/RSVP state if this was the control socket. */
	if (so == V_ip_mrouter && ip_mrouter_done)
		ip_mrouter_done();
	if (ip_rsvp_force_done)
		ip_rsvp_force_done(so);
	if (so == V_ip_rsvpd)
		ip_rsvp_done();
	in_pcbdetach(inp);
	in_pcbfree(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
}
914
915 static void
916 rip_dodisconnect(struct socket *so, struct inpcb *inp)
917 {
918 struct inpcbinfo *pcbinfo;
919
920 pcbinfo = inp->inp_pcbinfo;
921 INP_INFO_WLOCK(pcbinfo);
922 INP_WLOCK(inp);
923 rip_delhash(inp);
924 inp->inp_faddr.s_addr = INADDR_ANY;
925 rip_inshash(inp);
926 SOCK_LOCK(so);
927 so->so_state &= ~SS_ISCONNECTED;
928 SOCK_UNLOCK(so);
929 INP_WUNLOCK(inp);
930 INP_INFO_WUNLOCK(pcbinfo);
931 }
932
933 static void
934 rip_abort(struct socket *so)
935 {
936 struct inpcb *inp;
937
938 inp = sotoinpcb(so);
939 KASSERT(inp != NULL, ("rip_abort: inp == NULL"));
940
941 rip_dodisconnect(so, inp);
942 }
943
944 static void
945 rip_close(struct socket *so)
946 {
947 struct inpcb *inp;
948
949 inp = sotoinpcb(so);
950 KASSERT(inp != NULL, ("rip_close: inp == NULL"));
951
952 rip_dodisconnect(so, inp);
953 }
954
955 static int
956 rip_disconnect(struct socket *so)
957 {
958 struct inpcb *inp;
959
960 if ((so->so_state & SS_ISCONNECTED) == 0)
961 return (ENOTCONN);
962
963 inp = sotoinpcb(so);
964 KASSERT(inp != NULL, ("rip_disconnect: inp == NULL"));
965
966 rip_dodisconnect(so, inp);
967 return (0);
968 }
969
/*
 * Bind a raw socket to a local address.  The address must be assigned
 * to a local interface unless INP_BINDANY is set, and must be allowed
 * by the caller's jail.
 */
static int
rip_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct inpcb *inp;
	int error;

	if (nam->sa_len != sizeof(*addr))
		return (EINVAL);

	error = prison_check_ip4(td->td_ucred, &addr->sin_addr);
	if (error != 0)
		return (error);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_bind: inp == NULL"));

	if (CK_STAILQ_EMPTY(&V_ifnet) ||
	    (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK) ||
	    (addr->sin_addr.s_addr &&
	     (inp->inp_flags & INP_BINDANY) == 0 &&
	     ifa_ifwithaddr_check((struct sockaddr *)addr) == 0))
		return (EADDRNOTAVAIL);

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	/* The hash slot depends on the local address; re-insert. */
	rip_delhash(inp);
	inp->inp_laddr = addr->sin_addr;
	rip_inshash(inp);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	return (0);
}
1003
/*
 * "Connect" a raw socket: record the foreign address so send(2) works
 * without an explicit destination and rip_input() can match this pcb
 * in its exact-match pass.
 */
static int
rip_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct inpcb *inp;

	if (nam->sa_len != sizeof(*addr))
		return (EINVAL);
	if (CK_STAILQ_EMPTY(&V_ifnet))
		return (EADDRNOTAVAIL);
	if (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK)
		return (EAFNOSUPPORT);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_connect: inp == NULL"));

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	/* The hash slot depends on the foreign address; re-insert. */
	rip_delhash(inp);
	inp->inp_faddr = addr->sin_addr;
	rip_inshash(inp);
	soisconnected(so);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	return (0);
}
1030
1031 static int
1032 rip_shutdown(struct socket *so)
1033 {
1034 struct inpcb *inp;
1035
1036 inp = sotoinpcb(so);
1037 KASSERT(inp != NULL, ("rip_shutdown: inp == NULL"));
1038
1039 INP_WLOCK(inp);
1040 socantsendmore(so);
1041 INP_WUNLOCK(inp);
1042 return (0);
1043 }
1044
1045 static int
1046 rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
1047 struct mbuf *control, struct thread *td)
1048 {
1049 struct inpcb *inp;
1050 u_long dst;
1051
1052 inp = sotoinpcb(so);
1053 KASSERT(inp != NULL, ("rip_send: inp == NULL"));
1054
1055 /*
1056 * Note: 'dst' reads below are unlocked.
1057 */
1058 if (so->so_state & SS_ISCONNECTED) {
1059 if (nam) {
1060 m_freem(m);
1061 return (EISCONN);
1062 }
1063 dst = inp->inp_faddr.s_addr; /* Unlocked read. */
1064 } else {
1065 if (nam == NULL) {
1066 m_freem(m);
1067 return (ENOTCONN);
1068 }
1069 dst = ((struct sockaddr_in *)nam)->sin_addr.s_addr;
1070 }
1071 return (rip_output(m, so, dst));
1072 }
1073 #endif /* INET */
1074
/*
 * Sysctl handler exporting the list of raw pcbs as struct xinpcb records
 * framed by two struct xinpgen headers (net.inet.raw.pcblist).  Pcbs are
 * referenced while the list is snapshotted, copied out without the info
 * lock, then released.
 */
static int
rip_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;
	struct epoch_tracker et;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == 0) {
		/* Size-only probe: report a padded estimate. */
		n = V_ripcbinfo.ipi_count;
		n += imax(n / 8, 10);
		req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xinpcb);
		return (0);
	}

	if (req->newptr != 0)
		return (EPERM);

	/*
	 * OK, now we're committed to doing something.
	 */
	INP_INFO_WLOCK(&V_ripcbinfo);
	gencnt = V_ripcbinfo.ipi_gencnt;
	n = V_ripcbinfo.ipi_count;
	INP_INFO_WUNLOCK(&V_ripcbinfo);

	bzero(&xig, sizeof(xig));
	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error)
		return (error);

	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);

	/* Snapshot: collect up to n visible pcbs, taking a ref on each. */
	INP_INFO_RLOCK_ET(&V_ripcbinfo, et);
	for (inp = CK_LIST_FIRST(V_ripcbinfo.ipi_listhead), i = 0; inp && i < n;
	     inp = CK_LIST_NEXT(inp, inp_list)) {
		INP_WLOCK(inp);
		if (inp->inp_gencnt <= gencnt &&
		    cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
			in_pcbref(inp);
			inp_list[i++] = inp;
		}
		INP_WUNLOCK(inp);
	}
	INP_INFO_RUNLOCK_ET(&V_ripcbinfo, et);
	n = i;

	/* Copy out each snapshotted pcb that is still current. */
	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		INP_RLOCK(inp);
		if (inp->inp_gencnt <= gencnt) {
			struct xinpcb xi;

			in_pcbtoxinpcb(inp, &xi);
			INP_RUNLOCK(inp);
			error = SYSCTL_OUT(req, &xi, sizeof xi);
		} else
			INP_RUNLOCK(inp);
	}
	/* Drop the references taken above. */
	INP_INFO_WLOCK(&V_ripcbinfo);
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		INP_RLOCK(inp);
		if (!in_pcbrele_rlocked(inp))
			INP_RUNLOCK(inp);
	}
	INP_INFO_WUNLOCK(&V_ripcbinfo);

	if (!error) {
		struct epoch_tracker et;
		/*
		 * Give the user an updated idea of our state. If the
		 * generation differs from what we told her before, she knows
		 * that something happened while we were processing this
		 * request, and it might be necessary to retry.
		 */
		INP_INFO_RLOCK_ET(&V_ripcbinfo, et);
		xig.xig_gen = V_ripcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = V_ripcbinfo.ipi_count;
		INP_INFO_RUNLOCK_ET(&V_ripcbinfo, et);
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	free(inp_list, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist,
    CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0,
    rip_pcblist, "S,xinpcb", "List of active raw IP sockets");
1175
#ifdef INET
/* User-request switch wiring the handlers above into the protocol. */
struct pr_usrreqs rip_usrreqs = {
	.pru_abort =		rip_abort,
	.pru_attach =		rip_attach,
	.pru_bind =		rip_bind,
	.pru_connect =		rip_connect,
	.pru_control =		in_control,
	.pru_detach =		rip_detach,
	.pru_disconnect =	rip_disconnect,
	.pru_peeraddr =		in_getpeeraddr,
	.pru_send =		rip_send,
	.pru_shutdown =		rip_shutdown,
	.pru_sockaddr =		in_getsockaddr,
	.pru_sosetlabel =	in_pcbsosetlabel,
	.pru_close =		rip_close,
};
#endif /* INET */
Cache object: 4b55316e4288a4c864b87f416c868d13
|