FreeBSD/Linux Kernel Cross Reference
sys/netinet/raw_ip.c
1 /*-
2 * Copyright (c) 1982, 1986, 1988, 1993
3 * The Regents of the University of California.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 4. Neither the name of the University nor the names of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * @(#)raw_ip.c 8.7 (Berkeley) 5/15/95
31 */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD: releng/8.4/sys/netinet/raw_ip.c 231391 2012-02-10 17:49:14Z tuexen $");
35
36 #include "opt_inet6.h"
37 #include "opt_ipsec.h"
38
39 #include <sys/param.h>
40 #include <sys/jail.h>
41 #include <sys/kernel.h>
42 #include <sys/lock.h>
43 #include <sys/malloc.h>
44 #include <sys/mbuf.h>
45 #include <sys/priv.h>
46 #include <sys/proc.h>
47 #include <sys/protosw.h>
48 #include <sys/rwlock.h>
49 #include <sys/signalvar.h>
50 #include <sys/socket.h>
51 #include <sys/socketvar.h>
52 #include <sys/sx.h>
53 #include <sys/sysctl.h>
54 #include <sys/systm.h>
55
56 #include <vm/uma.h>
57
58 #include <net/if.h>
59 #include <net/route.h>
60 #include <net/vnet.h>
61
62 #include <netinet/in.h>
63 #include <netinet/in_systm.h>
64 #include <netinet/in_pcb.h>
65 #include <netinet/in_var.h>
66 #include <netinet/if_ether.h>
67 #include <netinet/ip.h>
68 #include <netinet/ip_var.h>
69 #include <netinet/ip_mroute.h>
70
71 #ifdef IPSEC
72 #include <netipsec/ipsec.h>
73 #endif /*IPSEC*/
74
75 #include <security/mac/mac_framework.h>
76
/*
 * Per-VNET list head and pcbinfo covering all raw IPv4 sockets.
 * V_ripcbinfo also owns the raw-specific hash (see rip_inshash()).
 */
VNET_DEFINE(struct inpcbhead, ripcb);
VNET_DEFINE(struct inpcbinfo, ripcbinfo);

#define	V_ripcb		VNET(ripcb)
#define	V_ripcbinfo	VNET(ripcbinfo)

/*
 * Control and data hooks for ipfw, dummynet, divert and so on.
 * The data hooks are not used here but it is convenient
 * to keep them all in one place.  Each pointer is NULL until the
 * corresponding module registers itself.
 */
VNET_DEFINE(ip_fw_chk_ptr_t, ip_fw_chk_ptr) = NULL;
VNET_DEFINE(ip_fw_ctl_ptr_t, ip_fw_ctl_ptr) = NULL;

int	(*ip_dn_ctl_ptr)(struct sockopt *);
int	(*ip_dn_io_ptr)(struct mbuf **, int, struct ip_fw_args *);
void	(*ip_divert_ptr)(struct mbuf *, int);
int	(*ng_ipfw_input_p)(struct mbuf **, int,
			struct ip_fw_args *, int);

/*
 * Hooks for multicast routing.  They all default to NULL, so leave them not
 * initialized and rely on BSS being set to 0.
 */

/*
 * The socket used to communicate with the multicast routing daemon.
 */
VNET_DEFINE(struct socket *, ip_mrouter);

/*
 * The various mrouter and rsvp functions.  Installed by the optional
 * multicast-routing / RSVP modules; checked for NULL before every call.
 */
int (*ip_mrouter_set)(struct socket *, struct sockopt *);
int (*ip_mrouter_get)(struct socket *, struct sockopt *);
int (*ip_mrouter_done)(void);
int (*ip_mforward)(struct ip *, struct ifnet *, struct mbuf *,
		   struct ip_moptions *);
int (*mrt_ioctl)(u_long, caddr_t, int);
int (*legal_vif_num)(int);
u_long (*ip_mcast_src)(int);

void (*rsvp_input_p)(struct mbuf *m, int off);
int (*ip_rsvp_vif)(struct socket *, struct sockopt *);
void (*ip_rsvp_force_done)(struct socket *);
/*
 * Hash functions
 */

#define INP_PCBHASH_RAW_SIZE	256
#define INP_PCBHASH_RAW(proto, laddr, faddr, mask) \
        (((proto) + (laddr) + (faddr)) % (mask) + 1)

/*
 * Insert a raw inpcb into the raw-IP hash.  Fully specified pcbs
 * (nonzero protocol, bound local AND foreign address) get a real hash
 * chain; all wildcard pcbs share chain 0, which rip_input() scans as a
 * second, separate pass.  Note INP_PCBHASH_RAW yields 1..mask, so slot 0
 * is reserved for the wildcard chain.
 *
 * Caller must hold the pcbinfo write lock and the inp write lock.
 */
static void
rip_inshash(struct inpcb *inp)
{
	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
	struct inpcbhead *pcbhash;
	int hash;

	INP_INFO_WLOCK_ASSERT(pcbinfo);
	INP_WLOCK_ASSERT(inp);

	if (inp->inp_ip_p != 0 &&
	    inp->inp_laddr.s_addr != INADDR_ANY &&
	    inp->inp_faddr.s_addr != INADDR_ANY) {
		hash = INP_PCBHASH_RAW(inp->inp_ip_p, inp->inp_laddr.s_addr,
		    inp->inp_faddr.s_addr, pcbinfo->ipi_hashmask);
	} else
		hash = 0;
	pcbhash = &pcbinfo->ipi_hashbase[hash];
	LIST_INSERT_HEAD(pcbhash, inp, inp_hash);
}
151
/*
 * Remove a raw inpcb from whichever hash chain rip_inshash() put it on.
 * Caller must hold the pcbinfo write lock and the inp write lock.
 */
static void
rip_delhash(struct inpcb *inp)
{

	INP_INFO_WLOCK_ASSERT(inp->inp_pcbinfo);
	INP_WLOCK_ASSERT(inp);

	LIST_REMOVE(inp, inp_hash);
}
161
162 /*
163 * Raw interface to IP protocol.
164 */
165
166 /*
167 * Initialize raw connection block q.
168 */
/*
 * Event handler: track the global maxsockets tunable so the raw pcb UMA
 * zone limit follows it (registered in rip_init()).
 */
static void
rip_zone_change(void *tag)
{

	uma_zone_set_max(V_ripcbinfo.ipi_zone, maxsockets);
}
175
/*
 * UMA zone init callback for raw inpcbs: set up the per-inp lock once,
 * when the item is first allocated (zone is UMA_ZONE_NOFREE).
 */
static int
rip_inpcb_init(void *mem, int size, int flags)
{
	struct inpcb *inp = mem;

	INP_LOCK_INIT(inp, "inp", "rawinp");
	return (0);
}
184
/*
 * Initialize the raw-IP pcb machinery for this VNET: pcbinfo lock, pcb
 * list, raw hash (INP_PCBHASH_RAW_SIZE chains), a degenerate 1-bucket
 * port hash (raw sockets have no ports), and the inpcb UMA zone capped
 * at maxsockets.
 */
void
rip_init(void)
{

	INP_INFO_LOCK_INIT(&V_ripcbinfo, "rip");
	LIST_INIT(&V_ripcb);
#ifdef VIMAGE
	V_ripcbinfo.ipi_vnet = curvnet;
#endif
	V_ripcbinfo.ipi_listhead = &V_ripcb;
	V_ripcbinfo.ipi_hashbase =
	    hashinit(INP_PCBHASH_RAW_SIZE, M_PCB, &V_ripcbinfo.ipi_hashmask);
	V_ripcbinfo.ipi_porthashbase =
	    hashinit(1, M_PCB, &V_ripcbinfo.ipi_porthashmask);
	V_ripcbinfo.ipi_zone = uma_zcreate("ripcb", sizeof(struct inpcb),
	    NULL, NULL, rip_inpcb_init, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(V_ripcbinfo.ipi_zone, maxsockets);
	EVENTHANDLER_REGISTER(maxsockets_change, rip_zone_change, NULL,
	    EVENTHANDLER_PRI_ANY);
}
205
#ifdef VIMAGE
/*
 * VNET teardown counterpart of rip_init(): release both hash tables.
 * NOTE(review): the UMA zone and pcbinfo lock are not destroyed here —
 * the zone is UMA_ZONE_NOFREE/shared; confirm against vnet teardown path.
 */
void
rip_destroy(void)
{

	hashdestroy(V_ripcbinfo.ipi_hashbase, M_PCB,
	    V_ripcbinfo.ipi_hashmask);
	hashdestroy(V_ripcbinfo.ipi_porthashbase, M_PCB,
	    V_ripcbinfo.ipi_porthashmask);
}
#endif
217
/*
 * Deliver mbuf 'n' to raw socket 'last' unless an IPSEC, MAC, or minimum-
 * TTL policy rejects it.  Consumes 'n' in all cases (appended, or freed on
 * policy failure / full socket buffer).  Returns non-zero iff a policy
 * check failed; a socket-buffer overflow still returns 0.
 *
 * Caller holds the inp read lock on 'last'.
 */
static int
rip_append(struct inpcb *last, struct ip *ip, struct mbuf *n,
    struct sockaddr_in *ripsrc)
{
	int policyfail = 0;

	INP_RLOCK_ASSERT(last);

#ifdef IPSEC
	/* check AH/ESP integrity. */
	if (ipsec4_in_reject(n, last)) {
		policyfail = 1;
	}
#endif /* IPSEC */
#ifdef MAC
	if (!policyfail && mac_inpcb_check_deliver(last, n) != 0)
		policyfail = 1;
#endif
	/* Check the minimum TTL for socket. */
	if (last->inp_ip_minttl && last->inp_ip_minttl > ip->ip_ttl)
		policyfail = 1;
	if (!policyfail) {
		struct mbuf *opts = NULL;
		struct socket *so;

		so = last->inp_socket;
		/* Build control-message chain if the socket asked for it. */
		if ((last->inp_flags & INP_CONTROLOPTS) ||
		    (so->so_options & (SO_TIMESTAMP | SO_BINTIME)))
			ip_savecontrol(last, &opts, ip, n);
		SOCKBUF_LOCK(&so->so_rcv);
		if (sbappendaddr_locked(&so->so_rcv,
		    (struct sockaddr *)ripsrc, n, opts) == 0) {
			/* should notify about lost packet */
			m_freem(n);
			if (opts)
				m_freem(opts);
			SOCKBUF_UNLOCK(&so->so_rcv);
		} else
			sorwakeup_locked(so);	/* drops so_rcv lock */
	} else
		m_freem(n);
	return (policyfail);
}
261
/*
 * Setup generic address and protocol structures for raw_input routine, then
 * pass them along with mbuf chain.
 *
 * Delivery is two passes under the pcbinfo read lock: first the hash chain
 * of fully-bound (proto, laddr, faddr) sockets, then chain 0 where all
 * wildcard raw sockets live (see rip_inshash()).  Every match before the
 * last gets an m_copy(); the original mbuf goes to the final match, or is
 * freed if nobody wanted it.
 */
void
rip_input(struct mbuf *m, int off)
{
	struct ifnet *ifp;
	struct ip *ip = mtod(m, struct ip *);
	int proto = ip->ip_p;
	struct inpcb *inp, *last;
	struct sockaddr_in ripsrc;
	int hash;

	/* Source address handed up to the receiving socket(s). */
	bzero(&ripsrc, sizeof(ripsrc));
	ripsrc.sin_len = sizeof(ripsrc);
	ripsrc.sin_family = AF_INET;
	ripsrc.sin_addr = ip->ip_src;
	last = NULL;

	ifp = m->m_pkthdr.rcvif;

	hash = INP_PCBHASH_RAW(proto, ip->ip_src.s_addr,
	    ip->ip_dst.s_addr, V_ripcbinfo.ipi_hashmask);
	INP_INFO_RLOCK(&V_ripcbinfo);
	/* Pass 1: exact-match (proto, laddr, faddr) sockets. */
	LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[hash], inp_hash) {
		if (inp->inp_ip_p != proto)
			continue;
#ifdef INET6
		/* XXX inp locking */
		if ((inp->inp_vflag & INP_IPV4) == 0)
			continue;
#endif
		if (inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
			continue;
		if (inp->inp_faddr.s_addr != ip->ip_src.s_addr)
			continue;
		if (jailed_without_vnet(inp->inp_cred)) {
			/*
			 * XXX: If faddr was bound to multicast group,
			 * jailed raw socket will drop datagram.
			 */
			if (prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
				continue;
		}
		if (last != NULL) {
			struct mbuf *n;

			/* Previous match gets a copy; 'm' is saved for
			 * the final match. */
			n = m_copy(m, 0, (int)M_COPYALL);
			if (n != NULL)
				(void) rip_append(last, ip, n, &ripsrc);
			/* XXX count dropped packet */
			INP_RUNLOCK(last);
		}
		INP_RLOCK(inp);
		last = inp;
	}
	/* Pass 2: wildcard sockets, all parked on chain 0. */
	LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[0], inp_hash) {
		if (inp->inp_ip_p && inp->inp_ip_p != proto)
			continue;
#ifdef INET6
		/* XXX inp locking */
		if ((inp->inp_vflag & INP_IPV4) == 0)
			continue;
#endif
		if (!in_nullhost(inp->inp_laddr) &&
		    !in_hosteq(inp->inp_laddr, ip->ip_dst))
			continue;
		if (!in_nullhost(inp->inp_faddr) &&
		    !in_hosteq(inp->inp_faddr, ip->ip_src))
			continue;
		if (jailed_without_vnet(inp->inp_cred)) {
			/*
			 * Allow raw socket in jail to receive multicast;
			 * assume process had PRIV_NETINET_RAW at attach,
			 * and fall through into normal filter path if so.
			 */
			if (!IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) &&
			    prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
				continue;
		}
		/*
		 * If this raw socket has multicast state, and we
		 * have received a multicast, check if this socket
		 * should receive it, as multicast filtering is now
		 * the responsibility of the transport layer.
		 */
		if (inp->inp_moptions != NULL &&
		    IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
			/*
			 * If the incoming datagram is for IGMP, allow it
			 * through unconditionally to the raw socket.
			 *
			 * In the case of IGMPv2, we may not have explicitly
			 * joined the group, and may have set IFF_ALLMULTI
			 * on the interface.  imo_multi_filter() may discard
			 * control traffic we actually need to see.
			 *
			 * Userland multicast routing daemons should continue
			 * filter the control traffic appropriately.
			 */
			int blocked;

			blocked = MCAST_PASS;
			if (proto != IPPROTO_IGMP) {
				struct sockaddr_in group;

				bzero(&group, sizeof(struct sockaddr_in));
				group.sin_len = sizeof(struct sockaddr_in);
				group.sin_family = AF_INET;
				group.sin_addr = ip->ip_dst;

				blocked = imo_multi_filter(inp->inp_moptions,
				    ifp,
				    (struct sockaddr *)&group,
				    (struct sockaddr *)&ripsrc);
			}

			if (blocked != MCAST_PASS) {
				IPSTAT_INC(ips_notmember);
				continue;
			}
		}
		if (last != NULL) {
			struct mbuf *n;

			n = m_copy(m, 0, (int)M_COPYALL);
			if (n != NULL)
				(void) rip_append(last, ip, n, &ripsrc);
			/* XXX count dropped packet */
			INP_RUNLOCK(last);
		}
		INP_RLOCK(inp);
		last = inp;
	}
	INP_INFO_RUNLOCK(&V_ripcbinfo);
	if (last != NULL) {
		/*
		 * NOTE(review): rip_append() returns non-zero on *policy
		 * failure*, so this bumps ips_delivered on the drop path.
		 * Looks inverted, but matches upstream; ip_input() accounts
		 * delivery separately — verify before changing.
		 */
		if (rip_append(last, ip, m, &ripsrc) != 0)
			IPSTAT_INC(ips_delivered);
		INP_RUNLOCK(last);
	} else {
		m_freem(m);
		IPSTAT_INC(ips_noproto);
		IPSTAT_DEC(ips_delivered);
	}
}
408
/*
 * Generate IP header and pass packet to ip_output.  Tack on options user may
 * have setup with control call.
 *
 * 'dst' is the destination IPv4 address as a raw u_long (used only when the
 * socket is not INP_HDRINCL; presumably network byte order, as it is stored
 * directly into ip_dst — confirm against rip_send()).  Consumes 'm' on all
 * paths.  Returns 0 or an errno.
 */
int
rip_output(struct mbuf *m, struct socket *so, u_long dst)
{
	struct ip *ip;
	int error;
	struct inpcb *inp = sotoinpcb(so);
	int flags = ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0) |
	    IP_ALLOWBROADCAST;

	/*
	 * If the user handed us a complete IP packet, use it.  Otherwise,
	 * allocate an mbuf for a header and fill it in.
	 */
	if ((inp->inp_flags & INP_HDRINCL) == 0) {
		if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) {
			m_freem(m);
			return(EMSGSIZE);
		}
		/* M_PREPEND frees the chain and NULLs 'm' on failure. */
		M_PREPEND(m, sizeof(struct ip), M_DONTWAIT);
		if (m == NULL)
			return(ENOBUFS);

		INP_RLOCK(inp);
		ip = mtod(m, struct ip *);
		ip->ip_tos = inp->inp_ip_tos;
		if (inp->inp_flags & INP_DONTFRAG)
			ip->ip_off = IP_DF;
		else
			ip->ip_off = 0;
		ip->ip_p = inp->inp_ip_p;
		ip->ip_len = m->m_pkthdr.len;
		ip->ip_src = inp->inp_laddr;
		if (jailed(inp->inp_cred)) {
			/*
			 * prison_local_ip4() would be good enough but would
			 * let a source of INADDR_ANY pass, which we do not
			 * want to see from jails.  We do not go through the
			 * pain of in_pcbladdr() for raw sockets.
			 */
			if (ip->ip_src.s_addr == INADDR_ANY)
				error = prison_get_ip4(inp->inp_cred,
				    &ip->ip_src);
			else
				error = prison_local_ip4(inp->inp_cred,
				    &ip->ip_src);
			if (error != 0) {
				INP_RUNLOCK(inp);
				m_freem(m);
				return (error);
			}
		}
		ip->ip_dst.s_addr = dst;
		ip->ip_ttl = inp->inp_ip_ttl;
	} else {
		/* INP_HDRINCL: user supplied the full IP header. */
		if (m->m_pkthdr.len > IP_MAXPACKET) {
			m_freem(m);
			return(EMSGSIZE);
		}
		INP_RLOCK(inp);
		ip = mtod(m, struct ip *);
		error = prison_check_ip4(inp->inp_cred, &ip->ip_src);
		if (error != 0) {
			INP_RUNLOCK(inp);
			m_freem(m);
			return (error);
		}

		/*
		 * Don't allow both user specified and setsockopt options,
		 * and don't allow packet length sizes that will crash.
		 */
		if (((ip->ip_hl != (sizeof (*ip) >> 2)) && inp->inp_options)
		    || (ip->ip_len > m->m_pkthdr.len)
		    || (ip->ip_len < (ip->ip_hl << 2))) {
			INP_RUNLOCK(inp);
			m_freem(m);
			return (EINVAL);
		}
		if (ip->ip_id == 0)
			ip->ip_id = ip_newid();

		/*
		 * XXX prevent ip_output from overwriting header fields.
		 */
		flags |= IP_RAWOUTPUT;
		IPSTAT_INC(ips_rawout);
	}

	if (inp->inp_flags & INP_ONESBCAST)
		flags |= IP_SENDONES;

#ifdef MAC
	mac_inpcb_create_mbuf(inp, m);
#endif

	error = ip_output(m, inp->inp_options, NULL, flags,
	    inp->inp_moptions, inp);
	INP_RUNLOCK(inp);
	return (error);
}
513
/*
 * Raw IP socket option processing.
 *
 * IMPORTANT NOTE regarding access control: Traditionally, raw sockets could
 * only be created by a privileged process, and as such, socket option
 * operations to manage system properties on any raw socket were allowed to
 * take place without explicit additional access control checks.  However,
 * raw sockets can now also be created in jail(), and therefore explicit
 * checks are now required.  Likewise, raw sockets can be used by a process
 * after it gives up privilege, so some caution is required.  For options
 * passed down to the IP layer via ip_ctloutput(), checks are assumed to be
 * performed in ip_ctloutput() and therefore no check occurs here.
 * Unilaterally checking priv_check() here breaks normal IP socket option
 * operations on raw sockets.
 *
 * When adding new socket options here, make sure to add access control
 * checks here as necessary.
 */
int
rip_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct inpcb *inp = sotoinpcb(so);
	int error, optval;

	/* Only IPPROTO_IP-level options handled here, except SO_SETFIB. */
	if (sopt->sopt_level != IPPROTO_IP) {
		if ((sopt->sopt_level == SOL_SOCKET) &&
		    (sopt->sopt_name == SO_SETFIB)) {
			inp->inp_inc.inc_fibnum = so->so_fibnum;
			return (0);
		}
		return (EINVAL);
	}

	error = 0;
	switch (sopt->sopt_dir) {
	case SOPT_GET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			optval = inp->inp_flags & INP_HDRINCL;
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		/* ipfw options: forwarded to the module if loaded. */
		case IP_FW3:	/* generic ipfw v.3 functions */
		case IP_FW_ADD:	/* ADD actually returns the body... */
		case IP_FW_GET:
		case IP_FW_TABLE_GETSIZE:
		case IP_FW_TABLE_LIST:
		case IP_FW_NAT_GET_CONFIG:
		case IP_FW_NAT_GET_LOG:
			if (V_ip_fw_ctl_ptr != NULL)
				error = V_ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		/* dummynet options: forwarded to the module if loaded. */
		case IP_DUMMYNET3:	/* generic dummynet v.3 functions */
		case IP_DUMMYNET_GET:
			if (ip_dn_ctl_ptr != NULL)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break ;

		/* Multicast-routing options require PRIV_NETINET_MROUTE. */
		case MRT_INIT:
		case MRT_DONE:
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
		case MRT_VERSION:
		case MRT_ASSERT:
		case MRT_API_SUPPORT:
		case MRT_API_CONFIG:
		case MRT_ADD_BW_UPCALL:
		case MRT_DEL_BW_UPCALL:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_mrouter_get ? ip_mrouter_get(so, sopt) :
				EOPNOTSUPP;
			break;

		default:
			/* Generic IP options; access checks done there. */
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;

	case SOPT_SET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				break;
			if (optval)
				inp->inp_flags |= INP_HDRINCL;
			else
				inp->inp_flags &= ~INP_HDRINCL;
			break;

		case IP_FW3:	/* generic ipfw v.3 functions */
		case IP_FW_ADD:
		case IP_FW_DEL:
		case IP_FW_FLUSH:
		case IP_FW_ZERO:
		case IP_FW_RESETLOG:
		case IP_FW_TABLE_ADD:
		case IP_FW_TABLE_DEL:
		case IP_FW_TABLE_FLUSH:
		case IP_FW_NAT_CFG:
		case IP_FW_NAT_DEL:
			if (V_ip_fw_ctl_ptr != NULL)
				error = V_ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_DUMMYNET3:	/* generic dummynet v.3 functions */
		case IP_DUMMYNET_CONFIGURE:
		case IP_DUMMYNET_DEL:
		case IP_DUMMYNET_FLUSH:
			if (ip_dn_ctl_ptr != NULL)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT ;
			break ;

		/* RSVP options also gated on PRIV_NETINET_MROUTE. */
		case IP_RSVP_ON:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_init(so);
			break;

		case IP_RSVP_OFF:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_done();
			break;

		case IP_RSVP_VIF_ON:
		case IP_RSVP_VIF_OFF:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_vif ?
				ip_rsvp_vif(so, sopt) : EINVAL;
			break;

		case MRT_INIT:
		case MRT_DONE:
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
		case MRT_VERSION:
		case MRT_ASSERT:
		case MRT_API_SUPPORT:
		case MRT_API_CONFIG:
		case MRT_ADD_BW_UPCALL:
		case MRT_DEL_BW_UPCALL:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_mrouter_set ? ip_mrouter_set(so, sopt) :
					EOPNOTSUPP;
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;
	}

	return (error);
}
693
/*
 * This function exists solely to receive the PRC_IFDOWN messages which are
 * sent by if_down().  It looks for an ifaddr whose ifa_addr is sa, and calls
 * in_ifadown() to remove all routes corresponding to that address.  It also
 * receives the PRC_IFUP messages from if_up() and reinstalls the interface
 * routes.
 */
void
rip_ctlinput(int cmd, struct sockaddr *sa, void *vip)
{
	struct in_ifaddr *ia;
	struct ifnet *ifp;
	int err;
	int flags;

	switch (cmd) {
	case PRC_IFDOWN:
		IN_IFADDR_RLOCK();
		TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
			if (ia->ia_ifa.ifa_addr == sa
			    && (ia->ia_flags & IFA_ROUTE)) {
				/* Hold the ifa across the unlocked work. */
				ifa_ref(&ia->ia_ifa);
				IN_IFADDR_RUNLOCK();
				/*
				 * in_ifscrub kills the interface route.
				 */
				in_ifscrub(ia->ia_ifp, ia, 0);
				/*
				 * in_ifadown gets rid of all the rest of the
				 * routes.  This is not quite the right thing
				 * to do, but at least if we are running a
				 * routing process they will come back.
				 */
				in_ifadown(&ia->ia_ifa, 0);
				ifa_free(&ia->ia_ifa);
				break;
			}
		}
		if (ia == NULL)		/* If ia matched, already unlocked. */
			IN_IFADDR_RUNLOCK();
		break;

	case PRC_IFUP:
		IN_IFADDR_RLOCK();
		TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
			if (ia->ia_ifa.ifa_addr == sa)
				break;
		}
		/* Nothing to do if unknown, or route already installed. */
		if (ia == NULL || (ia->ia_flags & IFA_ROUTE)) {
			IN_IFADDR_RUNLOCK();
			return;
		}
		ifa_ref(&ia->ia_ifa);
		IN_IFADDR_RUNLOCK();
		flags = RTF_UP;
		ifp = ia->ia_ifa.ifa_ifp;

		if ((ifp->if_flags & IFF_LOOPBACK)
		    || (ifp->if_flags & IFF_POINTOPOINT))
			flags |= RTF_HOST;

		/* Re-plumb loopback self-route around the interface route. */
		err = ifa_del_loopback_route((struct ifaddr *)ia, sa);
		if (err == 0)
			ia->ia_flags &= ~IFA_RTSELF;

		err = rtinit(&ia->ia_ifa, RTM_ADD, flags);
		if (err == 0)
			ia->ia_flags |= IFA_ROUTE;

		err = ifa_add_loopback_route((struct ifaddr *)ia, sa);
		if (err == 0)
			ia->ia_flags |= IFA_RTSELF;

		ifa_free(&ia->ia_ifa);
		break;
	}
}
771
/* Default send/receive buffer reservations for raw sockets (bytes),
 * tunable at runtime via net.inet.raw.{maxdgram,recvspace}. */
u_long	rip_sendspace = 9216;
u_long	rip_recvspace = 9216;

SYSCTL_ULONG(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW,
    &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");
SYSCTL_ULONG(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW,
    &rip_recvspace, 0, "Maximum space for incoming raw IP datagrams");
779
/*
 * Attach a new raw socket: requires PRIV_NETINET_RAW, a valid protocol
 * number, and buffer space; allocates the inpcb, marks it IPv4, records
 * the protocol, and inserts it into the raw hash.
 */
static int
rip_attach(struct socket *so, int proto, struct thread *td)
{
	struct inpcb *inp;
	int error;

	inp = sotoinpcb(so);
	KASSERT(inp == NULL, ("rip_attach: inp != NULL"));

	error = priv_check(td, PRIV_NETINET_RAW);
	if (error)
		return (error);
	if (proto >= IPPROTO_MAX || proto < 0)
		return EPROTONOSUPPORT;
	error = soreserve(so, rip_sendspace, rip_recvspace);
	if (error)
		return (error);
	INP_INFO_WLOCK(&V_ripcbinfo);
	error = in_pcballoc(so, &V_ripcbinfo);
	if (error) {
		INP_INFO_WUNLOCK(&V_ripcbinfo);
		return (error);
	}
	/* in_pcballoc() returns with the new inp write-locked. */
	inp = (struct inpcb *)so->so_pcb;
	inp->inp_vflag |= INP_IPV4;
	inp->inp_ip_p = proto;
	inp->inp_ip_ttl = V_ip_defttl;
	rip_inshash(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	INP_WUNLOCK(inp);
	return (0);
}
812
/*
 * Detach a raw socket: unhash the pcb, tear down any multicast-router /
 * RSVP state attached to this socket, and free the pcb.
 */
static void
rip_detach(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_detach: inp == NULL"));
	KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
	    ("rip_detach: not closed"));

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	/* If this socket was the mrouter / RSVP daemon, shut that down. */
	if (so == V_ip_mrouter && ip_mrouter_done)
		ip_mrouter_done();
	if (ip_rsvp_force_done)
		ip_rsvp_force_done(so);
	if (so == V_ip_rsvpd)
		ip_rsvp_done();
	in_pcbdetach(inp);
	in_pcbfree(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
}
836
/*
 * Common disconnect: clear the foreign address and rehash (the pcb moves
 * from its exact-match chain back to the wildcard chain), then drop
 * SS_ISCONNECTED.  Caller holds pcbinfo and inp write locks.
 */
static void
rip_dodisconnect(struct socket *so, struct inpcb *inp)
{

	INP_INFO_WLOCK_ASSERT(inp->inp_pcbinfo);
	INP_WLOCK_ASSERT(inp);

	rip_delhash(inp);
	inp->inp_faddr.s_addr = INADDR_ANY;
	rip_inshash(inp);
	SOCK_LOCK(so);
	so->so_state &= ~SS_ISCONNECTED;
	SOCK_UNLOCK(so);
}
851
852 static void
853 rip_abort(struct socket *so)
854 {
855 struct inpcb *inp;
856
857 inp = sotoinpcb(so);
858 KASSERT(inp != NULL, ("rip_abort: inp == NULL"));
859
860 INP_INFO_WLOCK(&V_ripcbinfo);
861 INP_WLOCK(inp);
862 rip_dodisconnect(so, inp);
863 INP_WUNLOCK(inp);
864 INP_INFO_WUNLOCK(&V_ripcbinfo);
865 }
866
867 static void
868 rip_close(struct socket *so)
869 {
870 struct inpcb *inp;
871
872 inp = sotoinpcb(so);
873 KASSERT(inp != NULL, ("rip_close: inp == NULL"));
874
875 INP_INFO_WLOCK(&V_ripcbinfo);
876 INP_WLOCK(inp);
877 rip_dodisconnect(so, inp);
878 INP_WUNLOCK(inp);
879 INP_INFO_WUNLOCK(&V_ripcbinfo);
880 }
881
882 static int
883 rip_disconnect(struct socket *so)
884 {
885 struct inpcb *inp;
886
887 if ((so->so_state & SS_ISCONNECTED) == 0)
888 return (ENOTCONN);
889
890 inp = sotoinpcb(so);
891 KASSERT(inp != NULL, ("rip_disconnect: inp == NULL"));
892
893 INP_INFO_WLOCK(&V_ripcbinfo);
894 INP_WLOCK(inp);
895 rip_dodisconnect(so, inp);
896 INP_WUNLOCK(inp);
897 INP_INFO_WUNLOCK(&V_ripcbinfo);
898 return (0);
899 }
900
/*
 * Bind a raw socket to a local address.  Validates the sockaddr length,
 * jail visibility of the address, and (unless INP_BINDANY) that the
 * address is configured on some interface; then rehashes the pcb with the
 * new local address.
 */
static int
rip_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct inpcb *inp;
	int error;

	if (nam->sa_len != sizeof(*addr))
		return (EINVAL);

	error = prison_check_ip4(td->td_ucred, &addr->sin_addr);
	if (error != 0)
		return (error);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_bind: inp == NULL"));

	if (TAILQ_EMPTY(&V_ifnet) ||
	    (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK) ||
	    (addr->sin_addr.s_addr &&
	     (inp->inp_flags & INP_BINDANY) == 0 &&
	     ifa_ifwithaddr_check((struct sockaddr *)addr) == 0))
		return (EADDRNOTAVAIL);

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	/* Rehash: the laddr change may move the pcb between chains. */
	rip_delhash(inp);
	inp->inp_laddr = addr->sin_addr;
	rip_inshash(inp);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	return (0);
}
934
/*
 * Connect a raw socket to a foreign address: record faddr, rehash the pcb
 * (it may move onto an exact-match chain), and mark the socket connected.
 */
static int
rip_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct inpcb *inp;

	if (nam->sa_len != sizeof(*addr))
		return (EINVAL);
	if (TAILQ_EMPTY(&V_ifnet))
		return (EADDRNOTAVAIL);
	if (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK)
		return (EAFNOSUPPORT);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_connect: inp == NULL"));

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	inp->inp_faddr = addr->sin_addr;
	rip_inshash(inp);
	soisconnected(so);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	return (0);
}
961
/*
 * Shut down the send side of a raw socket; no protocol action needed
 * beyond marking the socket unable to send.
 */
static int
rip_shutdown(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_shutdown: inp == NULL"));

	INP_WLOCK(inp);
	socantsendmore(so);
	INP_WUNLOCK(inp);
	return (0);
}
975
976 static int
977 rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
978 struct mbuf *control, struct thread *td)
979 {
980 struct inpcb *inp;
981 u_long dst;
982
983 inp = sotoinpcb(so);
984 KASSERT(inp != NULL, ("rip_send: inp == NULL"));
985
986 /*
987 * Note: 'dst' reads below are unlocked.
988 */
989 if (so->so_state & SS_ISCONNECTED) {
990 if (nam) {
991 m_freem(m);
992 return (EISCONN);
993 }
994 dst = inp->inp_faddr.s_addr; /* Unlocked read. */
995 } else {
996 if (nam == NULL) {
997 m_freem(m);
998 return (ENOTCONN);
999 }
1000 dst = ((struct sockaddr_in *)nam)->sin_addr.s_addr;
1001 }
1002 return (rip_output(m, so, dst));
1003 }
1004
/*
 * Sysctl handler exporting all raw inpcbs as struct xinpcb records
 * (netstat backend).  Standard three-phase pattern: size estimate for a
 * NULL oldptr probe, snapshot under read lock with per-pcb refcounts,
 * then copy out and drop the references.
 */
static int
rip_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == 0) {
		n = V_ripcbinfo.ipi_count;
		/* Slack for pcbs created while userland sizes its buffer. */
		n += imax(n / 8, 10);
		req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xinpcb);
		return (0);
	}

	if (req->newptr != 0)
		return (EPERM);

	/*
	 * OK, now we're committed to doing something.
	 */
	INP_INFO_RLOCK(&V_ripcbinfo);
	gencnt = V_ripcbinfo.ipi_gencnt;
	n = V_ripcbinfo.ipi_count;
	INP_INFO_RUNLOCK(&V_ripcbinfo);

	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error)
		return (error);

	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == 0)
		return (ENOMEM);

	/* Snapshot: reference each visible pcb no newer than gencnt. */
	INP_INFO_RLOCK(&V_ripcbinfo);
	for (inp = LIST_FIRST(V_ripcbinfo.ipi_listhead), i = 0; inp && i < n;
	     inp = LIST_NEXT(inp, inp_list)) {
		INP_WLOCK(inp);
		if (inp->inp_gencnt <= gencnt &&
		    cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
			in_pcbref(inp);
			inp_list[i++] = inp;
		}
		INP_WUNLOCK(inp);
	}
	INP_INFO_RUNLOCK(&V_ripcbinfo);
	n = i;

	/* Copy out each referenced pcb without holding the info lock. */
	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		INP_RLOCK(inp);
		if (inp->inp_gencnt <= gencnt) {
			struct xinpcb xi;

			bzero(&xi, sizeof(xi));
			xi.xi_len = sizeof xi;
			/* XXX should avoid extra copy */
			bcopy(inp, &xi.xi_inp, sizeof *inp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xi.xi_socket);
			INP_RUNLOCK(inp);
			error = SYSCTL_OUT(req, &xi, sizeof xi);
		} else
			INP_RUNLOCK(inp);
	}
	/* Release the snapshot references taken above. */
	INP_INFO_WLOCK(&V_ripcbinfo);
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		INP_WLOCK(inp);
		if (!in_pcbrele(inp))
			INP_WUNLOCK(inp);
	}
	INP_INFO_WUNLOCK(&V_ripcbinfo);

	if (!error) {
		/*
		 * Give the user an updated idea of our state.  If the
		 * generation differs from what we told her before, she knows
		 * that something happened while we were processing this
		 * request, and it might be necessary to retry.
		 */
		INP_INFO_RLOCK(&V_ripcbinfo);
		xig.xig_gen = V_ripcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = V_ripcbinfo.ipi_count;
		INP_INFO_RUNLOCK(&V_ripcbinfo);
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	free(inp_list, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist,
    CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0,
    rip_pcblist, "S,xinpcb", "List of active raw IP sockets");
1109
/*
 * User-request dispatch table for raw IPv4 sockets; unlisted operations
 * fall back to the protosw defaults.
 */
struct pr_usrreqs rip_usrreqs = {
	.pru_abort =		rip_abort,
	.pru_attach =		rip_attach,
	.pru_bind =		rip_bind,
	.pru_connect =		rip_connect,
	.pru_control =		in_control,
	.pru_detach =		rip_detach,
	.pru_disconnect =	rip_disconnect,
	.pru_peeraddr =		in_getpeeraddr,
	.pru_send =		rip_send,
	.pru_shutdown =		rip_shutdown,
	.pru_sockaddr =		in_getsockaddr,
	.pru_sosetlabel =	in_pcbsosetlabel,
	.pru_close =		rip_close,
};
Cache object: c6a2f65c5db6d7e448905334dba6722a
|