1 /*-
2 * Copyright (c) 1982, 1986, 1988, 1990, 1993
3 * The Regents of the University of California.
4 * Copyright (c) 2004 The FreeBSD Foundation
5 * Copyright (c) 2004-2008 Robert N. M. Watson
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * @(#)uipc_socket.c 8.3 (Berkeley) 4/15/94
33 */
34
35 /*
36 * Comments on the socket life cycle:
37 *
 * soalloc() sets up socket layer state for a socket, called only by
 * socreate() and sonewconn(). Socket layer private.
40 *
 * sodealloc() tears down socket layer state for a socket, called only by
 * sofree() and the failure paths of socreate() and sonewconn(). Socket
 * layer private.
43 *
44 * pru_attach() associates protocol layer state with an allocated socket;
45 * called only once, may fail, aborting socket allocation. This is called
46 * from socreate() and sonewconn(). Socket layer private.
47 *
48 * pru_detach() disassociates protocol layer state from an attached socket,
49 * and will be called exactly once for sockets in which pru_attach() has
50 * been successfully called. If pru_attach() returned an error,
51 * pru_detach() will not be called. Socket layer private.
52 *
53 * pru_abort() and pru_close() notify the protocol layer that the last
54 * consumer of a socket is starting to tear down the socket, and that the
55 * protocol should terminate the connection. Historically, pru_abort() also
56 * detached protocol state from the socket state, but this is no longer the
57 * case.
58 *
59 * socreate() creates a socket and attaches protocol state. This is a public
60 * interface that may be used by socket layer consumers to create new
61 * sockets.
62 *
63 * sonewconn() creates a socket and attaches protocol state. This is a
64 * public interface that may be used by protocols to create new sockets when
65 * a new connection is received and will be available for accept() on a
66 * listen socket.
67 *
68 * soclose() destroys a socket after possibly waiting for it to disconnect.
69 * This is a public interface that socket consumers should use to close and
70 * release a socket when done with it.
71 *
72 * soabort() destroys a socket without waiting for it to disconnect (used
73 * only for incoming connections that are already partially or fully
74 * connected). This is used internally by the socket layer when clearing
75 * listen socket queues (due to overflow or close on the listen socket), but
76 * is also a public interface protocols may use to abort connections in
77 * their incomplete listen queues should they no longer be required. Sockets
78 * placed in completed connection listen queues should not be aborted for
79 * reasons described in the comment above the soclose() implementation. This
80 * is not a general purpose close routine, and except in the specific
81 * circumstances described here, should not be used.
82 *
 * sofree() will free a socket and its protocol state if all references on
 * the socket have been released, and is the routine used to attempt to free
 * a socket whenever a reference is removed. This is a socket layer private
 * interface.
87 *
88 * NOTE: In addition to socreate() and soclose(), which provide a single
89 * socket reference to the consumer to be managed as required, there are two
 * calls to explicitly manage socket references, soref() and sorele().
91 * Currently, these are generally required only when transitioning a socket
92 * from a listen queue to a file descriptor, in order to prevent garbage
93 * collection of the socket at an untimely moment. For a number of reasons,
94 * these interfaces are not preferred, and should be avoided.
95 *
96 * NOTE: With regard to VNETs the general rule is that callers do not set
97 * curvnet. Exceptions to this rule include soabort(), sodisconnect(),
98 * sofree() (and with that sorele(), sotryfree()), as well as sonewconn()
99 * and sorflush(), which are usually called from a pre-set VNET context.
100 * sopoll() currently does not need a VNET context to be set.
101 */
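
/*
 * As a concrete illustration of the life cycle described above, a minimal
 * sketch (not compiled; error handling abbreviated, and "td" stands for
 * the calling thread) of an in-kernel consumer creating and closing a TCP
 * socket:
 *
 *	struct socket *so;
 *	int error;
 *
 *	error = socreate(AF_INET, &so, SOCK_STREAM, IPPROTO_TCP,
 *	    td->td_ucred, td);
 *	if (error != 0)
 *		return (error);
 *	... use the socket via sobind(), soconnect(), sosend(), ...
 *	error = soclose(so);
 *
 * The single reference returned by socreate() is dropped by soclose();
 * any additional references are managed with soref()/sorele().
 */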
102
103 #include <sys/cdefs.h>
104 __FBSDID("$FreeBSD$");
105
106 #include "opt_inet.h"
107 #include "opt_inet6.h"
108 #include "opt_compat.h"
109
110 #include <sys/param.h>
111 #include <sys/systm.h>
112 #include <sys/fcntl.h>
113 #include <sys/limits.h>
114 #include <sys/lock.h>
115 #include <sys/mac.h>
116 #include <sys/malloc.h>
117 #include <sys/mbuf.h>
118 #include <sys/mutex.h>
119 #include <sys/domain.h>
120 #include <sys/file.h> /* for struct knote */
121 #include <sys/hhook.h>
122 #include <sys/kernel.h>
123 #include <sys/khelp.h>
124 #include <sys/event.h>
125 #include <sys/eventhandler.h>
126 #include <sys/poll.h>
127 #include <sys/proc.h>
128 #include <sys/protosw.h>
129 #include <sys/socket.h>
130 #include <sys/socketvar.h>
131 #include <sys/resourcevar.h>
132 #include <net/route.h>
133 #include <sys/signalvar.h>
134 #include <sys/stat.h>
135 #include <sys/sx.h>
136 #include <sys/sysctl.h>
137 #include <sys/taskqueue.h>
138 #include <sys/uio.h>
139 #include <sys/jail.h>
140 #include <sys/syslog.h>
141 #include <netinet/in.h>
142
143 #include <net/vnet.h>
144
145 #include <security/mac/mac_framework.h>
146
147 #include <vm/uma.h>
148
149 #ifdef COMPAT_FREEBSD32
150 #include <sys/mount.h>
151 #include <sys/sysent.h>
152 #include <compat/freebsd32/freebsd32.h>
153 #endif
154
155 static int soreceive_rcvoob(struct socket *so, struct uio *uio,
156 int flags);
157
158 static void filt_sordetach(struct knote *kn);
159 static int filt_soread(struct knote *kn, long hint);
160 static void filt_sowdetach(struct knote *kn);
161 static int filt_sowrite(struct knote *kn, long hint);
162 static int filt_solisten(struct knote *kn, long hint);
163 static int inline hhook_run_socket(struct socket *so, void *hctx, int32_t h_id);
164 fo_kqfilter_t soo_kqfilter;
165
166 static struct filterops solisten_filtops = {
167 .f_isfd = 1,
168 .f_detach = filt_sordetach,
169 .f_event = filt_solisten,
170 };
171 static struct filterops soread_filtops = {
172 .f_isfd = 1,
173 .f_detach = filt_sordetach,
174 .f_event = filt_soread,
175 };
176 static struct filterops sowrite_filtops = {
177 .f_isfd = 1,
178 .f_detach = filt_sowdetach,
179 .f_event = filt_sowrite,
180 };
181
182 so_gen_t so_gencnt; /* generation count for sockets */
183
184 MALLOC_DEFINE(M_SONAME, "soname", "socket name");
185 MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
186
187 #define VNET_SO_ASSERT(so) \
188 VNET_ASSERT(curvnet != NULL, \
189 ("%s:%d curvnet is NULL, so=%p", __func__, __LINE__, (so)));
190
191 VNET_DEFINE(struct hhook_head *, socket_hhh[HHOOK_SOCKET_LAST + 1]);
192 #define V_socket_hhh VNET(socket_hhh)
193
194 /*
195 * Limit on the number of connections in the listen queue waiting
196 * for accept(2).
197 * NB: The original sysctl somaxconn is still available but hidden
198 * to prevent confusion about the actual purpose of this number.
199 */
200 static u_int somaxconn = SOMAXCONN;
201
202 static int
203 sysctl_somaxconn(SYSCTL_HANDLER_ARGS)
204 {
205 int error;
206 int val;
207
208 val = somaxconn;
209 error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
211 return (error);
212
	/*
	 * The purpose of the UINT_MAX / 3 limit is to ensure that the
	 * formula
	 *	3 * so_qlimit / 2
	 * below will not overflow.
	 */
218
219 if (val < 1 || val > UINT_MAX / 3)
220 return (EINVAL);
221
222 somaxconn = val;
223 return (0);
224 }
225 SYSCTL_PROC(_kern_ipc, OID_AUTO, soacceptqueue, CTLTYPE_UINT | CTLFLAG_RW,
226 0, sizeof(int), sysctl_somaxconn, "I",
227 "Maximum listen socket pending connection accept queue size");
228 SYSCTL_PROC(_kern_ipc, KIPC_SOMAXCONN, somaxconn,
229 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_SKIP,
230 0, sizeof(int), sysctl_somaxconn, "I",
231 "Maximum listen socket pending connection accept queue size (compat)");
232
233 static int numopensockets;
234 SYSCTL_INT(_kern_ipc, OID_AUTO, numopensockets, CTLFLAG_RD,
235 &numopensockets, 0, "Number of open sockets");
236
237 /*
238 * accept_mtx locks down per-socket fields relating to accept queues. See
239 * socketvar.h for an annotation of the protected fields of struct socket.
240 */
241 struct mtx accept_mtx;
242 MTX_SYSINIT(accept_mtx, &accept_mtx, "accept", MTX_DEF);
243
/*
 * so_global_mtx protects the global so_gencnt and numopensockets, as well
 * as the per-socket so_gencnt field.
 */
248 static struct mtx so_global_mtx;
MTX_SYSINIT(so_global_mtx, &so_global_mtx, "so_global", MTX_DEF);
250
251 /*
252 * General IPC sysctl name space, used by sockets and a variety of other IPC
253 * types.
254 */
255 SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC");
256
257 /*
258 * Initialize the socket subsystem and set up the socket
259 * memory allocator.
260 */
261 static uma_zone_t socket_zone;
262 int maxsockets;
263
264 static void
265 socket_zone_change(void *tag)
266 {
267
268 maxsockets = uma_zone_set_max(socket_zone, maxsockets);
269 }
270
271 static void
272 socket_hhook_register(int subtype)
273 {
274
275 if (hhook_head_register(HHOOK_TYPE_SOCKET, subtype,
276 &V_socket_hhh[subtype],
277 HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0)
278 printf("%s: WARNING: unable to register hook\n", __func__);
279 }
280
281 static void
282 socket_hhook_deregister(int subtype)
283 {
284
285 if (hhook_head_deregister(V_socket_hhh[subtype]) != 0)
286 printf("%s: WARNING: unable to deregister hook\n", __func__);
287 }
288
289 static void
290 socket_init(void *tag)
291 {
292
293 socket_zone = uma_zcreate("socket", sizeof(struct socket), NULL, NULL,
294 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
295 maxsockets = uma_zone_set_max(socket_zone, maxsockets);
296 uma_zone_set_warning(socket_zone, "kern.ipc.maxsockets limit reached");
297 EVENTHANDLER_REGISTER(maxsockets_change, socket_zone_change, NULL,
298 EVENTHANDLER_PRI_FIRST);
299 }
300 SYSINIT(socket, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY, socket_init, NULL);
301
302 static void
303 socket_vnet_init(const void *unused __unused)
304 {
305 int i;
306
307 /* We expect a contiguous range */
308 for (i = 0; i <= HHOOK_SOCKET_LAST; i++)
309 socket_hhook_register(i);
310 }
311 VNET_SYSINIT(socket_vnet_init, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY,
312 socket_vnet_init, NULL);
313
314 static void
315 socket_vnet_uninit(const void *unused __unused)
316 {
317 int i;
318
319 for (i = 0; i <= HHOOK_SOCKET_LAST; i++)
320 socket_hhook_deregister(i);
321 }
322 VNET_SYSUNINIT(socket_vnet_uninit, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY,
323 socket_vnet_uninit, NULL);
324
325 /*
326 * Initialise maxsockets. This SYSINIT must be run after
327 * tunable_mbinit().
328 */
329 static void
330 init_maxsockets(void *ignored)
331 {
332
333 TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets);
334 maxsockets = imax(maxsockets, maxfiles);
335 }
336 SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_maxsockets, NULL);
337
338 /*
339 * Sysctl to get and set the maximum global sockets limit. Notify protocols
340 * of the change so that they can update their dependent limits as required.
341 */
342 static int
343 sysctl_maxsockets(SYSCTL_HANDLER_ARGS)
344 {
345 int error, newmaxsockets;
346
347 newmaxsockets = maxsockets;
348 error = sysctl_handle_int(oidp, &newmaxsockets, 0, req);
349 if (error == 0 && req->newptr) {
350 if (newmaxsockets > maxsockets &&
351 newmaxsockets <= maxfiles) {
352 maxsockets = newmaxsockets;
353 EVENTHANDLER_INVOKE(maxsockets_change);
354 } else
355 error = EINVAL;
356 }
357 return (error);
358 }
359 SYSCTL_PROC(_kern_ipc, OID_AUTO, maxsockets, CTLTYPE_INT|CTLFLAG_RW,
360 &maxsockets, 0, sysctl_maxsockets, "IU",
361 "Maximum number of sockets available");
362
363 /*
364 * Socket operation routines. These routines are called by the routines in
365 * sys_socket.c or from a system process, and implement the semantics of
 * socket operations by switching out to the protocol-specific routines.
367 */
368
369 /*
370 * Get a socket structure from our zone, and initialize it. Note that it
371 * would probably be better to allocate socket and PCB at the same time, but
372 * I'm not convinced that all the protocols can be easily modified to do
373 * this.
374 *
375 * soalloc() returns a socket with a ref count of 0.
376 */
377 static struct socket *
378 soalloc(struct vnet *vnet)
379 {
380 struct socket *so;
381
382 so = uma_zalloc(socket_zone, M_NOWAIT | M_ZERO);
383 if (so == NULL)
384 return (NULL);
385 #ifdef MAC
386 if (mac_socket_init(so, M_NOWAIT) != 0) {
387 uma_zfree(socket_zone, so);
388 return (NULL);
389 }
390 #endif
391 if (khelp_init_osd(HELPER_CLASS_SOCKET, &so->osd)) {
392 uma_zfree(socket_zone, so);
393 return (NULL);
394 }
395
396 SOCKBUF_LOCK_INIT(&so->so_snd, "so_snd");
397 SOCKBUF_LOCK_INIT(&so->so_rcv, "so_rcv");
398 sx_init(&so->so_snd.sb_sx, "so_snd_sx");
399 sx_init(&so->so_rcv.sb_sx, "so_rcv_sx");
400 TAILQ_INIT(&so->so_snd.sb_aiojobq);
401 TAILQ_INIT(&so->so_rcv.sb_aiojobq);
402 TASK_INIT(&so->so_snd.sb_aiotask, 0, soaio_snd, so);
403 TASK_INIT(&so->so_rcv.sb_aiotask, 0, soaio_rcv, so);
404 #ifdef VIMAGE
405 VNET_ASSERT(vnet != NULL, ("%s:%d vnet is NULL, so=%p",
406 __func__, __LINE__, so));
407 so->so_vnet = vnet;
408 #endif
409 /* We shouldn't need the so_global_mtx */
410 if (hhook_run_socket(so, NULL, HHOOK_SOCKET_CREATE)) {
411 /* Do we need more comprehensive error returns? */
412 uma_zfree(socket_zone, so);
413 return (NULL);
414 }
415 mtx_lock(&so_global_mtx);
416 so->so_gencnt = ++so_gencnt;
417 ++numopensockets;
418 #ifdef VIMAGE
419 vnet->vnet_sockcnt++;
420 #endif
421 mtx_unlock(&so_global_mtx);
422
423 return (so);
424 }
425
426 /*
427 * Free the storage associated with a socket at the socket layer, tear down
428 * locks, labels, etc. All protocol state is assumed already to have been
429 * torn down (and possibly never set up) by the caller.
430 */
431 static void
432 sodealloc(struct socket *so)
433 {
434
435 KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count));
436 KASSERT(so->so_pcb == NULL, ("sodealloc(): so_pcb != NULL"));
437
438 mtx_lock(&so_global_mtx);
439 so->so_gencnt = ++so_gencnt;
440 --numopensockets; /* Could be below, but faster here. */
441 #ifdef VIMAGE
442 VNET_ASSERT(so->so_vnet != NULL, ("%s:%d so_vnet is NULL, so=%p",
443 __func__, __LINE__, so));
444 so->so_vnet->vnet_sockcnt--;
445 #endif
446 mtx_unlock(&so_global_mtx);
447 if (so->so_rcv.sb_hiwat)
448 (void)chgsbsize(so->so_cred->cr_uidinfo,
449 &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
450 if (so->so_snd.sb_hiwat)
451 (void)chgsbsize(so->so_cred->cr_uidinfo,
452 &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
	/* Remove accept filter if one is present. */
454 if (so->so_accf != NULL)
455 do_setopt_accept_filter(so, NULL);
456 #ifdef MAC
457 mac_socket_destroy(so);
458 #endif
459 hhook_run_socket(so, NULL, HHOOK_SOCKET_CLOSE);
460
461 crfree(so->so_cred);
462 khelp_destroy_osd(&so->osd);
463 sx_destroy(&so->so_snd.sb_sx);
464 sx_destroy(&so->so_rcv.sb_sx);
465 SOCKBUF_LOCK_DESTROY(&so->so_snd);
466 SOCKBUF_LOCK_DESTROY(&so->so_rcv);
467 uma_zfree(socket_zone, so);
468 }
469
470 /*
471 * socreate returns a socket with a ref count of 1. The socket should be
472 * closed with soclose().
473 */
474 int
475 socreate(int dom, struct socket **aso, int type, int proto,
476 struct ucred *cred, struct thread *td)
477 {
478 struct protosw *prp;
479 struct socket *so;
480 int error;
481
482 if (proto)
483 prp = pffindproto(dom, proto, type);
484 else
485 prp = pffindtype(dom, type);
486
487 if (prp == NULL) {
488 /* No support for domain. */
489 if (pffinddomain(dom) == NULL)
490 return (EAFNOSUPPORT);
491 /* No support for socket type. */
492 if (proto == 0 && type != 0)
493 return (EPROTOTYPE);
494 return (EPROTONOSUPPORT);
495 }
496 if (prp->pr_usrreqs->pru_attach == NULL ||
497 prp->pr_usrreqs->pru_attach == pru_attach_notsupp)
498 return (EPROTONOSUPPORT);
499
500 if (prison_check_af(cred, prp->pr_domain->dom_family) != 0)
501 return (EPROTONOSUPPORT);
502
503 if (prp->pr_type != type)
504 return (EPROTOTYPE);
505 so = soalloc(CRED_TO_VNET(cred));
506 if (so == NULL)
507 return (ENOBUFS);
508
509 TAILQ_INIT(&so->so_incomp);
510 TAILQ_INIT(&so->so_comp);
511 so->so_type = type;
512 so->so_cred = crhold(cred);
513 if ((prp->pr_domain->dom_family == PF_INET) ||
514 (prp->pr_domain->dom_family == PF_INET6) ||
515 (prp->pr_domain->dom_family == PF_ROUTE))
516 so->so_fibnum = td->td_proc->p_fibnum;
517 else
518 so->so_fibnum = 0;
519 so->so_proto = prp;
520 #ifdef MAC
521 mac_socket_create(cred, so);
522 #endif
523 knlist_init_mtx(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv));
524 knlist_init_mtx(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd));
525 so->so_count = 1;
526 /*
527 * Auto-sizing of socket buffers is managed by the protocols and
528 * the appropriate flags must be set in the pru_attach function.
529 */
530 CURVNET_SET(so->so_vnet);
531 error = (*prp->pr_usrreqs->pru_attach)(so, proto, td);
532 CURVNET_RESTORE();
533 if (error) {
534 KASSERT(so->so_count == 1, ("socreate: so_count %d",
535 so->so_count));
536 so->so_count = 0;
537 sodealloc(so);
538 return (error);
539 }
540 *aso = so;
541 return (0);
542 }
543
544 #ifdef REGRESSION
545 static int regression_sonewconn_earlytest = 1;
546 SYSCTL_INT(_regression, OID_AUTO, sonewconn_earlytest, CTLFLAG_RW,
    &regression_sonewconn_earlytest, 0, "Perform early sonewconn limit test");
548 #endif
549
550 /*
551 * When an attempt at a new connection is noted on a socket which accepts
552 * connections, sonewconn is called. If the connection is possible (subject
553 * to space constraints, etc.) then we allocate a new structure, properly
554 * linked into the data structure of the original socket, and return this.
555 * Connstatus may be 0, or SS_ISCONFIRMING, or SS_ISCONNECTED.
556 *
557 * Note: the ref count on the socket is 0 on return.
558 */
559 struct socket *
560 sonewconn(struct socket *head, int connstatus)
561 {
562 static struct timeval lastover;
563 static struct timeval overinterval = { 60, 0 };
564 static int overcount;
565
566 struct socket *so;
567 int over;
568
569 ACCEPT_LOCK();
570 over = (head->so_qlen > 3 * head->so_qlimit / 2);
571 ACCEPT_UNLOCK();
572 #ifdef REGRESSION
573 if (regression_sonewconn_earlytest && over) {
574 #else
575 if (over) {
576 #endif
577 overcount++;
578
579 if (ratecheck(&lastover, &overinterval)) {
580 log(LOG_DEBUG, "%s: pcb %p: Listen queue overflow: "
581 "%i already in queue awaiting acceptance "
582 "(%d occurrences)\n",
583 __func__, head->so_pcb, head->so_qlen, overcount);
584
585 overcount = 0;
586 }
587
588 return (NULL);
589 }
590 VNET_ASSERT(head->so_vnet != NULL, ("%s:%d so_vnet is NULL, head=%p",
591 __func__, __LINE__, head));
592 so = soalloc(head->so_vnet);
593 if (so == NULL) {
594 log(LOG_DEBUG, "%s: pcb %p: New socket allocation failure: "
595 "limit reached or out of memory\n",
596 __func__, head->so_pcb);
597 return (NULL);
598 }
599 if ((head->so_options & SO_ACCEPTFILTER) != 0)
600 connstatus = 0;
601 so->so_head = head;
602 so->so_type = head->so_type;
603 so->so_options = head->so_options &~ SO_ACCEPTCONN;
604 so->so_linger = head->so_linger;
605 so->so_state = head->so_state | SS_NOFDREF;
606 so->so_fibnum = head->so_fibnum;
607 so->so_proto = head->so_proto;
608 so->so_cred = crhold(head->so_cred);
609 #ifdef MAC
610 mac_socket_newconn(head, so);
611 #endif
612 knlist_init_mtx(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv));
613 knlist_init_mtx(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd));
614 VNET_SO_ASSERT(head);
615 if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat)) {
616 sodealloc(so);
617 log(LOG_DEBUG, "%s: pcb %p: soreserve() failed\n",
618 __func__, head->so_pcb);
619 return (NULL);
620 }
621 if ((*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) {
622 sodealloc(so);
623 log(LOG_DEBUG, "%s: pcb %p: pru_attach() failed\n",
624 __func__, head->so_pcb);
625 return (NULL);
626 }
627 so->so_rcv.sb_lowat = head->so_rcv.sb_lowat;
628 so->so_snd.sb_lowat = head->so_snd.sb_lowat;
629 so->so_rcv.sb_timeo = head->so_rcv.sb_timeo;
630 so->so_snd.sb_timeo = head->so_snd.sb_timeo;
631 so->so_rcv.sb_flags |= head->so_rcv.sb_flags & SB_AUTOSIZE;
632 so->so_snd.sb_flags |= head->so_snd.sb_flags & SB_AUTOSIZE;
633 so->so_state |= connstatus;
634 ACCEPT_LOCK();
	/*
	 * The accept socket may be tearing down, but we just won a race on
	 * the ACCEPT_LOCK. However, if sctp_peeloff() is called on a
	 * 1-to-many style socket, SO_ACCEPTCONN need not be set.
	 */
641 if (!(head->so_options & SO_ACCEPTCONN) &&
642 ((head->so_proto->pr_protocol != IPPROTO_SCTP) ||
643 (head->so_type != SOCK_SEQPACKET))) {
644 SOCK_LOCK(so);
645 so->so_head = NULL;
646 sofree(so); /* NB: returns ACCEPT_UNLOCK'ed. */
647 return (NULL);
648 }
649 if (connstatus) {
650 TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
651 so->so_qstate |= SQ_COMP;
652 head->so_qlen++;
653 } else {
654 /*
655 * Keep removing sockets from the head until there's room for
656 * us to insert on the tail. In pre-locking revisions, this
657 * was a simple if(), but as we could be racing with other
658 * threads and soabort() requires dropping locks, we must
659 * loop waiting for the condition to be true.
660 */
661 while (head->so_incqlen > head->so_qlimit) {
662 struct socket *sp;
663 sp = TAILQ_FIRST(&head->so_incomp);
664 TAILQ_REMOVE(&head->so_incomp, sp, so_list);
665 head->so_incqlen--;
666 sp->so_qstate &= ~SQ_INCOMP;
667 sp->so_head = NULL;
668 ACCEPT_UNLOCK();
669 soabort(sp);
670 ACCEPT_LOCK();
671 }
672 TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list);
673 so->so_qstate |= SQ_INCOMP;
674 head->so_incqlen++;
675 }
676 ACCEPT_UNLOCK();
677 if (connstatus) {
678 sorwakeup(head);
679 wakeup_one(&head->so_timeo);
680 }
681 return (so);
682 }
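
/*
 * Illustrative protocol usage of sonewconn() (a sketch, not taken from any
 * particular protocol): on receipt of a connection request on listen
 * socket "head", a protocol may call
 *
 *	so = sonewconn(head, 0);
 *
 * placing the new socket on the incomplete queue and later promoting it
 * with soisconnected(so) once the handshake finishes, or it may pass
 * SS_ISCONNECTED directly when the connection is already established, in
 * which case the socket is immediately queued for accept().
 */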
683
684 int
685 sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
686 {
687 int error;
688
689 CURVNET_SET(so->so_vnet);
690 error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, td);
691 CURVNET_RESTORE();
692 return (error);
693 }
694
695 int
696 sobindat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td)
697 {
698 int error;
699
700 CURVNET_SET(so->so_vnet);
701 error = (*so->so_proto->pr_usrreqs->pru_bindat)(fd, so, nam, td);
702 CURVNET_RESTORE();
703 return (error);
704 }
705
706 /*
707 * solisten() transitions a socket from a non-listening state to a listening
708 * state, but can also be used to update the listen queue depth on an
709 * existing listen socket. The protocol will call back into the sockets
710 * layer using solisten_proto_check() and solisten_proto() to check and set
711 * socket-layer listen state. Call backs are used so that the protocol can
712 * acquire both protocol and socket layer locks in whatever order is required
713 * by the protocol.
714 *
715 * Protocol implementors are advised to hold the socket lock across the
716 * socket-layer test and set to avoid races at the socket layer.
717 */
718 int
719 solisten(struct socket *so, int backlog, struct thread *td)
720 {
721 int error;
722
723 CURVNET_SET(so->so_vnet);
724 error = (*so->so_proto->pr_usrreqs->pru_listen)(so, backlog, td);
725 CURVNET_RESTORE();
726 return (error);
727 }
728
729 int
730 solisten_proto_check(struct socket *so)
731 {
732
733 SOCK_LOCK_ASSERT(so);
734
735 if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
736 SS_ISDISCONNECTING))
737 return (EINVAL);
738 return (0);
739 }
740
741 void
742 solisten_proto(struct socket *so, int backlog)
743 {
744
745 SOCK_LOCK_ASSERT(so);
746
747 if (backlog < 0 || backlog > somaxconn)
748 backlog = somaxconn;
749 so->so_qlimit = backlog;
750 so->so_options |= SO_ACCEPTCONN;
751 }
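
/*
 * A minimal sketch of the callback pattern described above, as a
 * hypothetical protocol's pru_listen method (XPROTO_LOCK() is an
 * illustrative protocol lock, not a real API):
 *
 *	static int
 *	xproto_listen(struct socket *so, int backlog, struct thread *td)
 *	{
 *		int error;
 *
 *		XPROTO_LOCK();
 *		SOCK_LOCK(so);
 *		error = solisten_proto_check(so);
 *		if (error == 0)
 *			solisten_proto(so, backlog);
 *		SOCK_UNLOCK(so);
 *		XPROTO_UNLOCK();
 *		return (error);
 *	}
 */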
752
753 /*
754 * Evaluate the reference count and named references on a socket; if no
755 * references remain, free it. This should be called whenever a reference is
756 * released, such as in sorele(), but also when named reference flags are
757 * cleared in socket or protocol code.
758 *
759 * sofree() will free the socket if:
760 *
761 * - There are no outstanding file descriptor references or related consumers
762 * (so_count == 0).
763 *
764 * - The socket has been closed by user space, if ever open (SS_NOFDREF).
765 *
766 * - The protocol does not have an outstanding strong reference on the socket
767 * (SS_PROTOREF).
768 *
 * - The socket is not in a completed connection queue, where a process may
 *   already have been notified that it is present. If it were removed, the
 *   user process could block in accept() despite select() having indicated
 *   that the socket was ready.
772 */
773 void
774 sofree(struct socket *so)
775 {
776 struct protosw *pr = so->so_proto;
777 struct socket *head;
778
779 ACCEPT_LOCK_ASSERT();
780 SOCK_LOCK_ASSERT(so);
781
782 if ((so->so_state & SS_NOFDREF) == 0 || so->so_count != 0 ||
783 (so->so_state & SS_PROTOREF) || (so->so_qstate & SQ_COMP)) {
784 SOCK_UNLOCK(so);
785 ACCEPT_UNLOCK();
786 return;
787 }
788
789 head = so->so_head;
790 if (head != NULL) {
791 KASSERT((so->so_qstate & SQ_COMP) != 0 ||
792 (so->so_qstate & SQ_INCOMP) != 0,
793 ("sofree: so_head != NULL, but neither SQ_COMP nor "
794 "SQ_INCOMP"));
795 KASSERT((so->so_qstate & SQ_COMP) == 0 ||
796 (so->so_qstate & SQ_INCOMP) == 0,
797 ("sofree: so->so_qstate is SQ_COMP and also SQ_INCOMP"));
798 TAILQ_REMOVE(&head->so_incomp, so, so_list);
799 head->so_incqlen--;
800 so->so_qstate &= ~SQ_INCOMP;
801 so->so_head = NULL;
802 }
803 KASSERT((so->so_qstate & SQ_COMP) == 0 &&
804 (so->so_qstate & SQ_INCOMP) == 0,
805 ("sofree: so_head == NULL, but still SQ_COMP(%d) or SQ_INCOMP(%d)",
806 so->so_qstate & SQ_COMP, so->so_qstate & SQ_INCOMP));
807 if (so->so_options & SO_ACCEPTCONN) {
808 KASSERT((TAILQ_EMPTY(&so->so_comp)),
809 ("sofree: so_comp populated"));
810 KASSERT((TAILQ_EMPTY(&so->so_incomp)),
811 ("sofree: so_incomp populated"));
812 }
813 SOCK_UNLOCK(so);
814 ACCEPT_UNLOCK();
815
816 VNET_SO_ASSERT(so);
817 if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
818 (*pr->pr_domain->dom_dispose)(so);
819 if (pr->pr_usrreqs->pru_detach != NULL)
820 (*pr->pr_usrreqs->pru_detach)(so);
821
822 /*
823 * From this point on, we assume that no other references to this
824 * socket exist anywhere else in the stack. Therefore, no locks need
825 * to be acquired or held.
826 *
	 * We used to do a lot of socket buffer and socket locking here, as
	 * well as invoke sorflush() and perform wakeups. The direct calls to
	 * dom_dispose() and sbrelease_internal() are an inlining of what was
	 * necessary from sorflush().
	 *
	 * Notice that dom_dispose() and pru_detach() have already been
	 * called above, before the socket buffer and kqueue state are torn
	 * down here. Protocols should not assume they can perform socket
	 * wakeups, etc., in their detach code.
835 */
836 sbdestroy(&so->so_snd, so);
837 sbdestroy(&so->so_rcv, so);
838 seldrain(&so->so_snd.sb_sel);
839 seldrain(&so->so_rcv.sb_sel);
840 knlist_destroy(&so->so_rcv.sb_sel.si_note);
841 knlist_destroy(&so->so_snd.sb_sel.si_note);
842 sodealloc(so);
843 }
844
845 /*
846 * Close a socket on last file table reference removal. Initiate disconnect
847 * if connected. Free socket when disconnect complete.
848 *
849 * This function will sorele() the socket. Note that soclose() may be called
850 * prior to the ref count reaching zero. The actual socket structure will
851 * not be freed until the ref count reaches zero.
852 */
853 int
854 soclose(struct socket *so)
855 {
856 int error = 0;
857
858 KASSERT(!(so->so_state & SS_NOFDREF), ("soclose: SS_NOFDREF on enter"));
859
860 CURVNET_SET(so->so_vnet);
861 funsetown(&so->so_sigio);
862 if (so->so_state & SS_ISCONNECTED) {
863 if ((so->so_state & SS_ISDISCONNECTING) == 0) {
864 error = sodisconnect(so);
865 if (error) {
866 if (error == ENOTCONN)
867 error = 0;
868 goto drop;
869 }
870 }
871 if (so->so_options & SO_LINGER) {
872 if ((so->so_state & SS_ISDISCONNECTING) &&
873 (so->so_state & SS_NBIO))
874 goto drop;
875 while (so->so_state & SS_ISCONNECTED) {
876 error = tsleep(&so->so_timeo,
877 PSOCK | PCATCH, "soclos",
878 so->so_linger * hz);
879 if (error)
880 break;
881 }
882 }
883 }
884
885 drop:
886 if (so->so_proto->pr_usrreqs->pru_close != NULL)
887 (*so->so_proto->pr_usrreqs->pru_close)(so);
888 ACCEPT_LOCK();
889 if (so->so_options & SO_ACCEPTCONN) {
890 struct socket *sp;
891 /*
892 * Prevent new additions to the accept queues due
893 * to ACCEPT_LOCK races while we are draining them.
894 */
895 so->so_options &= ~SO_ACCEPTCONN;
896 while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
897 TAILQ_REMOVE(&so->so_incomp, sp, so_list);
898 so->so_incqlen--;
899 sp->so_qstate &= ~SQ_INCOMP;
900 sp->so_head = NULL;
901 ACCEPT_UNLOCK();
902 soabort(sp);
903 ACCEPT_LOCK();
904 }
905 while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
906 TAILQ_REMOVE(&so->so_comp, sp, so_list);
907 so->so_qlen--;
908 sp->so_qstate &= ~SQ_COMP;
909 sp->so_head = NULL;
910 ACCEPT_UNLOCK();
911 soabort(sp);
912 ACCEPT_LOCK();
913 }
914 KASSERT((TAILQ_EMPTY(&so->so_comp)),
915 ("%s: so_comp populated", __func__));
916 KASSERT((TAILQ_EMPTY(&so->so_incomp)),
917 ("%s: so_incomp populated", __func__));
918 }
919 SOCK_LOCK(so);
920 KASSERT((so->so_state & SS_NOFDREF) == 0, ("soclose: NOFDREF"));
921 so->so_state |= SS_NOFDREF;
922 sorele(so); /* NB: Returns with ACCEPT_UNLOCK(). */
923 CURVNET_RESTORE();
924 return (error);
925 }
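
/*
 * For example, with SO_LINGER enabled and so_linger set to 5, the blocking
 * path in soclose() above sleeps in tsleep() for up to 5 * hz ticks
 * (interruptibly, and not at all if SS_NBIO is set) waiting for the
 * disconnect to complete before the socket reference is released.
 */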
926
927 /*
928 * soabort() is used to abruptly tear down a connection, such as when a
929 * resource limit is reached (listen queue depth exceeded), or if a listen
930 * socket is closed while there are sockets waiting to be accepted.
931 *
932 * This interface is tricky, because it is called on an unreferenced socket,
933 * and must be called only by a thread that has actually removed the socket
934 * from the listen queue it was on, or races with other threads are risked.
935 *
936 * This interface will call into the protocol code, so must not be called
937 * with any socket locks held. Protocols do call it while holding their own
938 * recursible protocol mutexes, but this is something that should be subject
939 * to review in the future.
940 */
941 void
942 soabort(struct socket *so)
943 {
944
945 /*
	 * As far as possible, assert that no references to this
947 * socket are held. This is not quite the same as asserting that the
948 * current thread is responsible for arranging for no references, but
949 * is as close as we can get for now.
950 */
951 KASSERT(so->so_count == 0, ("soabort: so_count"));
952 KASSERT((so->so_state & SS_PROTOREF) == 0, ("soabort: SS_PROTOREF"));
953 KASSERT(so->so_state & SS_NOFDREF, ("soabort: !SS_NOFDREF"));
	KASSERT((so->so_qstate & SQ_COMP) == 0, ("soabort: SQ_COMP"));
	KASSERT((so->so_qstate & SQ_INCOMP) == 0, ("soabort: SQ_INCOMP"));
956 VNET_SO_ASSERT(so);
957
958 if (so->so_proto->pr_usrreqs->pru_abort != NULL)
959 (*so->so_proto->pr_usrreqs->pru_abort)(so);
960 ACCEPT_LOCK();
961 SOCK_LOCK(so);
962 sofree(so);
963 }
964
965 int
966 soaccept(struct socket *so, struct sockaddr **nam)
967 {
968 int error;
969
970 SOCK_LOCK(so);
971 KASSERT((so->so_state & SS_NOFDREF) != 0, ("soaccept: !NOFDREF"));
972 so->so_state &= ~SS_NOFDREF;
973 SOCK_UNLOCK(so);
974
975 CURVNET_SET(so->so_vnet);
976 error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
977 CURVNET_RESTORE();
978 return (error);
979 }
980
981 int
982 soconnect(struct socket *so, struct sockaddr *nam, struct thread *td)
983 {
984
985 return (soconnectat(AT_FDCWD, so, nam, td));
986 }
987
988 int
989 soconnectat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td)
990 {
991 int error;
992
993 if (so->so_options & SO_ACCEPTCONN)
994 return (EOPNOTSUPP);
995
996 CURVNET_SET(so->so_vnet);
997 /*
998 * If protocol is connection-based, can only connect once.
999 * Otherwise, if connected, try to disconnect first. This allows
1000 * user to disconnect by connecting to, e.g., a null address.
1001 */
1002 if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
1003 ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
1004 (error = sodisconnect(so)))) {
1005 error = EISCONN;
1006 } else {
1007 /*
1008 * Prevent accumulated error from previous connection from
1009 * biting us.
1010 */
1011 so->so_error = 0;
1012 if (fd == AT_FDCWD) {
1013 error = (*so->so_proto->pr_usrreqs->pru_connect)(so,
1014 nam, td);
1015 } else {
1016 error = (*so->so_proto->pr_usrreqs->pru_connectat)(fd,
1017 so, nam, td);
1018 }
1019 }
1020 CURVNET_RESTORE();
1021
1022 return (error);
1023 }
1024
1025 int
1026 soconnect2(struct socket *so1, struct socket *so2)
1027 {
1028 int error;
1029
1030 CURVNET_SET(so1->so_vnet);
1031 error = (*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2);
1032 CURVNET_RESTORE();
1033 return (error);
1034 }
1035
1036 int
1037 sodisconnect(struct socket *so)
1038 {
1039 int error;
1040
1041 if ((so->so_state & SS_ISCONNECTED) == 0)
1042 return (ENOTCONN);
1043 if (so->so_state & SS_ISDISCONNECTING)
1044 return (EALREADY);
1045 VNET_SO_ASSERT(so);
1046 error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
1047 return (error);
1048 }
1049
1050 #define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT)
1051
1052 int
1053 sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio,
1054 struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
1055 {
1056 long space;
1057 ssize_t resid;
1058 int clen = 0, error, dontroute;
1059
1060 KASSERT(so->so_type == SOCK_DGRAM, ("sosend_dgram: !SOCK_DGRAM"));
1061 KASSERT(so->so_proto->pr_flags & PR_ATOMIC,
1062 ("sosend_dgram: !PR_ATOMIC"));
1063
1064 if (uio != NULL)
1065 resid = uio->uio_resid;
1066 else
1067 resid = top->m_pkthdr.len;
1068 /*
1069 * In theory resid should be unsigned. However, space must be
1070 * signed, as it might be less than 0 if we over-committed, and we
1071 * must use a signed comparison of space and resid. On the other
1072 * hand, a negative resid causes us to loop sending 0-length
1073 * segments to the protocol.
1074 */
1075 if (resid < 0) {
1076 error = EINVAL;
1077 goto out;
1078 }
1079
1080 dontroute =
1081 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0;
1082 if (td != NULL)
1083 td->td_ru.ru_msgsnd++;
1084 if (control != NULL)
1085 clen = control->m_len;
1086
1087 SOCKBUF_LOCK(&so->so_snd);
1088 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
1089 SOCKBUF_UNLOCK(&so->so_snd);
1090 error = EPIPE;
1091 goto out;
1092 }
1093 if (so->so_error) {
1094 error = so->so_error;
1095 so->so_error = 0;
1096 SOCKBUF_UNLOCK(&so->so_snd);
1097 goto out;
1098 }
1099 if ((so->so_state & SS_ISCONNECTED) == 0) {
		/*
		 * `sendto' and `sendmsg' are allowed on a connection-based
		 * socket if it supports implied connect. Return ENOTCONN if
		 * not connected and no address is supplied.
		 */
1105 if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
1106 (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
1107 if ((so->so_state & SS_ISCONFIRMING) == 0 &&
1108 !(resid == 0 && clen != 0)) {
1109 SOCKBUF_UNLOCK(&so->so_snd);
1110 error = ENOTCONN;
1111 goto out;
1112 }
1113 } else if (addr == NULL) {
1114 if (so->so_proto->pr_flags & PR_CONNREQUIRED)
1115 error = ENOTCONN;
1116 else
1117 error = EDESTADDRREQ;
1118 SOCKBUF_UNLOCK(&so->so_snd);
1119 goto out;
1120 }
1121 }
1122
	/*
	 * Do we need MSG_OOB support in SOCK_DGRAM? The signedness of the
	 * space computation here may be a problem and need fixing.
	 */
1127 space = sbspace(&so->so_snd);
1128 if (flags & MSG_OOB)
1129 space += 1024;
1130 space -= clen;
1131 SOCKBUF_UNLOCK(&so->so_snd);
1132 if (resid > space) {
1133 error = EMSGSIZE;
1134 goto out;
1135 }
1136 if (uio == NULL) {
1137 resid = 0;
1138 if (flags & MSG_EOR)
1139 top->m_flags |= M_EOR;
1140 } else {
		/*
		 * Copy the data from userland into an mbuf chain.
		 * If no data is to be copied in, a single empty mbuf
		 * is returned.
		 */
1146 top = m_uiotombuf(uio, M_WAITOK, space, max_hdr,
1147 (M_PKTHDR | ((flags & MSG_EOR) ? M_EOR : 0)));
1148 if (top == NULL) {
1149 error = EFAULT; /* only possible error */
1150 goto out;
1151 }
1152 space -= resid - uio->uio_resid;
1153 resid = uio->uio_resid;
1154 }
1155 KASSERT(resid == 0, ("sosend_dgram: resid != 0"));
1156 /*
1157 * XXXRW: Frobbing SO_DONTROUTE here is even worse without sblock
1158 * than with.
1159 */
1160 if (dontroute) {
1161 SOCK_LOCK(so);
1162 so->so_options |= SO_DONTROUTE;
1163 SOCK_UNLOCK(so);
1164 }
1165 /*
1166 * XXX all the SBS_CANTSENDMORE checks previously done could be out
1167 * of date. We could have received a reset packet in an interrupt or
1168 * maybe we slept while doing page faults in uiomove() etc. We could
1169 * probably recheck again inside the locking protection here, but
1170 * there are probably other places that this also happens. We must
1171 * rethink this.
1172 */
1173 VNET_SO_ASSERT(so);
1174 error = (*so->so_proto->pr_usrreqs->pru_send)(so,
1175 (flags & MSG_OOB) ? PRUS_OOB :
	    /*
	     * If the user set MSG_EOF, the protocol understands this flag,
	     * and there is nothing left to send, then use PRU_SEND_EOF
	     * instead of PRU_SEND.
	     */
	    ((flags & MSG_EOF) &&
	     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
	     (resid <= 0)) ?
		PRUS_EOF :
	    /* If there is more to send, set PRUS_MORETOCOME. */
1185 (flags & MSG_MORETOCOME) ||
1186 (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
1187 top, addr, control, td);
1188 if (dontroute) {
1189 SOCK_LOCK(so);
1190 so->so_options &= ~SO_DONTROUTE;
1191 SOCK_UNLOCK(so);
1192 }
1193 clen = 0;
1194 control = NULL;
1195 top = NULL;
1196 out:
1197 if (top != NULL)
1198 m_freem(top);
1199 if (control != NULL)
1200 m_freem(control);
1201 return (error);
1202 }
1203
1204 /*
1205 * Send on a socket. If send must go all at once and message is larger than
1206 * send buffering, then hard error. Lock against other senders. If must go
1207 * all at once and not enough room now, then inform user that this would
1208 * block and do nothing. Otherwise, if nonblocking, send as much as
1209 * possible. The data to be sent is described by "uio" if nonzero, otherwise
1210 * by the mbuf chain "top" (which must be null if uio is not). Data provided
1211 * in mbuf chain must be small enough to send all at once.
1212 *
1213 * Returns nonzero on error, timeout or signal; callers must check for short
1214 * counts if EINTR/ERESTART are returned. Data and control buffers are freed
1215 * on return.
1216 */
1217 int
1218 sosend_generic(struct socket *so, struct sockaddr *addr, struct uio *uio,
1219 struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
1220 {
1221 long space;
1222 ssize_t resid;
1223 int clen = 0, error, dontroute;
1224 int atomic = sosendallatonce(so) || top;
1225
1226 if (uio != NULL)
1227 resid = uio->uio_resid;
1228 else
1229 resid = top->m_pkthdr.len;
1230 /*
1231 * In theory resid should be unsigned. However, space must be
1232 * signed, as it might be less than 0 if we over-committed, and we
1233 * must use a signed comparison of space and resid. On the other
1234 * hand, a negative resid causes us to loop sending 0-length
1235 * segments to the protocol.
1236 *
1237 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
1238 * type sockets since that's an error.
1239 */
1240 if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
1241 error = EINVAL;
1242 goto out;
1243 }
1244
1245 dontroute =
1246 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
1247 (so->so_proto->pr_flags & PR_ATOMIC);
1248 if (td != NULL)
1249 td->td_ru.ru_msgsnd++;
1250 if (control != NULL)
1251 clen = control->m_len;
1252
1253 error = sblock(&so->so_snd, SBLOCKWAIT(flags));
1254 if (error)
1255 goto out;
1256
1257 restart:
1258 do {
1259 SOCKBUF_LOCK(&so->so_snd);
1260 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
1261 SOCKBUF_UNLOCK(&so->so_snd);
1262 error = EPIPE;
1263 goto release;
1264 }
1265 if (so->so_error) {
1266 error = so->so_error;
1267 so->so_error = 0;
1268 SOCKBUF_UNLOCK(&so->so_snd);
1269 goto release;
1270 }
1271 if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' are allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
1278 if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
1279 (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
1280 if ((so->so_state & SS_ISCONFIRMING) == 0 &&
1281 !(resid == 0 && clen != 0)) {
1282 SOCKBUF_UNLOCK(&so->so_snd);
1283 error = ENOTCONN;
1284 goto release;
1285 }
1286 } else if (addr == NULL) {
1287 SOCKBUF_UNLOCK(&so->so_snd);
1288 if (so->so_proto->pr_flags & PR_CONNREQUIRED)
1289 error = ENOTCONN;
1290 else
1291 error = EDESTADDRREQ;
1292 goto release;
1293 }
1294 }
1295 space = sbspace(&so->so_snd);
1296 if (flags & MSG_OOB)
1297 space += 1024;
1298 if ((atomic && resid > so->so_snd.sb_hiwat) ||
1299 clen > so->so_snd.sb_hiwat) {
1300 SOCKBUF_UNLOCK(&so->so_snd);
1301 error = EMSGSIZE;
1302 goto release;
1303 }
1304 if (space < resid + clen &&
1305 (atomic || space < so->so_snd.sb_lowat || space < clen)) {
1306 if ((so->so_state & SS_NBIO) || (flags & MSG_NBIO)) {
1307 SOCKBUF_UNLOCK(&so->so_snd);
1308 error = EWOULDBLOCK;
1309 goto release;
1310 }
1311 error = sbwait(&so->so_snd);
1312 SOCKBUF_UNLOCK(&so->so_snd);
1313 if (error)
1314 goto release;
1315 goto restart;
1316 }
1317 SOCKBUF_UNLOCK(&so->so_snd);
1318 space -= clen;
1319 do {
1320 if (uio == NULL) {
1321 resid = 0;
1322 if (flags & MSG_EOR)
1323 top->m_flags |= M_EOR;
1324 } else {
				/*
				 * Copy the data from userland into an mbuf
				 * chain. If resid is 0, which can happen
				 * only if we have control to send, then a
				 * single empty mbuf is returned. This is a
				 * workaround to prevent protocol send
				 * methods from panicking.
				 */
1333 top = m_uiotombuf(uio, M_WAITOK, space,
1334 (atomic ? max_hdr : 0),
1335 (atomic ? M_PKTHDR : 0) |
1336 ((flags & MSG_EOR) ? M_EOR : 0));
1337 if (top == NULL) {
1338 error = EFAULT; /* only possible error */
1339 goto release;
1340 }
1341 space -= resid - uio->uio_resid;
1342 resid = uio->uio_resid;
1343 }
1344 if (dontroute) {
1345 SOCK_LOCK(so);
1346 so->so_options |= SO_DONTROUTE;
1347 SOCK_UNLOCK(so);
1348 }
1349 /*
1350 * XXX all the SBS_CANTSENDMORE checks previously
1351 * done could be out of date. We could have received
1352 * a reset packet in an interrupt or maybe we slept
1353 * while doing page faults in uiomove() etc. We
1354 * could probably recheck again inside the locking
1355 * protection here, but there are probably other
1356 * places that this also happens. We must rethink
1357 * this.
1358 */
1359 VNET_SO_ASSERT(so);
1360 error = (*so->so_proto->pr_usrreqs->pru_send)(so,
1361 (flags & MSG_OOB) ? PRUS_OOB :
			    /*
			     * If the user set MSG_EOF, the protocol
			     * understands this flag, and there is nothing
			     * left to send, then use PRU_SEND_EOF instead
			     * of PRU_SEND.
			     */
			    ((flags & MSG_EOF) &&
			     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
			     (resid <= 0)) ?
				PRUS_EOF :
			    /* If there is more to send, set PRUS_MORETOCOME. */
1372 (flags & MSG_MORETOCOME) ||
1373 (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
1374 top, addr, control, td);
1375 if (dontroute) {
1376 SOCK_LOCK(so);
1377 so->so_options &= ~SO_DONTROUTE;
1378 SOCK_UNLOCK(so);
1379 }
1380 clen = 0;
1381 control = NULL;
1382 top = NULL;
1383 if (error)
1384 goto release;
1385 } while (resid && space > 0);
1386 } while (resid);
1387
1388 release:
1389 sbunlock(&so->so_snd);
1390 out:
1391 if (top != NULL)
1392 m_freem(top);
1393 if (control != NULL)
1394 m_freem(control);
1395 return (error);
1396 }
1397
1398 int
1399 sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
1400 struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
1401 {
1402 int error;
1403
1404 CURVNET_SET(so->so_vnet);
1405 error = so->so_proto->pr_usrreqs->pru_sosend(so, addr, uio, top,
1406 control, flags, td);
1407 CURVNET_RESTORE();
1408 return (error);
1409 }
1410
1411 /*
1412 * The part of soreceive() that implements reading non-inline out-of-band
1413 * data from a socket. For more complete comments, see soreceive(), from
1414 * which this code originated.
1415 *
1416 * Note that soreceive_rcvoob(), unlike the remainder of soreceive(), is
1417 * unable to return an mbuf chain to the caller.
1418 */
1419 static int
1420 soreceive_rcvoob(struct socket *so, struct uio *uio, int flags)
1421 {
1422 struct protosw *pr = so->so_proto;
1423 struct mbuf *m;
1424 int error;
1425
1426 KASSERT(flags & MSG_OOB, ("soreceive_rcvoob: (flags & MSG_OOB) == 0"));
1427 VNET_SO_ASSERT(so);
1428
1429 m = m_get(M_WAITOK, MT_DATA);
1430 error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
1431 if (error)
1432 goto bad;
1433 do {
1434 error = uiomove(mtod(m, void *),
1435 (int) min(uio->uio_resid, m->m_len), uio);
1436 m = m_free(m);
1437 } while (uio->uio_resid && error == 0 && m);
1438 bad:
1439 if (m != NULL)
1440 m_freem(m);
1441 return (error);
1442 }
1443
1444 /*
1445 * Following replacement or removal of the first mbuf on the first mbuf chain
1446 * of a socket buffer, push necessary state changes back into the socket
1447 * buffer so that other consumers see the values consistently. 'nextrecord'
 * is the caller's locally stored copy of the original value of
 * sb->sb_mb->m_nextpkt, which must be restored when the lead mbuf changes.
1450 * NOTE: 'nextrecord' may be NULL.
1451 */
1452 static __inline void
1453 sockbuf_pushsync(struct sockbuf *sb, struct mbuf *nextrecord)
1454 {
1455
1456 SOCKBUF_LOCK_ASSERT(sb);
1457 /*
1458 * First, update for the new value of nextrecord. If necessary, make
1459 * it the first record.
1460 */
1461 if (sb->sb_mb != NULL)
1462 sb->sb_mb->m_nextpkt = nextrecord;
1463 else
1464 sb->sb_mb = nextrecord;
1465
1466 /*
1467 * Now update any dependent socket buffer fields to reflect the new
1468 * state. This is an expanded inline of SB_EMPTY_FIXUP(), with the
1469 * addition of a second clause that takes care of the case where
1470 * sb_mb has been updated, but remains the last record.
1471 */
1472 if (sb->sb_mb == NULL) {
1473 sb->sb_mbtail = NULL;
1474 sb->sb_lastrecord = NULL;
1475 } else if (sb->sb_mb->m_nextpkt == NULL)
1476 sb->sb_lastrecord = sb->sb_mb;
1477 }
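
/*
 * For illustration: after soreceive_generic() below frees the lead
 * MT_SONAME or MT_CONTROL mbuf of the first record with m_free(), it
 * re-reads so_rcv.sb_mb and calls sockbuf_pushsync(&so->so_rcv,
 * nextrecord) so that sb_mb, sb_mbtail, and sb_lastrecord once again
 * describe a consistent record list.
 */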
1478
1479 /*
1480 * Implement receive operations on a socket. We depend on the way that
1481 * records are added to the sockbuf by sbappend. In particular, each record
1482 * (mbufs linked through m_next) must begin with an address if the protocol
1483 * so specifies, followed by an optional mbuf or mbufs containing ancillary
1484 * data, and then zero or more mbufs of data. In order to allow parallelism
1485 * between network receive and copying to user space, as well as avoid
1486 * sleeping with a mutex held, we release the socket buffer mutex during the
1487 * user space copy. Although the sockbuf is locked, new data may still be
1488 * appended, and thus we must maintain consistency of the sockbuf during that
1489 * time.
1490 *
1491 * The caller may receive the data as a single mbuf chain by supplying an
1492 * mbuf **mp0 for use in returning the chain. The uio is then used only for
1493 * the count in uio_resid.
1494 */
1495 int
1496 soreceive_generic(struct socket *so, struct sockaddr **psa, struct uio *uio,
1497 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
1498 {
1499 struct mbuf *m, **mp;
1500 int flags, error, offset;
1501 ssize_t len;
1502 struct protosw *pr = so->so_proto;
1503 struct mbuf *nextrecord;
1504 int moff, type = 0;
1505 ssize_t orig_resid = uio->uio_resid;
1506
1507 mp = mp0;
1508 if (psa != NULL)
1509 *psa = NULL;
1510 if (controlp != NULL)
1511 *controlp = NULL;
1512 if (flagsp != NULL)
1513 flags = *flagsp &~ MSG_EOR;
1514 else
1515 flags = 0;
1516 if (flags & MSG_OOB)
1517 return (soreceive_rcvoob(so, uio, flags));
1518 if (mp != NULL)
1519 *mp = NULL;
1520 if ((pr->pr_flags & PR_WANTRCVD) && (so->so_state & SS_ISCONFIRMING)
1521 && uio->uio_resid) {
1522 VNET_SO_ASSERT(so);
1523 (*pr->pr_usrreqs->pru_rcvd)(so, 0);
1524 }
1525
1526 error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
1527 if (error)
1528 return (error);
1529
1530 restart:
1531 SOCKBUF_LOCK(&so->so_rcv);
1532 m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more (subject
	 * to any timeout) if:
	 *   1. the current count is less than the low water mark, and
	 *   2. MSG_DONTWAIT is not set.
	 */
1539 if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
1540 sbavail(&so->so_rcv) < uio->uio_resid) &&
1541 sbavail(&so->so_rcv) < so->so_rcv.sb_lowat &&
1542 m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
1543 KASSERT(m != NULL || !sbavail(&so->so_rcv),
1544 ("receive: m == %p sbavail == %u",
1545 m, sbavail(&so->so_rcv)));
1546 if (so->so_error) {
1547 if (m != NULL)
1548 goto dontblock;
1549 error = so->so_error;
1550 if ((flags & MSG_PEEK) == 0)
1551 so->so_error = 0;
1552 SOCKBUF_UNLOCK(&so->so_rcv);
1553 goto release;
1554 }
1555 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1556 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
1557 if (m == NULL) {
1558 SOCKBUF_UNLOCK(&so->so_rcv);
1559 goto release;
1560 } else
1561 goto dontblock;
1562 }
1563 for (; m != NULL; m = m->m_next)
1564 if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
1565 m = so->so_rcv.sb_mb;
1566 goto dontblock;
1567 }
1568 if ((so->so_state & (SS_ISCONNECTING | SS_ISCONNECTED |
1569 SS_ISDISCONNECTING | SS_ISDISCONNECTED)) == 0 &&
1570 (so->so_proto->pr_flags & PR_CONNREQUIRED) != 0) {
1571 SOCKBUF_UNLOCK(&so->so_rcv);
1572 error = ENOTCONN;
1573 goto release;
1574 }
1575 if (uio->uio_resid == 0) {
1576 SOCKBUF_UNLOCK(&so->so_rcv);
1577 goto release;
1578 }
1579 if ((so->so_state & SS_NBIO) ||
1580 (flags & (MSG_DONTWAIT|MSG_NBIO))) {
1581 SOCKBUF_UNLOCK(&so->so_rcv);
1582 error = EWOULDBLOCK;
1583 goto release;
1584 }
1585 SBLASTRECORDCHK(&so->so_rcv);
1586 SBLASTMBUFCHK(&so->so_rcv);
1587 error = sbwait(&so->so_rcv);
1588 SOCKBUF_UNLOCK(&so->so_rcv);
1589 if (error)
1590 goto release;
1591 goto restart;
1592 }
1593 dontblock:
1594 /*
1595 * From this point onward, we maintain 'nextrecord' as a cache of the
1596 * pointer to the next record in the socket buffer. We must keep the
1597 * various socket buffer pointers and local stack versions of the
1598 * pointers in sync, pushing out modifications before dropping the
1599 * socket buffer mutex, and re-reading them when picking it up.
1600 *
1601 * Otherwise, we will race with the network stack appending new data
1602 * or records onto the socket buffer by using inconsistent/stale
1603 * versions of the field, possibly resulting in socket buffer
1604 * corruption.
1605 *
1606 * By holding the high-level sblock(), we prevent simultaneous
1607 * readers from pulling off the front of the socket buffer.
1608 */
1609 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1610 if (uio->uio_td)
1611 uio->uio_td->td_ru.ru_msgrcv++;
1612 KASSERT(m == so->so_rcv.sb_mb, ("soreceive: m != so->so_rcv.sb_mb"));
1613 SBLASTRECORDCHK(&so->so_rcv);
1614 SBLASTMBUFCHK(&so->so_rcv);
1615 nextrecord = m->m_nextpkt;
1616 if (pr->pr_flags & PR_ADDR) {
1617 KASSERT(m->m_type == MT_SONAME,
1618 ("m->m_type == %d", m->m_type));
1619 orig_resid = 0;
1620 if (psa != NULL)
1621 *psa = sodupsockaddr(mtod(m, struct sockaddr *),
1622 M_NOWAIT);
1623 if (flags & MSG_PEEK) {
1624 m = m->m_next;
1625 } else {
1626 sbfree(&so->so_rcv, m);
1627 so->so_rcv.sb_mb = m_free(m);
1628 m = so->so_rcv.sb_mb;
1629 sockbuf_pushsync(&so->so_rcv, nextrecord);
1630 }
1631 }
1632
1633 /*
1634 * Process one or more MT_CONTROL mbufs present before any data mbufs
1635 * in the first mbuf chain on the socket buffer. If MSG_PEEK, we
1636 * just copy the data; if !MSG_PEEK, we call into the protocol to
1637 * perform externalization (or freeing if controlp == NULL).
1638 */
1639 if (m != NULL && m->m_type == MT_CONTROL) {
1640 struct mbuf *cm = NULL, *cmn;
1641 struct mbuf **cme = &cm;
1642
1643 do {
1644 if (flags & MSG_PEEK) {
1645 if (controlp != NULL) {
1646 *controlp = m_copy(m, 0, m->m_len);
1647 controlp = &(*controlp)->m_next;
1648 }
1649 m = m->m_next;
1650 } else {
1651 sbfree(&so->so_rcv, m);
1652 so->so_rcv.sb_mb = m->m_next;
1653 m->m_next = NULL;
1654 *cme = m;
1655 cme = &(*cme)->m_next;
1656 m = so->so_rcv.sb_mb;
1657 }
1658 } while (m != NULL && m->m_type == MT_CONTROL);
1659 if ((flags & MSG_PEEK) == 0)
1660 sockbuf_pushsync(&so->so_rcv, nextrecord);
1661 while (cm != NULL) {
1662 cmn = cm->m_next;
1663 cm->m_next = NULL;
1664 if (pr->pr_domain->dom_externalize != NULL) {
1665 SOCKBUF_UNLOCK(&so->so_rcv);
1666 VNET_SO_ASSERT(so);
1667 error = (*pr->pr_domain->dom_externalize)
1668 (cm, controlp, flags);
1669 SOCKBUF_LOCK(&so->so_rcv);
1670 } else if (controlp != NULL)
1671 *controlp = cm;
1672 else
1673 m_freem(cm);
1674 if (controlp != NULL) {
1675 orig_resid = 0;
1676 while (*controlp != NULL)
1677 controlp = &(*controlp)->m_next;
1678 }
1679 cm = cmn;
1680 }
1681 if (m != NULL)
1682 nextrecord = so->so_rcv.sb_mb->m_nextpkt;
1683 else
1684 nextrecord = so->so_rcv.sb_mb;
1685 orig_resid = 0;
1686 }
1687 if (m != NULL) {
1688 if ((flags & MSG_PEEK) == 0) {
1689 KASSERT(m->m_nextpkt == nextrecord,
1690 ("soreceive: post-control, nextrecord !sync"));
1691 if (nextrecord == NULL) {
1692 KASSERT(so->so_rcv.sb_mb == m,
1693 ("soreceive: post-control, sb_mb!=m"));
1694 KASSERT(so->so_rcv.sb_lastrecord == m,
1695 ("soreceive: post-control, lastrecord!=m"));
1696 }
1697 }
1698 type = m->m_type;
1699 if (type == MT_OOBDATA)
1700 flags |= MSG_OOB;
1701 } else {
1702 if ((flags & MSG_PEEK) == 0) {
1703 KASSERT(so->so_rcv.sb_mb == nextrecord,
1704 ("soreceive: sb_mb != nextrecord"));
1705 if (so->so_rcv.sb_mb == NULL) {
1706 KASSERT(so->so_rcv.sb_lastrecord == NULL,
1707 ("soreceive: sb_lastercord != NULL"));
1708 }
1709 }
1710 }
1711 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1712 SBLASTRECORDCHK(&so->so_rcv);
1713 SBLASTMBUFCHK(&so->so_rcv);
1714
1715 /*
1716 * Now continue to read any data mbufs off of the head of the socket
1717 * buffer until the read request is satisfied. Note that 'type' is
1718 * used to store the type of any mbuf reads that have happened so far
1719 * such that soreceive() can stop reading if the type changes, which
1720 * causes soreceive() to return only one of regular data and inline
1721 * out-of-band data in a single socket receive operation.
1722 */
1723 moff = 0;
1724 offset = 0;
1725 while (m != NULL && !(m->m_flags & M_NOTAVAIL) && uio->uio_resid > 0
1726 && error == 0) {
1727 /*
1728 * If the type of mbuf has changed since the last mbuf
1729 * examined ('type'), end the receive operation.
1730 */
1731 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1732 if (m->m_type == MT_OOBDATA || m->m_type == MT_CONTROL) {
1733 if (type != m->m_type)
1734 break;
1735 } else if (type == MT_OOBDATA)
1736 break;
1737 else
1738 KASSERT(m->m_type == MT_DATA,
1739 ("m->m_type == %d", m->m_type));
1740 so->so_rcv.sb_state &= ~SBS_RCVATMARK;
1741 len = uio->uio_resid;
1742 if (so->so_oobmark && len > so->so_oobmark - offset)
1743 len = so->so_oobmark - offset;
1744 if (len > m->m_len - moff)
1745 len = m->m_len - moff;
1746 /*
1747 * If mp is set, just pass back the mbufs. Otherwise copy
1748 * them out via the uio, then free. Sockbuf must be
1749 * consistent here (points to current mbuf, it points to next
1750 * record) when we drop priority; we must note any additions
1751 * to the sockbuf when we block interrupts again.
1752 */
1753 if (mp == NULL) {
1754 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1755 SBLASTRECORDCHK(&so->so_rcv);
1756 SBLASTMBUFCHK(&so->so_rcv);
1757 SOCKBUF_UNLOCK(&so->so_rcv);
1758 error = uiomove(mtod(m, char *) + moff, (int)len, uio);
1759 SOCKBUF_LOCK(&so->so_rcv);
1760 if (error) {
1761 /*
1762 * The MT_SONAME mbuf has already been removed
1763 * from the record, so it is necessary to
1764 * remove the data mbufs, if any, to preserve
1765 * the invariant in the case of PR_ADDR that
1766 * requires MT_SONAME mbufs at the head of
1767 * each record.
1768 */
1769 if (m && pr->pr_flags & PR_ATOMIC &&
1770 ((flags & MSG_PEEK) == 0))
1771 (void)sbdroprecord_locked(&so->so_rcv);
1772 SOCKBUF_UNLOCK(&so->so_rcv);
1773 goto release;
1774 }
1775 } else
1776 uio->uio_resid -= len;
1777 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1778 if (len == m->m_len - moff) {
1779 if (m->m_flags & M_EOR)
1780 flags |= MSG_EOR;
1781 if (flags & MSG_PEEK) {
1782 m = m->m_next;
1783 moff = 0;
1784 } else {
1785 nextrecord = m->m_nextpkt;
1786 sbfree(&so->so_rcv, m);
1787 if (mp != NULL) {
1788 m->m_nextpkt = NULL;
1789 *mp = m;
1790 mp = &m->m_next;
1791 so->so_rcv.sb_mb = m = m->m_next;
1792 *mp = NULL;
1793 } else {
1794 so->so_rcv.sb_mb = m_free(m);
1795 m = so->so_rcv.sb_mb;
1796 }
1797 sockbuf_pushsync(&so->so_rcv, nextrecord);
1798 SBLASTRECORDCHK(&so->so_rcv);
1799 SBLASTMBUFCHK(&so->so_rcv);
1800 }
1801 } else {
1802 if (flags & MSG_PEEK)
1803 moff += len;
1804 else {
1805 if (mp != NULL) {
1806 if (flags & MSG_DONTWAIT) {
1807 *mp = m_copym(m, 0, len,
1808 M_NOWAIT);
1809 if (*mp == NULL) {
1810 /*
1811 * m_copym() couldn't
1812 * allocate an mbuf.
1813 * Adjust uio_resid back
1814 * (it was adjusted
1815 * down by len bytes,
1816 * which we didn't end
1817 * up "copying" over).
1818 */
1819 uio->uio_resid += len;
1820 break;
1821 }
1822 } else {
1823 SOCKBUF_UNLOCK(&so->so_rcv);
1824 *mp = m_copym(m, 0, len,
1825 M_WAITOK);
1826 SOCKBUF_LOCK(&so->so_rcv);
1827 }
1828 }
1829 sbcut_locked(&so->so_rcv, len);
1830 }
1831 }
1832 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1833 if (so->so_oobmark) {
1834 if ((flags & MSG_PEEK) == 0) {
1835 so->so_oobmark -= len;
1836 if (so->so_oobmark == 0) {
1837 so->so_rcv.sb_state |= SBS_RCVATMARK;
1838 break;
1839 }
1840 } else {
1841 offset += len;
1842 if (offset == so->so_oobmark)
1843 break;
1844 }
1845 }
1846 if (flags & MSG_EOR)
1847 break;
1848 /*
1849 * If the MSG_WAITALL flag is set (for non-atomic sockets), we
1850 * must not quit until "uio->uio_resid == 0" or an error
1851 * termination. If a signal/timeout occurs, return with a
1852 * short count but without error. Keep sockbuf locked
1853 * against other readers.
1854 */
1855 while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
1856 !sosendallatonce(so) && nextrecord == NULL) {
1857 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1858 if (so->so_error ||
1859 so->so_rcv.sb_state & SBS_CANTRCVMORE)
1860 break;
1861 /*
1862 * Notify the protocol that some data has been
1863 * drained before blocking.
1864 */
1865 if (pr->pr_flags & PR_WANTRCVD) {
1866 SOCKBUF_UNLOCK(&so->so_rcv);
1867 VNET_SO_ASSERT(so);
1868 (*pr->pr_usrreqs->pru_rcvd)(so, flags);
1869 SOCKBUF_LOCK(&so->so_rcv);
1870 }
1871 SBLASTRECORDCHK(&so->so_rcv);
1872 SBLASTMBUFCHK(&so->so_rcv);
1873 /*
1874 * We could have received some data while we were notifying
1875 * the protocol. Skip blocking in that case.
1876 */
1877 if (so->so_rcv.sb_mb == NULL) {
1878 error = sbwait(&so->so_rcv);
1879 if (error) {
1880 SOCKBUF_UNLOCK(&so->so_rcv);
1881 goto release;
1882 }
1883 }
1884 m = so->so_rcv.sb_mb;
1885 if (m != NULL)
1886 nextrecord = m->m_nextpkt;
1887 }
1888 }
1889
1890 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1891 if (m != NULL && pr->pr_flags & PR_ATOMIC) {
1892 flags |= MSG_TRUNC;
1893 if ((flags & MSG_PEEK) == 0)
1894 (void) sbdroprecord_locked(&so->so_rcv);
1895 }
1896 if ((flags & MSG_PEEK) == 0) {
1897 if (m == NULL) {
1898 /*
1899 * The first part is an inline SB_EMPTY_FIXUP(). The second
1900 * part makes sure sb_lastrecord is up-to-date if
1901 * there is still data in the socket buffer.
1902 */
1903 so->so_rcv.sb_mb = nextrecord;
1904 if (so->so_rcv.sb_mb == NULL) {
1905 so->so_rcv.sb_mbtail = NULL;
1906 so->so_rcv.sb_lastrecord = NULL;
1907 } else if (nextrecord->m_nextpkt == NULL)
1908 so->so_rcv.sb_lastrecord = nextrecord;
1909 }
1910 SBLASTRECORDCHK(&so->so_rcv);
1911 SBLASTMBUFCHK(&so->so_rcv);
1912 /*
1913 * If soreceive() is being done from the socket callback,
1914 * then we don't need to generate an ACK to the peer to update
1915 * the window, since the ACK will be generated on return to TCP.
1916 */
1917 if (!(flags & MSG_SOCALLBCK) &&
1918 (pr->pr_flags & PR_WANTRCVD)) {
1919 SOCKBUF_UNLOCK(&so->so_rcv);
1920 VNET_SO_ASSERT(so);
1921 (*pr->pr_usrreqs->pru_rcvd)(so, flags);
1922 SOCKBUF_LOCK(&so->so_rcv);
1923 }
1924 }
1925 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1926 if (orig_resid == uio->uio_resid && orig_resid &&
1927 (flags & MSG_EOR) == 0 && (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) {
1928 SOCKBUF_UNLOCK(&so->so_rcv);
1929 goto restart;
1930 }
1931 SOCKBUF_UNLOCK(&so->so_rcv);
1932
1933 if (flagsp != NULL)
1934 *flagsp |= flags;
1935 release:
1936 sbunlock(&so->so_rcv);
1937 return (error);
1938 }
1939
1940 /*
1941 * Optimized version of soreceive() for stream (TCP) sockets.
1942 * XXXAO: (MSG_WAITALL | MSG_PEEK) isn't properly handled.
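 * Protocols opt in to this fast path by pointing their
 * pr_usrreqs->pru_soreceive at this function; for example, TCP can be
 * switched to it with the net.inet.tcp.soreceive_stream loader tunable.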
1943 */
1944 int
1945 soreceive_stream(struct socket *so, struct sockaddr **psa, struct uio *uio,
1946 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
1947 {
1948 int len = 0, error = 0, flags, oresid;
1949 struct sockbuf *sb;
1950 struct mbuf *m, *n = NULL;
1951
1952 /* We only do stream sockets. */
1953 if (so->so_type != SOCK_STREAM)
1954 return (EINVAL);
1955 if (psa != NULL)
1956 *psa = NULL;
1957 if (controlp != NULL)
1958 return (EINVAL);
1959 if (flagsp != NULL)
1960 flags = *flagsp &~ MSG_EOR;
1961 else
1962 flags = 0;
1963 if (flags & MSG_OOB)
1964 return (soreceive_rcvoob(so, uio, flags));
1965 if (mp0 != NULL)
1966 *mp0 = NULL;
1967
1968 sb = &so->so_rcv;
1969
1970 /* Prevent other readers from entering the socket. */
1971 error = sblock(sb, SBLOCKWAIT(flags));
1972 if (error)
1973 return (error);
1974 SOCKBUF_LOCK(sb);
1975
1976 /* Easy one, no space to copyout anything. */
1977 if (uio->uio_resid == 0) {
1978 error = EINVAL;
1979 goto out;
1980 }
1981 oresid = uio->uio_resid;
1982
1983 /* We will never ever get anything unless we are or were connected. */
1984 if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) {
1985 error = ENOTCONN;
1986 goto out;
1987 }
1988
1989 restart:
1990 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1991
1992 /* Abort if socket has reported problems. */
1993 if (so->so_error) {
1994 if (sbavail(sb) > 0)
1995 goto deliver;
1996 if (oresid > uio->uio_resid)
1997 goto out;
1998 error = so->so_error;
1999 if (!(flags & MSG_PEEK))
2000 so->so_error = 0;
2001 goto out;
2002 }
2003
2004 /* Door is closed. Deliver what is left, if any. */
2005 if (sb->sb_state & SBS_CANTRCVMORE) {
2006 if (sbavail(sb) > 0)
2007 goto deliver;
2008 else
2009 goto out;
2010 }
2011
2012 /* Socket buffer is empty and we shall not block. */
2013 if (sbavail(sb) == 0 &&
2014 ((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO)))) {
2015 error = EAGAIN;
2016 goto out;
2017 }
2018
2019 /* Socket buffer got some data that we shall deliver now. */
2020 if (sbavail(sb) > 0 && !(flags & MSG_WAITALL) &&
2021 ((so->so_state & SS_NBIO) ||
2022 (flags & (MSG_DONTWAIT|MSG_NBIO)) ||
2023 sbavail(sb) >= sb->sb_lowat ||
2024 sbavail(sb) >= uio->uio_resid ||
2025 sbavail(sb) >= sb->sb_hiwat)) {
2026 goto deliver;
2027 }
2028
2029 /* On MSG_WAITALL we must wait until all data arrives or an error occurs. */
2030 if ((flags & MSG_WAITALL) &&
2031 (sbavail(sb) >= uio->uio_resid || sbavail(sb) >= sb->sb_hiwat))
2032 goto deliver;
2033
2034 /*
2035 * Wait and block until (more) data comes in.
2036 * NB: Drops the sockbuf lock during wait.
2037 */
2038 error = sbwait(sb);
2039 if (error)
2040 goto out;
2041 goto restart;
2042
2043 deliver:
2044 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2045 KASSERT(sbavail(sb) > 0, ("%s: sockbuf empty", __func__));
2046 KASSERT(sb->sb_mb != NULL, ("%s: sb_mb == NULL", __func__));
2047
2048 /* Statistics. */
2049 if (uio->uio_td)
2050 uio->uio_td->td_ru.ru_msgrcv++;
2051
2052 /* Fill uio until full or current end of socket buffer is reached. */
2053 len = min(uio->uio_resid, sbavail(sb));
2054 if (mp0 != NULL) {
2055 /* Dequeue as many mbufs as possible. */
2056 if (!(flags & MSG_PEEK) && len >= sb->sb_mb->m_len) {
2057 if (*mp0 == NULL)
2058 *mp0 = sb->sb_mb;
2059 else
2060 m_cat(*mp0, sb->sb_mb);
2061 for (m = sb->sb_mb;
2062 m != NULL && m->m_len <= len;
2063 m = m->m_next) {
2064 KASSERT(!(m->m_flags & M_NOTAVAIL),
2065 ("%s: m %p not available", __func__, m));
2066 len -= m->m_len;
2067 uio->uio_resid -= m->m_len;
2068 sbfree(sb, m);
2069 n = m;
2070 }
2071 n->m_next = NULL;
2072 sb->sb_mb = m;
2073 sb->sb_lastrecord = sb->sb_mb;
2074 if (sb->sb_mb == NULL)
2075 SB_EMPTY_FIXUP(sb);
2076 }
2077 /* Copy the remainder. */
2078 if (len > 0) {
2079 KASSERT(sb->sb_mb != NULL,
2080 ("%s: len > 0 && sb->sb_mb empty", __func__));
2081
2082 m = m_copym(sb->sb_mb, 0, len, M_NOWAIT);
2083 if (m == NULL)
2084 len = 0; /* Don't flush data from sockbuf. */
2085 else
2086 uio->uio_resid -= len;
2087 if (*mp0 != NULL)
2088 m_cat(*mp0, m);
2089 else
2090 *mp0 = m;
2091 if (*mp0 == NULL) {
2092 error = ENOBUFS;
2093 goto out;
2094 }
2095 }
2096 } else {
2097 /* NB: Must unlock socket buffer as uiomove may sleep. */
2098 SOCKBUF_UNLOCK(sb);
2099 error = m_mbuftouio(uio, sb->sb_mb, len);
2100 SOCKBUF_LOCK(sb);
2101 if (error)
2102 goto out;
2103 }
2104 SBLASTRECORDCHK(sb);
2105 SBLASTMBUFCHK(sb);
2106
2107 /*
2108 * Remove the delivered data from the socket buffer unless we
2109 * were only peeking.
2110 */
2111 if (!(flags & MSG_PEEK)) {
2112 if (len > 0)
2113 sbdrop_locked(sb, len);
2114
2115 /* Notify protocol that we drained some data. */
2116 if ((so->so_proto->pr_flags & PR_WANTRCVD) &&
2117 (((flags & MSG_WAITALL) && uio->uio_resid > 0) ||
2118 !(flags & MSG_SOCALLBCK))) {
2119 SOCKBUF_UNLOCK(sb);
2120 VNET_SO_ASSERT(so);
2121 (*so->so_proto->pr_usrreqs->pru_rcvd)(so, flags);
2122 SOCKBUF_LOCK(sb);
2123 }
2124 }
2125
2126 /*
2127 * For MSG_WAITALL we may have to loop again and wait for
2128 * more data to come in.
2129 */
2130 if ((flags & MSG_WAITALL) && uio->uio_resid > 0)
2131 goto restart;
2132 out:
2133 SOCKBUF_LOCK_ASSERT(sb);
2134 SBLASTRECORDCHK(sb);
2135 SBLASTMBUFCHK(sb);
2136 SOCKBUF_UNLOCK(sb);
2137 sbunlock(sb);
2138 return (error);
2139 }
2140
2141 /*
2142 * Optimized version of soreceive() for simple datagram cases from userspace.
2143 * Unlike in the stream case, we're able to drop a datagram if copyout()
2144 * fails, and because we handle datagrams atomically, we don't need to use a
2145 * sleep lock to prevent I/O interlacing.
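 * Datagram protocols opt in by pointing pr_usrreqs->pru_soreceive at
 * this function; UDP, for example, is wired up this way.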
2146 */
2147 int
2148 soreceive_dgram(struct socket *so, struct sockaddr **psa, struct uio *uio,
2149 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
2150 {
2151 struct mbuf *m, *m2;
2152 int flags, error;
2153 ssize_t len;
2154 struct protosw *pr = so->so_proto;
2155 struct mbuf *nextrecord;
2156
2157 if (psa != NULL)
2158 *psa = NULL;
2159 if (controlp != NULL)
2160 *controlp = NULL;
2161 if (flagsp != NULL)
2162 flags = *flagsp &~ MSG_EOR;
2163 else
2164 flags = 0;
2165
2166 /*
2167 * For any complicated cases, fall back to the full
2168 * soreceive_generic().
2169 */
2170 if (mp0 != NULL || (flags & MSG_PEEK) || (flags & MSG_OOB))
2171 return (soreceive_generic(so, psa, uio, mp0, controlp,
2172 flagsp));
2173
2174 /*
2175 * Enforce restrictions on use.
2176 */
2177 KASSERT((pr->pr_flags & PR_WANTRCVD) == 0,
2178 ("soreceive_dgram: wantrcvd"));
2179 KASSERT(pr->pr_flags & PR_ATOMIC, ("soreceive_dgram: !atomic"));
2180 KASSERT((so->so_rcv.sb_state & SBS_RCVATMARK) == 0,
2181 ("soreceive_dgram: SBS_RCVATMARK"));
2182 KASSERT((so->so_proto->pr_flags & PR_CONNREQUIRED) == 0,
2183 ("soreceive_dgram: P_CONNREQUIRED"));
2184
2185 /*
2186 * Loop blocking while waiting for a datagram.
2187 */
2188 SOCKBUF_LOCK(&so->so_rcv);
2189 while ((m = so->so_rcv.sb_mb) == NULL) {
2190 KASSERT(sbavail(&so->so_rcv) == 0,
2191 ("soreceive_dgram: sb_mb NULL but sbavail %u",
2192 sbavail(&so->so_rcv)));
2193 if (so->so_error) {
2194 error = so->so_error;
2195 so->so_error = 0;
2196 SOCKBUF_UNLOCK(&so->so_rcv);
2197 return (error);
2198 }
2199 if (so->so_rcv.sb_state & SBS_CANTRCVMORE ||
2200 uio->uio_resid == 0) {
2201 SOCKBUF_UNLOCK(&so->so_rcv);
2202 return (0);
2203 }
2204 if ((so->so_state & SS_NBIO) ||
2205 (flags & (MSG_DONTWAIT|MSG_NBIO))) {
2206 SOCKBUF_UNLOCK(&so->so_rcv);
2207 return (EWOULDBLOCK);
2208 }
2209 SBLASTRECORDCHK(&so->so_rcv);
2210 SBLASTMBUFCHK(&so->so_rcv);
2211 error = sbwait(&so->so_rcv);
2212 if (error) {
2213 SOCKBUF_UNLOCK(&so->so_rcv);
2214 return (error);
2215 }
2216 }
2217 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2218
2219 if (uio->uio_td)
2220 uio->uio_td->td_ru.ru_msgrcv++;
2221 SBLASTRECORDCHK(&so->so_rcv);
2222 SBLASTMBUFCHK(&so->so_rcv);
2223 nextrecord = m->m_nextpkt;
2224 if (nextrecord == NULL) {
2225 KASSERT(so->so_rcv.sb_lastrecord == m,
2226 ("soreceive_dgram: lastrecord != m"));
2227 }
2228
2229 KASSERT(so->so_rcv.sb_mb->m_nextpkt == nextrecord,
2230 ("soreceive_dgram: m_nextpkt != nextrecord"));
2231
2232 /*
2233 * Pull 'm' and its chain off the front of the packet queue.
2234 */
2235 so->so_rcv.sb_mb = NULL;
2236 sockbuf_pushsync(&so->so_rcv, nextrecord);
2237
2238 /*
2239 * Walk 'm's chain and free that many bytes from the socket buffer.
2240 */
2241 for (m2 = m; m2 != NULL; m2 = m2->m_next)
2242 sbfree(&so->so_rcv, m2);
2243
2244 /*
2245 * Do a few last checks before we let go of the lock.
2246 */
2247 SBLASTRECORDCHK(&so->so_rcv);
2248 SBLASTMBUFCHK(&so->so_rcv);
2249 SOCKBUF_UNLOCK(&so->so_rcv);
2250
2251 if (pr->pr_flags & PR_ADDR) {
2252 KASSERT(m->m_type == MT_SONAME,
2253 ("m->m_type == %d", m->m_type));
2254 if (psa != NULL)
2255 *psa = sodupsockaddr(mtod(m, struct sockaddr *),
2256 M_NOWAIT);
2257 m = m_free(m);
2258 }
2259 if (m == NULL) {
2260 /* XXXRW: Can this happen? */
2261 return (0);
2262 }
2263
2264 /*
2265 * Packet to copyout() is now in 'm' and it is disconnected from the
2266 * queue.
2267 *
2268 * Process one or more MT_CONTROL mbufs present before any data mbufs
2269 * in the first mbuf chain on the socket buffer. We call into the
2270 * protocol to perform externalization (or freeing if controlp ==
2271 * NULL). In some cases there can be only MT_CONTROL mbufs without
2272 * MT_DATA mbufs.
2273 */
2274 if (m->m_type == MT_CONTROL) {
2275 struct mbuf *cm = NULL, *cmn;
2276 struct mbuf **cme = &cm;
2277
2278 do {
2279 m2 = m->m_next;
2280 m->m_next = NULL;
2281 *cme = m;
2282 cme = &(*cme)->m_next;
2283 m = m2;
2284 } while (m != NULL && m->m_type == MT_CONTROL);
2285 while (cm != NULL) {
2286 cmn = cm->m_next;
2287 cm->m_next = NULL;
2288 if (pr->pr_domain->dom_externalize != NULL) {
2289 error = (*pr->pr_domain->dom_externalize)
2290 (cm, controlp, flags);
2291 } else if (controlp != NULL)
2292 *controlp = cm;
2293 else
2294 m_freem(cm);
2295 if (controlp != NULL) {
2296 while (*controlp != NULL)
2297 controlp = &(*controlp)->m_next;
2298 }
2299 cm = cmn;
2300 }
2301 }
2302 KASSERT(m == NULL || m->m_type == MT_DATA,
2303 ("soreceive_dgram: !data"));
2304 while (m != NULL && uio->uio_resid > 0) {
2305 len = uio->uio_resid;
2306 if (len > m->m_len)
2307 len = m->m_len;
2308 error = uiomove(mtod(m, char *), (int)len, uio);
2309 if (error) {
2310 m_freem(m);
2311 return (error);
2312 }
2313 if (len == m->m_len)
2314 m = m_free(m);
2315 else {
2316 m->m_data += len;
2317 m->m_len -= len;
2318 }
2319 }
2320 if (m != NULL) {
2321 flags |= MSG_TRUNC;
2322 m_freem(m);
2323 }
2324 if (flagsp != NULL)
2325 *flagsp |= flags;
2326 return (0);
2327 }
2328
2329 int
2330 soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
2331 struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
2332 {
2333 int error;
2334
2335 CURVNET_SET(so->so_vnet);
2336 error = (so->so_proto->pr_usrreqs->pru_soreceive(so, psa, uio, mp0,
2337 controlp, flagsp));
2338 CURVNET_RESTORE();
2339 return (error);
2340 }
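
/*
 * A minimal sketch of in-kernel use (not invoked anywhere in this
 * file): read up to 'buflen' bytes into 'buf' through soreceive().
 * 'so', 'buf' and 'buflen' are hypothetical caller-supplied names,
 * and 'so' is assumed to be a valid connected socket.
 *
 *	struct uio auio;
 *	struct iovec aiov;
 *	int error, flags = 0;
 *
 *	aiov.iov_base = buf;
 *	aiov.iov_len = buflen;
 *	auio.uio_iov = &aiov;
 *	auio.uio_iovcnt = 1;
 *	auio.uio_offset = 0;
 *	auio.uio_resid = buflen;
 *	auio.uio_segflg = UIO_SYSSPACE;
 *	auio.uio_rw = UIO_READ;
 *	auio.uio_td = curthread;
 *	error = soreceive(so, NULL, &auio, NULL, NULL, &flags);
 */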
2341
2342 int
2343 soshutdown(struct socket *so, int how)
2344 {
2345 struct protosw *pr = so->so_proto;
2346 int error, soerror_enotconn;
2347
2348 if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
2349 return (EINVAL);
2350
2351 soerror_enotconn = 0;
2352 if ((so->so_state &
2353 (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) == 0) {
2354 /*
2355 * POSIX mandates that we return ENOTCONN when shutdown(2) is
2356 * invoked on a datagram socket; however, historically we would
2357 * actually tear the socket down. This is known to be leveraged
2358 * by some applications to unblock a process waiting in recvXXX(2)
2359 * by another process that it shares the socket with. Try to meet
2360 * both backward-compatibility and POSIX requirements by forcing
2361 * ENOTCONN but still asking the protocol to perform pru_shutdown().
2362 */
2363 if (so->so_type != SOCK_DGRAM &&
2364 !(so->so_options & SO_ACCEPTCONN))
2365 return (ENOTCONN);
2366 soerror_enotconn = 1;
2367 }
2368
2369 CURVNET_SET(so->so_vnet);
2370 if (pr->pr_usrreqs->pru_flush != NULL)
2371 (*pr->pr_usrreqs->pru_flush)(so, how);
2372 if (how != SHUT_WR)
2373 sorflush(so);
2374 if (how != SHUT_RD) {
2375 error = (*pr->pr_usrreqs->pru_shutdown)(so);
2376 wakeup(&so->so_timeo);
2377 CURVNET_RESTORE();
2378 return ((error == 0 && soerror_enotconn) ? ENOTCONN : error);
2379 }
2380 wakeup(&so->so_timeo);
2381 CURVNET_RESTORE();
2382
2383 return (soerror_enotconn ? ENOTCONN : 0);
2384 }
2385
2386 void
2387 sorflush(struct socket *so)
2388 {
2389 struct sockbuf *sb = &so->so_rcv;
2390 struct protosw *pr = so->so_proto;
2391 struct socket aso;
2392
2393 VNET_SO_ASSERT(so);
2394
2395 /*
2396 * In order to avoid calling dom_dispose with the socket buffer mutex
2397 * held, and in order to generally avoid holding the lock for a long
2398 * time, we make a copy of the socket buffer and clear the original
2399 * (except for locks and state). The new socket buffer copy won't have
2400 * initialized locks so we can only call routines that won't use or
2401 * assert those locks.
2402 *
2403 * Dislodge threads currently blocked in receive and wait to acquire
2404 * a lock against other simultaneous readers before clearing the
2405 * socket buffer. Don't let our acquire be interrupted by a signal
2406 * despite any existing socket disposition on interruptible waiting.
2407 */
2408 socantrcvmore(so);
2409 (void) sblock(sb, SBL_WAIT | SBL_NOINTR);
2410
2411 /*
2412 * Invalidate/clear most of the sockbuf structure, but leave selinfo
2413 * and mutex data unchanged.
2414 */
2415 SOCKBUF_LOCK(sb);
2416 bzero(&aso, sizeof(aso));
2417 aso.so_pcb = so->so_pcb;
2418 bcopy(&sb->sb_startzero, &aso.so_rcv.sb_startzero,
2419 sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
2420 bzero(&sb->sb_startzero,
2421 sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
2422 SOCKBUF_UNLOCK(sb);
2423 sbunlock(sb);
2424
2425 /*
2426 * Dispose of special rights and flush the copied socket. Don't call
2427 * any unsafe routines (that rely on locks being initialized) on aso.
2428 */
2429 if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
2430 (*pr->pr_domain->dom_dispose)(&aso);
2431 sbrelease_internal(&aso.so_rcv, so);
2432 }
2433
2434 /*
2435 * Wrapper for Socket established helper hook.
2436 * Parameters: socket, context of the hook point, hook id.
2437 */
2438 static inline int
2439 hhook_run_socket(struct socket *so, void *hctx, int32_t h_id)
2440 {
2441 struct socket_hhook_data hhook_data = {
2442 .so = so,
2443 .hctx = hctx,
2444 .m = NULL,
2445 .status = 0
2446 };
2447
2448 CURVNET_SET(so->so_vnet);
2449 HHOOKS_RUN_IF(V_socket_hhh[h_id], &hhook_data, &so->osd);
2450 CURVNET_RESTORE();
2451
2452 /* Ugly but needed, since hhooks return void for now */
2453 return (hhook_data.status);
2454 }
2455
2456 /*
2457 * Perhaps this routine, and sooptcopyout(), below, ought to come in an
2458 * additional variant to handle the case where the option value needs to be
2459 * some kind of integer, but not a specific size. In addition to their use
2460 * here, these functions are also called by the protocol-level pr_ctloutput()
2461 * routines.
2462 */
2463 int
2464 sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
2465 {
2466 size_t valsize;
2467
2468 /*
2469 * If the user gives us more than we wanted, we ignore it, but if we
2470 * don't get the minimum length the caller wants, we return EINVAL.
2471 * On success, sopt->sopt_valsize is set to however much we actually
2472 * retrieved.
2473 */
2474 if ((valsize = sopt->sopt_valsize) < minlen)
2475 return EINVAL;
2476 if (valsize > len)
2477 sopt->sopt_valsize = valsize = len;
2478
2479 if (sopt->sopt_td != NULL)
2480 return (copyin(sopt->sopt_val, buf, valsize));
2481
2482 bcopy(sopt->sopt_val, buf, valsize);
2483 return (0);
2484 }
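
/*
 * The canonical use, as seen throughout sosetopt() below: copy in an
 * int-sized option value, rejecting anything smaller.
 *
 *	int optval;
 *
 *	error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval);
 *	if (error)
 *		return (error);
 */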
2485
2486 /*
2487 * Kernel version of setsockopt(2).
2488 *
2489 * XXX: optlen is size_t, not socklen_t
2490 */
2491 int
2492 so_setsockopt(struct socket *so, int level, int optname, void *optval,
2493 size_t optlen)
2494 {
2495 struct sockopt sopt;
2496
2497 sopt.sopt_level = level;
2498 sopt.sopt_name = optname;
2499 sopt.sopt_dir = SOPT_SET;
2500 sopt.sopt_val = optval;
2501 sopt.sopt_valsize = optlen;
2502 sopt.sopt_td = NULL;
2503 return (sosetopt(so, &sopt));
2504 }
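
/*
 * A short sketch, assuming 'so' is a valid in-kernel socket: enable
 * keep-alives without hand-building a struct sockopt.
 *
 *	int one = 1;
 *
 *	error = so_setsockopt(so, SOL_SOCKET, SO_KEEPALIVE, &one,
 *	    sizeof(one));
 */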
2505
2506 int
2507 sosetopt(struct socket *so, struct sockopt *sopt)
2508 {
2509 int error, optval;
2510 struct linger l;
2511 struct timeval tv;
2512 sbintime_t val;
2513 uint32_t val32;
2514 #ifdef MAC
2515 struct mac extmac;
2516 #endif
2517
2518 CURVNET_SET(so->so_vnet);
2519 error = 0;
2520 if (sopt->sopt_level != SOL_SOCKET) {
2521 if (so->so_proto->pr_ctloutput != NULL) {
2522 error = (*so->so_proto->pr_ctloutput)(so, sopt);
2523 CURVNET_RESTORE();
2524 return (error);
2525 }
2526 error = ENOPROTOOPT;
2527 } else {
2528 switch (sopt->sopt_name) {
2529 case SO_ACCEPTFILTER:
2530 error = do_setopt_accept_filter(so, sopt);
2531 if (error)
2532 goto bad;
2533 break;
2534
2535 case SO_LINGER:
2536 error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
2537 if (error)
2538 goto bad;
2539 if (l.l_linger < 0 ||
2540 l.l_linger > USHRT_MAX ||
2541 l.l_linger > (INT_MAX / hz)) {
2542 error = EDOM;
2543 goto bad;
2544 }
2545 SOCK_LOCK(so);
2546 so->so_linger = l.l_linger;
2547 if (l.l_onoff)
2548 so->so_options |= SO_LINGER;
2549 else
2550 so->so_options &= ~SO_LINGER;
2551 SOCK_UNLOCK(so);
2552 break;
2553
2554 case SO_DEBUG:
2555 case SO_KEEPALIVE:
2556 case SO_DONTROUTE:
2557 case SO_USELOOPBACK:
2558 case SO_BROADCAST:
2559 case SO_REUSEADDR:
2560 case SO_REUSEPORT:
2561 case SO_OOBINLINE:
2562 case SO_TIMESTAMP:
2563 case SO_BINTIME:
2564 case SO_NOSIGPIPE:
2565 case SO_NO_DDP:
2566 case SO_NO_OFFLOAD:
2567 error = sooptcopyin(sopt, &optval, sizeof optval,
2568 sizeof optval);
2569 if (error)
2570 goto bad;
2571 SOCK_LOCK(so);
2572 if (optval)
2573 so->so_options |= sopt->sopt_name;
2574 else
2575 so->so_options &= ~sopt->sopt_name;
2576 SOCK_UNLOCK(so);
2577 break;
2578
2579 case SO_SETFIB:
2580 error = sooptcopyin(sopt, &optval, sizeof optval,
2581 sizeof optval);
2582 if (error)
2583 goto bad;
2584
2585 if (optval < 0 || optval >= rt_numfibs) {
2586 error = EINVAL;
2587 goto bad;
2588 }
2589 if (((so->so_proto->pr_domain->dom_family == PF_INET) ||
2590 (so->so_proto->pr_domain->dom_family == PF_INET6) ||
2591 (so->so_proto->pr_domain->dom_family == PF_ROUTE)))
2592 so->so_fibnum = optval;
2593 else
2594 so->so_fibnum = 0;
2595 break;
2596
2597 case SO_USER_COOKIE:
2598 error = sooptcopyin(sopt, &val32, sizeof val32,
2599 sizeof val32);
2600 if (error)
2601 goto bad;
2602 so->so_user_cookie = val32;
2603 break;
2604
2605 case SO_SNDBUF:
2606 case SO_RCVBUF:
2607 case SO_SNDLOWAT:
2608 case SO_RCVLOWAT:
2609 error = sooptcopyin(sopt, &optval, sizeof optval,
2610 sizeof optval);
2611 if (error)
2612 goto bad;
2613
2614 /*
2615 * Values < 1 make no sense for any of these options,
2616 * so disallow them.
2617 */
2618 if (optval < 1) {
2619 error = EINVAL;
2620 goto bad;
2621 }
2622
2623 switch (sopt->sopt_name) {
2624 case SO_SNDBUF:
2625 case SO_RCVBUF:
2626 if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
2627 &so->so_snd : &so->so_rcv, (u_long)optval,
2628 so, curthread) == 0) {
2629 error = ENOBUFS;
2630 goto bad;
2631 }
2632 (sopt->sopt_name == SO_SNDBUF ? &so->so_snd :
2633 &so->so_rcv)->sb_flags &= ~SB_AUTOSIZE;
2634 break;
2635
2636 /*
2637 * Make sure the low-water is never greater than the
2638 * high-water.
2639 */
2640 case SO_SNDLOWAT:
2641 SOCKBUF_LOCK(&so->so_snd);
2642 so->so_snd.sb_lowat =
2643 (optval > so->so_snd.sb_hiwat) ?
2644 so->so_snd.sb_hiwat : optval;
2645 SOCKBUF_UNLOCK(&so->so_snd);
2646 break;
2647 case SO_RCVLOWAT:
2648 SOCKBUF_LOCK(&so->so_rcv);
2649 so->so_rcv.sb_lowat =
2650 (optval > so->so_rcv.sb_hiwat) ?
2651 so->so_rcv.sb_hiwat : optval;
2652 SOCKBUF_UNLOCK(&so->so_rcv);
2653 break;
2654 }
2655 break;
2656
2657 case SO_SNDTIMEO:
2658 case SO_RCVTIMEO:
2659 #ifdef COMPAT_FREEBSD32
2660 if (SV_CURPROC_FLAG(SV_ILP32)) {
2661 struct timeval32 tv32;
2662
2663 error = sooptcopyin(sopt, &tv32, sizeof tv32,
2664 sizeof tv32);
2665 CP(tv32, tv, tv_sec);
2666 CP(tv32, tv, tv_usec);
2667 } else
2668 #endif
2669 error = sooptcopyin(sopt, &tv, sizeof tv,
2670 sizeof tv);
2671 if (error)
2672 goto bad;
2673 if (tv.tv_sec < 0 || tv.tv_usec < 0 ||
2674 tv.tv_usec >= 1000000) {
2675 error = EDOM;
2676 goto bad;
2677 }
2678 if (tv.tv_sec > INT32_MAX)
2679 val = SBT_MAX;
2680 else
2681 val = tvtosbt(tv);
2682 switch (sopt->sopt_name) {
2683 case SO_SNDTIMEO:
2684 so->so_snd.sb_timeo = val;
2685 break;
2686 case SO_RCVTIMEO:
2687 so->so_rcv.sb_timeo = val;
2688 break;
2689 }
2690 break;
2691
2692 case SO_LABEL:
2693 #ifdef MAC
2694 error = sooptcopyin(sopt, &extmac, sizeof extmac,
2695 sizeof extmac);
2696 if (error)
2697 goto bad;
2698 error = mac_setsockopt_label(sopt->sopt_td->td_ucred,
2699 so, &extmac);
2700 #else
2701 error = EOPNOTSUPP;
2702 #endif
2703 break;
2704
2705 case SO_TS_CLOCK:
2706 error = sooptcopyin(sopt, &optval, sizeof optval,
2707 sizeof optval);
2708 if (error)
2709 goto bad;
2710 if (optval < 0 || optval > SO_TS_CLOCK_MAX) {
2711 error = EINVAL;
2712 goto bad;
2713 }
2714 so->so_ts_clock = optval;
2715 break;
2716
2717 default:
2718 if (V_socket_hhh[HHOOK_SOCKET_OPT]->hhh_nhooks > 0)
2719 error = hhook_run_socket(so, sopt,
2720 HHOOK_SOCKET_OPT);
2721 else
2722 error = ENOPROTOOPT;
2723 break;
2724 }
2725 if (error == 0 && so->so_proto->pr_ctloutput != NULL)
2726 (void)(*so->so_proto->pr_ctloutput)(so, sopt);
2727 }
2728 bad:
2729 CURVNET_RESTORE();
2730 return (error);
2731 }
2732
2733 /*
2734 * Helper routine for getsockopt.
2735 */
2736 int
2737 sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
2738 {
2739 int error;
2740 size_t valsize;
2741
2742 error = 0;
2743
2744 /*
2745 * Documented get behavior is that we always return a value, possibly
2746 * truncated to fit in the user's buffer. Traditional behavior is
2747 * that we always tell the user precisely how much we copied, rather
2748 * than something useful like the total amount we had available for
2749 * her. Note that this interface is not idempotent; the entire
2750 * answer must be generated ahead of time.
2751 */
2752 valsize = min(len, sopt->sopt_valsize);
2753 sopt->sopt_valsize = valsize;
2754 if (sopt->sopt_val != NULL) {
2755 if (sopt->sopt_td != NULL)
2756 error = copyout(buf, sopt->sopt_val, valsize);
2757 else
2758 bcopy(buf, sopt->sopt_val, valsize);
2759 }
2760 return (error);
2761 }
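
/*
 * The matching get-side pattern, as used by sogetopt() below; the
 * value is truncated to the user's buffer, never extended.
 *
 *	error = sooptcopyout(sopt, &optval, sizeof optval);
 */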
2762
2763 int
2764 sogetopt(struct socket *so, struct sockopt *sopt)
2765 {
2766 int error, optval;
2767 struct linger l;
2768 struct timeval tv;
2769 #ifdef MAC
2770 struct mac extmac;
2771 #endif
2772
2773 CURVNET_SET(so->so_vnet);
2774 error = 0;
2775 if (sopt->sopt_level != SOL_SOCKET) {
2776 if (so->so_proto->pr_ctloutput != NULL)
2777 error = (*so->so_proto->pr_ctloutput)(so, sopt);
2778 else
2779 error = ENOPROTOOPT;
2780 CURVNET_RESTORE();
2781 return (error);
2782 } else {
2783 switch (sopt->sopt_name) {
2784 case SO_ACCEPTFILTER:
2785 error = do_getopt_accept_filter(so, sopt);
2786 break;
2787
2788 case SO_LINGER:
2789 SOCK_LOCK(so);
2790 l.l_onoff = so->so_options & SO_LINGER;
2791 l.l_linger = so->so_linger;
2792 SOCK_UNLOCK(so);
2793 error = sooptcopyout(sopt, &l, sizeof l);
2794 break;
2795
2796 case SO_USELOOPBACK:
2797 case SO_DONTROUTE:
2798 case SO_DEBUG:
2799 case SO_KEEPALIVE:
2800 case SO_REUSEADDR:
2801 case SO_REUSEPORT:
2802 case SO_BROADCAST:
2803 case SO_OOBINLINE:
2804 case SO_ACCEPTCONN:
2805 case SO_TIMESTAMP:
2806 case SO_BINTIME:
2807 case SO_NOSIGPIPE:
2808 case SO_NO_DDP:
2809 case SO_NO_OFFLOAD:
2810 optval = so->so_options & sopt->sopt_name;
2811 integer:
2812 error = sooptcopyout(sopt, &optval, sizeof optval);
2813 break;
2814
2815 case SO_TYPE:
2816 optval = so->so_type;
2817 goto integer;
2818
2819 case SO_PROTOCOL:
2820 optval = so->so_proto->pr_protocol;
2821 goto integer;
2822
2823 case SO_ERROR:
2824 SOCK_LOCK(so);
2825 optval = so->so_error;
2826 so->so_error = 0;
2827 SOCK_UNLOCK(so);
2828 goto integer;
2829
2830 case SO_SNDBUF:
2831 optval = so->so_snd.sb_hiwat;
2832 goto integer;
2833
2834 case SO_RCVBUF:
2835 optval = so->so_rcv.sb_hiwat;
2836 goto integer;
2837
2838 case SO_SNDLOWAT:
2839 optval = so->so_snd.sb_lowat;
2840 goto integer;
2841
2842 case SO_RCVLOWAT:
2843 optval = so->so_rcv.sb_lowat;
2844 goto integer;
2845
2846 case SO_SNDTIMEO:
2847 case SO_RCVTIMEO:
2848 tv = sbttotv(sopt->sopt_name == SO_SNDTIMEO ?
2849 so->so_snd.sb_timeo : so->so_rcv.sb_timeo);
2850 #ifdef COMPAT_FREEBSD32
2851 if (SV_CURPROC_FLAG(SV_ILP32)) {
2852 struct timeval32 tv32;
2853
2854 CP(tv, tv32, tv_sec);
2855 CP(tv, tv32, tv_usec);
2856 error = sooptcopyout(sopt, &tv32, sizeof tv32);
2857 } else
2858 #endif
2859 error = sooptcopyout(sopt, &tv, sizeof tv);
2860 break;
2861
2862 case SO_LABEL:
2863 #ifdef MAC
2864 error = sooptcopyin(sopt, &extmac, sizeof(extmac),
2865 sizeof(extmac));
2866 if (error)
2867 goto bad;
2868 error = mac_getsockopt_label(sopt->sopt_td->td_ucred,
2869 so, &extmac);
2870 if (error)
2871 goto bad;
2872 error = sooptcopyout(sopt, &extmac, sizeof extmac);
2873 #else
2874 error = EOPNOTSUPP;
2875 #endif
2876 break;
2877
2878 case SO_PEERLABEL:
2879 #ifdef MAC
2880 error = sooptcopyin(sopt, &extmac, sizeof(extmac),
2881 sizeof(extmac));
2882 if (error)
2883 goto bad;
2884 error = mac_getsockopt_peerlabel(
2885 sopt->sopt_td->td_ucred, so, &extmac);
2886 if (error)
2887 goto bad;
2888 error = sooptcopyout(sopt, &extmac, sizeof extmac);
2889 #else
2890 error = EOPNOTSUPP;
2891 #endif
2892 break;
2893
2894 case SO_LISTENQLIMIT:
2895 optval = so->so_qlimit;
2896 goto integer;
2897
2898 case SO_LISTENQLEN:
2899 optval = so->so_qlen;
2900 goto integer;
2901
2902 case SO_LISTENINCQLEN:
2903 optval = so->so_incqlen;
2904 goto integer;
2905
2906 case SO_TS_CLOCK:
2907 optval = so->so_ts_clock;
2908 goto integer;
2909
2910 default:
2911 if (V_socket_hhh[HHOOK_SOCKET_OPT]->hhh_nhooks > 0)
2912 error = hhook_run_socket(so, sopt,
2913 HHOOK_SOCKET_OPT);
2914 else
2915 error = ENOPROTOOPT;
2916 break;
2917 }
2918 }
2919 #ifdef MAC
2920 bad:
2921 #endif
2922 CURVNET_RESTORE();
2923 return (error);
2924 }
2925
2926 int
2927 soopt_getm(struct sockopt *sopt, struct mbuf **mp)
2928 {
2929 struct mbuf *m, *m_prev;
2930 int sopt_size = sopt->sopt_valsize;
2931
2932 MGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA);
2933 if (m == NULL)
2934 return ENOBUFS;
2935 if (sopt_size > MLEN) {
2936 MCLGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT);
2937 if ((m->m_flags & M_EXT) == 0) {
2938 m_free(m);
2939 return ENOBUFS;
2940 }
2941 m->m_len = min(MCLBYTES, sopt_size);
2942 } else {
2943 m->m_len = min(MLEN, sopt_size);
2944 }
2945 sopt_size -= m->m_len;
2946 *mp = m;
2947 m_prev = m;
2948
2949 while (sopt_size) {
2950 MGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA);
2951 if (m == NULL) {
2952 m_freem(*mp);
2953 return ENOBUFS;
2954 }
2955 if (sopt_size > MLEN) {
2956 MCLGET(m, sopt->sopt_td != NULL ? M_WAITOK :
2957 M_NOWAIT);
2958 if ((m->m_flags & M_EXT) == 0) {
2959 m_freem(m);
2960 m_freem(*mp);
2961 return ENOBUFS;
2962 }
2963 m->m_len = min(MCLBYTES, sopt_size);
2964 } else {
2965 m->m_len = min(MLEN, sopt_size);
2966 }
2967 sopt_size -= m->m_len;
2968 m_prev->m_next = m;
2969 m_prev = m;
2970 }
2971 return (0);
2972 }
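
/*
 * A typical pairing in a protocol's pr_ctloutput() set path (a
 * sketch): size an mbuf chain to the option value, then fill it from
 * the caller's buffer.
 *
 *	struct mbuf *m;
 *
 *	error = soopt_getm(sopt, &m);
 *	if (error == 0)
 *		error = soopt_mcopyin(sopt, m);
 */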
2973
2974 int
2975 soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
2976 {
2977 struct mbuf *m0 = m;
2978
2979 if (sopt->sopt_val == NULL)
2980 return (0);
2981 while (m != NULL && sopt->sopt_valsize >= m->m_len) {
2982 if (sopt->sopt_td != NULL) {
2983 int error;
2984
2985 error = copyin(sopt->sopt_val, mtod(m, char *),
2986 m->m_len);
2987 if (error != 0) {
2988 m_freem(m0);
2989 return (error);
2990 }
2991 } else
2992 bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
2993 sopt->sopt_valsize -= m->m_len;
2994 sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
2995 m = m->m_next;
2996 }
2997 if (m != NULL) /* should have been allocated large enough at ip6_sooptmcopyin() */
2998 panic("ip6_sooptmcopyin");
2999 return (0);
3000 }
3001
3002 int
3003 soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
3004 {
3005 struct mbuf *m0 = m;
3006 size_t valsize = 0;
3007
3008 if (sopt->sopt_val == NULL)
3009 return (0);
3010 while (m != NULL && sopt->sopt_valsize >= m->m_len) {
3011 if (sopt->sopt_td != NULL) {
3012 int error;
3013
3014 error = copyout(mtod(m, char *), sopt->sopt_val,
3015 m->m_len);
3016 if (error != 0) {
3017 m_freem(m0);
3018 return (error);
3019 }
3020 } else
3021 bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
3022 sopt->sopt_valsize -= m->m_len;
3023 sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
3024 valsize += m->m_len;
3025 m = m->m_next;
3026 }
3027 if (m != NULL) {
3028 /* a large enough soopt buffer should have been given from user-land */
3029 m_freem(m0);
3030 return (EINVAL);
3031 }
3032 sopt->sopt_valsize = valsize;
3033 return (0);
3034 }
3035
3036 /*
3037 * sohasoutofband(): protocol notifies socket layer of the arrival of new
3038 * out-of-band data, which will then notify socket consumers.
3039 */
3040 void
3041 sohasoutofband(struct socket *so)
3042 {
3043
3044 if (so->so_sigio != NULL)
3045 pgsigio(&so->so_sigio, SIGURG, 0);
3046 selwakeuppri(&so->so_rcv.sb_sel, PSOCK);
3047 }
3048
3049 int
3050 sopoll(struct socket *so, int events, struct ucred *active_cred,
3051 struct thread *td)
3052 {
3053
3054 /*
3055 * We do not need to set or assert curvnet as long as everyone uses
3056 * sopoll_generic().
3057 */
3058 return (so->so_proto->pr_usrreqs->pru_sopoll(so, events, active_cred,
3059 td));
3060 }
3061
3062 int
3063 sopoll_generic(struct socket *so, int events, struct ucred *active_cred,
3064 struct thread *td)
3065 {
3066 int revents = 0;
3067
3068 SOCKBUF_LOCK(&so->so_snd);
3069 SOCKBUF_LOCK(&so->so_rcv);
3070 if (events & (POLLIN | POLLRDNORM))
3071 if (soreadabledata(so))
3072 revents |= events & (POLLIN | POLLRDNORM);
3073
3074 if (events & (POLLOUT | POLLWRNORM))
3075 if (sowriteable(so))
3076 revents |= events & (POLLOUT | POLLWRNORM);
3077
3078 if (events & (POLLPRI | POLLRDBAND))
3079 if (so->so_oobmark || (so->so_rcv.sb_state & SBS_RCVATMARK))
3080 revents |= events & (POLLPRI | POLLRDBAND);
3081
3082 if ((events & POLLINIGNEOF) == 0) {
3083 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
3084 revents |= events & (POLLIN | POLLRDNORM);
3085 if (so->so_snd.sb_state & SBS_CANTSENDMORE)
3086 revents |= POLLHUP;
3087 }
3088 }
3089
3090 if (revents == 0) {
3091 if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) {
3092 selrecord(td, &so->so_rcv.sb_sel);
3093 so->so_rcv.sb_flags |= SB_SEL;
3094 }
3095
3096 if (events & (POLLOUT | POLLWRNORM)) {
3097 selrecord(td, &so->so_snd.sb_sel);
3098 so->so_snd.sb_flags |= SB_SEL;
3099 }
3100 }
3101
3102 SOCKBUF_UNLOCK(&so->so_rcv);
3103 SOCKBUF_UNLOCK(&so->so_snd);
3104 return (revents);
3105 }
3106
3107 int
3108 soo_kqfilter(struct file *fp, struct knote *kn)
3109 {
3110 struct socket *so = kn->kn_fp->f_data;
3111 struct sockbuf *sb;
3112
3113 switch (kn->kn_filter) {
3114 case EVFILT_READ:
3115 if (so->so_options & SO_ACCEPTCONN)
3116 kn->kn_fop = &solisten_filtops;
3117 else
3118 kn->kn_fop = &soread_filtops;
3119 sb = &so->so_rcv;
3120 break;
3121 case EVFILT_WRITE:
3122 kn->kn_fop = &sowrite_filtops;
3123 sb = &so->so_snd;
3124 break;
3125 default:
3126 return (EINVAL);
3127 }
3128
3129 SOCKBUF_LOCK(sb);
3130 knlist_add(&sb->sb_sel.si_note, kn, 1);
3131 sb->sb_flags |= SB_KNOTE;
3132 SOCKBUF_UNLOCK(sb);
3133 return (0);
3134 }
3135
3136 /*
3137 * Some routines that return EOPNOTSUPP for entry points that are not
3138 * supported by a protocol. Fill in as needed.
3139 */
3140 int
3141 pru_accept_notsupp(struct socket *so, struct sockaddr **nam)
3142 {
3143
3144 return EOPNOTSUPP;
3145 }
3146
3147 int
3148 pru_aio_queue_notsupp(struct socket *so, struct kaiocb *job)
3149 {
3150
3151 return EOPNOTSUPP;
3152 }
3153
3154 int
3155 pru_attach_notsupp(struct socket *so, int proto, struct thread *td)
3156 {
3157
3158 return EOPNOTSUPP;
3159 }
3160
3161 int
3162 pru_bind_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
3163 {
3164
3165 return EOPNOTSUPP;
3166 }
3167
3168 int
3169 pru_bindat_notsupp(int fd, struct socket *so, struct sockaddr *nam,
3170 struct thread *td)
3171 {
3172
3173 return EOPNOTSUPP;
3174 }
3175
3176 int
3177 pru_connect_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
3178 {
3179
3180 return EOPNOTSUPP;
3181 }
3182
3183 int
3184 pru_connectat_notsupp(int fd, struct socket *so, struct sockaddr *nam,
3185 struct thread *td)
3186 {
3187
3188 return EOPNOTSUPP;
3189 }
3190
3191 int
3192 pru_connect2_notsupp(struct socket *so1, struct socket *so2)
3193 {
3194
3195 return EOPNOTSUPP;
3196 }
3197
3198 int
3199 pru_control_notsupp(struct socket *so, u_long cmd, caddr_t data,
3200 struct ifnet *ifp, struct thread *td)
3201 {
3202
3203 return EOPNOTSUPP;
3204 }
3205
3206 int
3207 pru_disconnect_notsupp(struct socket *so)
3208 {
3209
3210 return EOPNOTSUPP;
3211 }
3212
3213 int
3214 pru_listen_notsupp(struct socket *so, int backlog, struct thread *td)
3215 {
3216
3217 return EOPNOTSUPP;
3218 }
3219
3220 int
3221 pru_peeraddr_notsupp(struct socket *so, struct sockaddr **nam)
3222 {
3223
3224 return EOPNOTSUPP;
3225 }
3226
3227 int
3228 pru_rcvd_notsupp(struct socket *so, int flags)
3229 {
3230
3231 return EOPNOTSUPP;
3232 }
3233
3234 int
3235 pru_rcvoob_notsupp(struct socket *so, struct mbuf *m, int flags)
3236 {
3237
3238 return EOPNOTSUPP;
3239 }
3240
3241 int
3242 pru_send_notsupp(struct socket *so, int flags, struct mbuf *m,
3243 struct sockaddr *addr, struct mbuf *control, struct thread *td)
3244 {
3245
3246 return EOPNOTSUPP;
3247 }
3248
3249 int
3250 pru_ready_notsupp(struct socket *so, struct mbuf *m, int count)
3251 {
3252
3253 return (EOPNOTSUPP);
3254 }
3255
3256 /*
3257 * This isn't really a ``null'' operation, but it's the default one and
3258 * doesn't do anything destructive.
3259 */
3260 int
3261 pru_sense_null(struct socket *so, struct stat *sb)
3262 {
3263
3264 sb->st_blksize = so->so_snd.sb_hiwat;
3265 return 0;
3266 }
3267
3268 int
3269 pru_shutdown_notsupp(struct socket *so)
3270 {
3271
3272 return EOPNOTSUPP;
3273 }
3274
3275 int
3276 pru_sockaddr_notsupp(struct socket *so, struct sockaddr **nam)
3277 {
3278
3279 return EOPNOTSUPP;
3280 }
3281
3282 int
3283 pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio,
3284 struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
3285 {
3286
3287 return EOPNOTSUPP;
3288 }
3289
3290 int
3291 pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr,
3292 struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
3293 {
3294
3295 return EOPNOTSUPP;
3296 }
3297
3298 int
3299 pru_sopoll_notsupp(struct socket *so, int events, struct ucred *cred,
3300 struct thread *td)
3301 {
3302
3303 return EOPNOTSUPP;
3304 }
3305
3306 static void
3307 filt_sordetach(struct knote *kn)
3308 {
3309 struct socket *so = kn->kn_fp->f_data;
3310
3311 SOCKBUF_LOCK(&so->so_rcv);
3312 knlist_remove(&so->so_rcv.sb_sel.si_note, kn, 1);
3313 if (knlist_empty(&so->so_rcv.sb_sel.si_note))
3314 so->so_rcv.sb_flags &= ~SB_KNOTE;
3315 SOCKBUF_UNLOCK(&so->so_rcv);
3316 }
3317
3318 /*ARGSUSED*/
3319 static int
3320 filt_soread(struct knote *kn, long hint)
3321 {
3322 struct socket *so;
3323
3324 so = kn->kn_fp->f_data;
3325 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
3326
3327 kn->kn_data = sbavail(&so->so_rcv) - so->so_rcv.sb_ctl;
3328 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
3329 kn->kn_flags |= EV_EOF;
3330 kn->kn_fflags = so->so_error;
3331 return (1);
3332 } else if (so->so_error) /* temporary udp error */
3333 return (1);
3334
3335 if (kn->kn_sfflags & NOTE_LOWAT) {
3336 if (kn->kn_data >= kn->kn_sdata)
3337 return 1;
3338 } else {
3339 if (sbavail(&so->so_rcv) >= so->so_rcv.sb_lowat)
3340 return 1;
3341 }
3342
3343 /* This hook returning non-zero indicates an event, not an error. */
3344 return (hhook_run_socket(so, NULL, HHOOK_FILT_SOREAD));
3345 }
3346
3347 static void
3348 filt_sowdetach(struct knote *kn)
3349 {
3350 struct socket *so = kn->kn_fp->f_data;
3351
3352 SOCKBUF_LOCK(&so->so_snd);
3353 knlist_remove(&so->so_snd.sb_sel.si_note, kn, 1);
3354 if (knlist_empty(&so->so_snd.sb_sel.si_note))
3355 so->so_snd.sb_flags &= ~SB_KNOTE;
3356 SOCKBUF_UNLOCK(&so->so_snd);
3357 }
3358
3359 /*ARGSUSED*/
3360 static int
3361 filt_sowrite(struct knote *kn, long hint)
3362 {
3363 struct socket *so;
3364
3365 so = kn->kn_fp->f_data;
3366 SOCKBUF_LOCK_ASSERT(&so->so_snd);
3367 kn->kn_data = sbspace(&so->so_snd);
3368
3369 hhook_run_socket(so, kn, HHOOK_FILT_SOWRITE);
3370
3371 if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
3372 kn->kn_flags |= EV_EOF;
3373 kn->kn_fflags = so->so_error;
3374 return (1);
3375 } else if (so->so_error) /* temporary udp error */
3376 return (1);
3377 else if (((so->so_state & SS_ISCONNECTED) == 0) &&
3378 (so->so_proto->pr_flags & PR_CONNREQUIRED))
3379 return (0);
3380 else if (kn->kn_sfflags & NOTE_LOWAT)
3381 return (kn->kn_data >= kn->kn_sdata);
3382 else
3383 return (kn->kn_data >= so->so_snd.sb_lowat);
3384 }
3385
3386 /*ARGSUSED*/
3387 static int
3388 filt_solisten(struct knote *kn, long hint)
3389 {
3390 struct socket *so = kn->kn_fp->f_data;
3391
3392 kn->kn_data = so->so_qlen;
3393 return (!TAILQ_EMPTY(&so->so_comp));
3394 }
3395
3396 int
3397 socheckuid(struct socket *so, uid_t uid)
3398 {
3399
3400 if (so == NULL)
3401 return (EPERM);
3402 if (so->so_cred->cr_uid != uid)
3403 return (EPERM);
3404 return (0);
3405 }
3406
3407 /*
3408 * These functions are used by protocols to notify the socket layer (and its
3409 * consumers) of state changes in the sockets driven by protocol-side events.
3410 */
3411
3412 /*
3413 * Procedures to manipulate state flags of a socket and do appropriate wakeups.
3414 *
3415 * Normal sequence from the active (originating) side is that
3416 * soisconnecting() is called during processing of connect() call, resulting
3417 * in an eventual call to soisconnected() if/when the connection is
3418 * established. When the connection is torn down soisdisconnecting() is
3419 * called during processing of disconnect() call, and soisdisconnected() is
3420 * called when the connection to the peer is totally severed. The semantics
3421 * of these routines are such that connectionless protocols can call
3422 * soisconnected() and soisdisconnected() only, bypassing the in-progress
3423 * calls when setting up a ``connection'' takes no time.
3424 *
3425 * From the passive side, a socket is created with two queues of sockets:
3426 * so_incomp for connections in progress and so_comp for connections already
3427 * made and awaiting user acceptance. As a protocol is preparing incoming
3428 * connections, it creates a socket structure queued on so_incomp by calling
3429 * sonewconn(). When the connection is established, soisconnected() is
3430 * called, and transfers the socket structure to so_comp, making it available
3431 * to accept().
3432 *
3433 * If a socket is closed with sockets on either so_incomp or so_comp, these
3434 * sockets are dropped.
3435 *
3436 * If higher-level protocols are implemented in the kernel, the wakeups done
3437 * here will sometimes cause software-interrupt process scheduling.
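 *
 * Condensed call sequences implied by the above (a sketch; all
 * protocol work elided):
 *
 * Active (connecting) side:		Passive (listening) side:
 *	soisconnecting(so);		so = sonewconn(head, 0);
 *	... protocol handshake ...	... protocol handshake ...
 *	soisconnected(so);		soisconnected(so);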
3438 */
3439 void
3440 soisconnecting(struct socket *so)
3441 {
3442
3443 SOCK_LOCK(so);
3444 so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
3445 so->so_state |= SS_ISCONNECTING;
3446 SOCK_UNLOCK(so);
3447 }
3448
3449 void
3450 soisconnected(struct socket *so)
3451 {
3452 struct socket *head;
3453 int ret;
3454
3455 restart:
3456 ACCEPT_LOCK();
3457 SOCK_LOCK(so);
3458 so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
3459 so->so_state |= SS_ISCONNECTED;
3460 head = so->so_head;
3461 if (head != NULL && (so->so_qstate & SQ_INCOMP)) {
3462 if ((so->so_options & SO_ACCEPTFILTER) == 0) {
3463 SOCK_UNLOCK(so);
3464 TAILQ_REMOVE(&head->so_incomp, so, so_list);
3465 head->so_incqlen--;
3466 so->so_qstate &= ~SQ_INCOMP;
3467 TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
3468 head->so_qlen++;
3469 so->so_qstate |= SQ_COMP;
3470 ACCEPT_UNLOCK();
3471 sorwakeup(head);
3472 wakeup_one(&head->so_timeo);
3473 } else {
3474 ACCEPT_UNLOCK();
3475 soupcall_set(so, SO_RCV,
3476 head->so_accf->so_accept_filter->accf_callback,
3477 head->so_accf->so_accept_filter_arg);
3478 so->so_options &= ~SO_ACCEPTFILTER;
3479 ret = head->so_accf->so_accept_filter->accf_callback(so,
3480 head->so_accf->so_accept_filter_arg, M_NOWAIT);
3481 if (ret == SU_ISCONNECTED)
3482 soupcall_clear(so, SO_RCV);
3483 SOCK_UNLOCK(so);
3484 if (ret == SU_ISCONNECTED)
3485 goto restart;
3486 }
3487 return;
3488 }
3489 SOCK_UNLOCK(so);
3490 ACCEPT_UNLOCK();
3491 wakeup(&so->so_timeo);
3492 sorwakeup(so);
3493 sowwakeup(so);
3494 }
3495
3496 void
3497 soisdisconnecting(struct socket *so)
3498 {
3499
3500 /*
3501 * Note: This code assumes that SOCK_LOCK(so) and
3502 * SOCKBUF_LOCK(&so->so_rcv) are the same.
3503 */
3504 SOCKBUF_LOCK(&so->so_rcv);
3505 so->so_state &= ~SS_ISCONNECTING;
3506 so->so_state |= SS_ISDISCONNECTING;
3507 socantrcvmore_locked(so);
3508 SOCKBUF_LOCK(&so->so_snd);
3509 socantsendmore_locked(so);
3510 wakeup(&so->so_timeo);
3511 }
3512
3513 void
3514 soisdisconnected(struct socket *so)
3515 {
3516
3517 /*
3518 * Note: This code assumes that SOCK_LOCK(so) and
3519 * SOCKBUF_LOCK(&so->so_rcv) are the same.
3520 */
3521 SOCKBUF_LOCK(&so->so_rcv);
3522
3523 /*
3524 * There is at least one reader of so_state that does not
3525 * acquire socket lock, namely soreceive_generic(). Ensure
3526 * that it never sees all flags that track connection status
3527 * cleared, by ordering the update with a barrier semantic of
3528 * our release thread fence.
3529 */
3530 so->so_state |= SS_ISDISCONNECTED;
3531 atomic_thread_fence_rel();
3532 so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
3533 socantrcvmore_locked(so);
3534 SOCKBUF_LOCK(&so->so_snd);
3535 sbdrop_locked(&so->so_snd, sbused(&so->so_snd));
3536 socantsendmore_locked(so);
3537 wakeup(&so->so_timeo);
3538 }
3539
3540 /*
3541 * Make a copy of a sockaddr in a malloced buffer of type M_SONAME.
3542 */
3543 struct sockaddr *
3544 sodupsockaddr(const struct sockaddr *sa, int mflags)
3545 {
3546 struct sockaddr *sa2;
3547
3548 sa2 = malloc(sa->sa_len, M_SONAME, mflags);
3549 if (sa2)
3550 bcopy(sa, sa2, sa->sa_len);
3551 return sa2;
3552 }
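
/*
 * Sketch: duplicate a peer address without sleeping; the caller must
 * check for NULL and eventually free the copy with free(sa2, M_SONAME).
 *
 *	sa2 = sodupsockaddr(sa, M_NOWAIT);
 */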
3553
3554 /*
3555 * Register per-socket buffer upcalls.
3556 */
3557 void
3558 soupcall_set(struct socket *so, int which,
3559 int (*func)(struct socket *, void *, int), void *arg)
3560 {
3561 struct sockbuf *sb;
3562
3563 switch (which) {
3564 case SO_RCV:
3565 sb = &so->so_rcv;
3566 break;
3567 case SO_SND:
3568 sb = &so->so_snd;
3569 break;
3570 default:
3571 panic("soupcall_set: bad which");
3572 }
3573 SOCKBUF_LOCK_ASSERT(sb);
3574 #if 0
3575 /* XXX: accf_http actually wants to do this on purpose. */
3576 KASSERT(sb->sb_upcall == NULL, ("soupcall_set: overwriting upcall"));
3577 #endif
3578 sb->sb_upcall = func;
3579 sb->sb_upcallarg = arg;
3580 sb->sb_flags |= SB_UPCALL;
3581 }
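
/*
 * Registration sketch; 'my_rcv_upcall' is a hypothetical function of
 * type int (*)(struct socket *, void *, int), and the matching sockbuf
 * lock must be held across the call.
 *
 *	SOCKBUF_LOCK(&so->so_rcv);
 *	soupcall_set(so, SO_RCV, my_rcv_upcall, arg);
 *	SOCKBUF_UNLOCK(&so->so_rcv);
 */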
3582
3583 void
3584 soupcall_clear(struct socket *so, int which)
3585 {
3586 struct sockbuf *sb;
3587
3588 switch (which) {
3589 case SO_RCV:
3590 sb = &so->so_rcv;
3591 break;
3592 case SO_SND:
3593 sb = &so->so_snd;
3594 break;
3595 default:
3596 panic("soupcall_clear: bad which");
3597 }
3598 SOCKBUF_LOCK_ASSERT(sb);
3599 KASSERT(sb->sb_upcall != NULL, ("soupcall_clear: no upcall to clear"));
3600 sb->sb_upcall = NULL;
3601 sb->sb_upcallarg = NULL;
3602 sb->sb_flags &= ~SB_UPCALL;
3603 }
3604
3605 /*
3606 * Create an external-format (``xsocket'') structure using the information in
3607 * the kernel-format socket structure pointed to by so. This is done to
3608 * reduce the spew of irrelevant information over this interface, to isolate
3609 * user code from changes in the kernel structure, and potentially to provide
3610 * information-hiding if we decide that some of this information should be
3611 * hidden from users.
3612 */
3613 void
3614 sotoxsocket(struct socket *so, struct xsocket *xso)
3615 {
3616
3617 bzero(xso, sizeof(*xso));
3618 xso->xso_len = sizeof *xso;
3619 xso->xso_so = so;
3620 xso->so_type = so->so_type;
3621 xso->so_options = so->so_options;
3622 xso->so_linger = so->so_linger;
3623 xso->so_state = so->so_state;
3624 xso->so_pcb = so->so_pcb;
3625 xso->xso_protocol = so->so_proto->pr_protocol;
3626 xso->xso_family = so->so_proto->pr_domain->dom_family;
3627 xso->so_qlen = so->so_qlen;
3628 xso->so_incqlen = so->so_incqlen;
3629 xso->so_qlimit = so->so_qlimit;
3630 xso->so_timeo = so->so_timeo;
3631 xso->so_error = so->so_error;
3632 xso->so_pgid = so->so_sigio ? so->so_sigio->sio_pgid : 0;
3633 xso->so_oobmark = so->so_oobmark;
3634 sbtoxsockbuf(&so->so_snd, &xso->so_snd);
3635 sbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
3636 xso->so_uid = so->so_cred->cr_uid;
3637 }
3638
3639
3640 /*
3641 * Socket accessor functions to provide external consumers with
3642 * a safe interface to socket state.
3643 */
3645
3646 void
3647 so_listeners_apply_all(struct socket *so, void (*func)(struct socket *, void *),
3648 void *arg)
3649 {
3650
3651 TAILQ_FOREACH(so, &so->so_comp, so_list)
3652 func(so, arg);
3653 }
3654
3655 struct sockbuf *
3656 so_sockbuf_rcv(struct socket *so)
3657 {
3658
3659 return (&so->so_rcv);
3660 }
3661
3662 struct sockbuf *
3663 so_sockbuf_snd(struct socket *so)
3664 {
3665
3666 return (&so->so_snd);
3667 }
3668
3669 int
3670 so_state_get(const struct socket *so)
3671 {
3672
3673 return (so->so_state);
3674 }
3675
3676 void
3677 so_state_set(struct socket *so, int val)
3678 {
3679
3680 so->so_state = val;
3681 }
3682
3683 int
3684 so_options_get(const struct socket *so)
3685 {
3686
3687 return (so->so_options);
3688 }
3689
3690 void
3691 so_options_set(struct socket *so, int val)
3692 {
3693
3694 so->so_options = val;
3695 }
3696
3697 int
3698 so_error_get(const struct socket *so)
3699 {
3700
3701 return (so->so_error);
3702 }
3703
3704 void
3705 so_error_set(struct socket *so, int val)
3706 {
3707
3708 so->so_error = val;
3709 }
3710
3711 int
3712 so_linger_get(const struct socket *so)
3713 {
3714
3715 return (so->so_linger);
3716 }
3717
3718 void
3719 so_linger_set(struct socket *so, int val)
3720 {
3721
3722 KASSERT(val >= 0 && val <= USHRT_MAX && val <= (INT_MAX / hz),
3723 ("%s: val %d out of range", __func__, val));
3724
3725 so->so_linger = val;
3726 }
3727
3728 struct protosw *
3729 so_protosw_get(const struct socket *so)
3730 {
3731
3732 return (so->so_proto);
3733 }
3734
3735 void
3736 so_protosw_set(struct socket *so, struct protosw *val)
3737 {
3738
3739 so->so_proto = val;
3740 }
3741
3742 void
3743 so_sorwakeup(struct socket *so)
3744 {
3745
3746 sorwakeup(so);
3747 }
3748
3749 void
3750 so_sowwakeup(struct socket *so)
3751 {
3752
3753 sowwakeup(so);
3754 }
3755
3756 void
3757 so_sorwakeup_locked(struct socket *so)
3758 {
3759
3760 sorwakeup_locked(so);
3761 }
3762
3763 void
3764 so_sowwakeup_locked(struct socket *so)
3765 {
3766
3767 sowwakeup_locked(so);
3768 }
3769
3770 void
3771 so_lock(struct socket *so)
3772 {
3773
3774 SOCK_LOCK(so);
3775 }
3776
3777 void
3778 so_unlock(struct socket *so)
3779 {
3780
3781 SOCK_UNLOCK(so);
3782 }