FreeBSD/Linux Kernel Cross Reference
sys/kern/sys_socket.c
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1990, 1993
 *    The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *    @(#)sys_socket.c 8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/12.0/sys/kern/sys_socket.c 327844 2018-01-11 20:26:17Z tuexen $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/aio.h>
#include <sys/domain.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/sigio.h>
#include <sys/signal.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/filio.h>    /* XXX */
#include <sys/sockio.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>
#include <sys/ucred.h>
#include <sys/un.h>
#include <sys/unpcb.h>
#include <sys/user.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>

static SYSCTL_NODE(_kern_ipc, OID_AUTO, aio, CTLFLAG_RD, NULL,
    "socket AIO stats");

static int empty_results;
SYSCTL_INT(_kern_ipc_aio, OID_AUTO, empty_results, CTLFLAG_RD, &empty_results,
    0, "socket operation returned EAGAIN");

static int empty_retries;
SYSCTL_INT(_kern_ipc_aio, OID_AUTO, empty_retries, CTLFLAG_RD, &empty_retries,
    0, "socket operation retries");

static fo_rdwr_t soo_read;
static fo_rdwr_t soo_write;
static fo_ioctl_t soo_ioctl;
static fo_poll_t soo_poll;
extern fo_kqfilter_t soo_kqfilter;
static fo_stat_t soo_stat;
static fo_close_t soo_close;
static fo_fill_kinfo_t soo_fill_kinfo;
static fo_aio_queue_t soo_aio_queue;

static void soo_aio_cancel(struct kaiocb *job);

struct fileops socketops = {
    .fo_read = soo_read,
    .fo_write = soo_write,
    .fo_truncate = invfo_truncate,
    .fo_ioctl = soo_ioctl,
    .fo_poll = soo_poll,
    .fo_kqfilter = soo_kqfilter,
    .fo_stat = soo_stat,
    .fo_close = soo_close,
    .fo_chmod = invfo_chmod,
    .fo_chown = invfo_chown,
    .fo_sendfile = invfo_sendfile,
    .fo_fill_kinfo = soo_fill_kinfo,
    .fo_aio_queue = soo_aio_queue,
    .fo_flags = DFLAG_PASSABLE
};

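/*
 * soo_read() and soo_write() back read(2)/write(2) on socket file
 * descriptors.  Each performs an optional MAC check and then hands the
 * uio off to soreceive()/sosend(); soo_write() raises SIGPIPE in the
 * writing thread on EPIPE unless SO_NOSIGPIPE is set on the socket.
 */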
static int
soo_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
    struct socket *so = fp->f_data;
    int error;

#ifdef MAC
    error = mac_socket_check_receive(active_cred, so);
    if (error)
        return (error);
#endif
    error = soreceive(so, 0, uio, 0, 0, 0);
    return (error);
}

static int
soo_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
    struct socket *so = fp->f_data;
    int error;

#ifdef MAC
    error = mac_socket_check_send(active_cred, so);
    if (error)
        return (error);
#endif
    error = sosend(so, 0, uio, 0, 0, 0, uio->uio_td);
    if (error == EPIPE && (so->so_options & SO_NOSIGPIPE) == 0) {
        PROC_LOCK(uio->uio_td->td_proc);
        tdsignal(uio->uio_td, SIGPIPE);
        PROC_UNLOCK(uio->uio_td->td_proc);
    }
    return (error);
}

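/*
 * soo_ioctl() handles the generic socket ioctls (non-blocking and async
 * modes, queue sizes, ownership for SIGIO/SIGURG) directly.  Anything
 * else is dispatched by ioctl group: 'i' to ifioctl(), 'r' to
 * rtioctl_fib(), and the rest to the protocol's pru_control method.
 */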
static int
soo_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *active_cred,
    struct thread *td)
{
    struct socket *so = fp->f_data;
    int error = 0;

    switch (cmd) {
    case FIONBIO:
        SOCK_LOCK(so);
        if (*(int *)data)
            so->so_state |= SS_NBIO;
        else
            so->so_state &= ~SS_NBIO;
        SOCK_UNLOCK(so);
        break;

    case FIOASYNC:
        if (*(int *)data) {
            SOCK_LOCK(so);
            so->so_state |= SS_ASYNC;
            if (SOLISTENING(so)) {
                so->sol_sbrcv_flags |= SB_ASYNC;
                so->sol_sbsnd_flags |= SB_ASYNC;
            } else {
                SOCKBUF_LOCK(&so->so_rcv);
                so->so_rcv.sb_flags |= SB_ASYNC;
                SOCKBUF_UNLOCK(&so->so_rcv);
                SOCKBUF_LOCK(&so->so_snd);
                so->so_snd.sb_flags |= SB_ASYNC;
                SOCKBUF_UNLOCK(&so->so_snd);
            }
            SOCK_UNLOCK(so);
        } else {
            SOCK_LOCK(so);
            so->so_state &= ~SS_ASYNC;
            if (SOLISTENING(so)) {
                so->sol_sbrcv_flags &= ~SB_ASYNC;
                so->sol_sbsnd_flags &= ~SB_ASYNC;
            } else {
                SOCKBUF_LOCK(&so->so_rcv);
                so->so_rcv.sb_flags &= ~SB_ASYNC;
                SOCKBUF_UNLOCK(&so->so_rcv);
                SOCKBUF_LOCK(&so->so_snd);
                so->so_snd.sb_flags &= ~SB_ASYNC;
                SOCKBUF_UNLOCK(&so->so_snd);
            }
            SOCK_UNLOCK(so);
        }
        break;

    case FIONREAD:
        /* Unlocked read. */
        *(int *)data = sbavail(&so->so_rcv);
        break;

    case FIONWRITE:
        /* Unlocked read. */
        *(int *)data = sbavail(&so->so_snd);
        break;

    case FIONSPACE:
        /* Unlocked read. */
        if ((so->so_snd.sb_hiwat < sbused(&so->so_snd)) ||
            (so->so_snd.sb_mbmax < so->so_snd.sb_mbcnt))
            *(int *)data = 0;
        else
            *(int *)data = sbspace(&so->so_snd);
        break;

    case FIOSETOWN:
        error = fsetown(*(int *)data, &so->so_sigio);
        break;

    case FIOGETOWN:
        *(int *)data = fgetown(&so->so_sigio);
        break;

    case SIOCSPGRP:
        error = fsetown(-(*(int *)data), &so->so_sigio);
        break;

    case SIOCGPGRP:
        *(int *)data = -fgetown(&so->so_sigio);
        break;

    case SIOCATMARK:
        /* Unlocked read. */
        *(int *)data = (so->so_rcv.sb_state & SBS_RCVATMARK) != 0;
        break;
    default:
        /*
         * Interface/routing/protocol specific ioctls: interface and
         * routing ioctls should have a different entry since a
         * socket is unnecessary.
         */
        if (IOCGROUP(cmd) == 'i')
            error = ifioctl(so, cmd, data, td);
        else if (IOCGROUP(cmd) == 'r') {
            CURVNET_SET(so->so_vnet);
            error = rtioctl_fib(cmd, data, so->so_fibnum);
            CURVNET_RESTORE();
        } else {
            CURVNET_SET(so->so_vnet);
            error = ((*so->so_proto->pr_usrreqs->pru_control)
                (so, cmd, data, 0, td));
            CURVNET_RESTORE();
        }
        break;
    }
    return (error);
}

static int
soo_poll(struct file *fp, int events, struct ucred *active_cred,
    struct thread *td)
{
    struct socket *so = fp->f_data;
#ifdef MAC
    int error;

    error = mac_socket_check_poll(active_cred, so);
    if (error)
        return (error);
#endif
    return (sopoll(so, events, fp->f_cred, td));
}

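/*
 * soo_stat() synthesizes a struct stat for a socket: S_IFSOCK plus read
 * permission bits while data can still arrive, write bits while sending
 * is still possible, and st_size set to the unread data in the receive
 * buffer (excluding control data).  The protocol may override fields
 * via pru_sense().
 */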
static int
soo_stat(struct file *fp, struct stat *ub, struct ucred *active_cred,
    struct thread *td)
{
    struct socket *so = fp->f_data;
#ifdef MAC
    int error;
#endif

    bzero((caddr_t)ub, sizeof (*ub));
    ub->st_mode = S_IFSOCK;
#ifdef MAC
    error = mac_socket_check_stat(active_cred, so);
    if (error)
        return (error);
#endif
    if (!SOLISTENING(so)) {
        struct sockbuf *sb;

        /*
         * If SBS_CANTRCVMORE is set, but there's still data left
         * in the receive buffer, the socket is still readable.
         */
        sb = &so->so_rcv;
        SOCKBUF_LOCK(sb);
        if ((sb->sb_state & SBS_CANTRCVMORE) == 0 || sbavail(sb))
            ub->st_mode |= S_IRUSR | S_IRGRP | S_IROTH;
        ub->st_size = sbavail(sb) - sb->sb_ctl;
        SOCKBUF_UNLOCK(sb);

        sb = &so->so_snd;
        SOCKBUF_LOCK(sb);
        if ((sb->sb_state & SBS_CANTSENDMORE) == 0)
            ub->st_mode |= S_IWUSR | S_IWGRP | S_IWOTH;
        SOCKBUF_UNLOCK(sb);
    }
    ub->st_uid = so->so_cred->cr_uid;
    ub->st_gid = so->so_cred->cr_gid;
    return (*so->so_proto->pr_usrreqs->pru_sense)(so, ub);
}

/*
 * API socket close on file pointer.  We call soclose() to close the socket
 * (including initiating closing protocols).  soclose() will sorele() the
 * file reference but the actual socket will not go away until the socket's
 * ref count hits 0.
 */
static int
soo_close(struct file *fp, struct thread *td)
{
    int error = 0;
    struct socket *so;

    so = fp->f_data;
    fp->f_ops = &badfileops;
    fp->f_data = NULL;

    if (so)
        error = soclose(so);
    return (error);
}

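/*
 * soo_fill_kinfo() exports a socket's addresses, PCB pointers, and
 * buffer usage into a kinfo_file record for userland consumers such as
 * procstat(1).
 */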
static int
soo_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
{
    struct sockaddr *sa;
    struct inpcb *inpcb;
    struct unpcb *unpcb;
    struct socket *so;
    int error;

    kif->kf_type = KF_TYPE_SOCKET;
    so = fp->f_data;
    CURVNET_SET(so->so_vnet);
    kif->kf_un.kf_sock.kf_sock_domain0 =
        so->so_proto->pr_domain->dom_family;
    kif->kf_un.kf_sock.kf_sock_type0 = so->so_type;
    kif->kf_un.kf_sock.kf_sock_protocol0 = so->so_proto->pr_protocol;
    kif->kf_un.kf_sock.kf_sock_pcb = (uintptr_t)so->so_pcb;
    switch (kif->kf_un.kf_sock.kf_sock_domain0) {
    case AF_INET:
    case AF_INET6:
        if (kif->kf_un.kf_sock.kf_sock_protocol0 == IPPROTO_TCP) {
            if (so->so_pcb != NULL) {
                inpcb = (struct inpcb *)(so->so_pcb);
                kif->kf_un.kf_sock.kf_sock_inpcb =
                    (uintptr_t)inpcb->inp_ppcb;
                kif->kf_un.kf_sock.kf_sock_sendq =
                    sbused(&so->so_snd);
                kif->kf_un.kf_sock.kf_sock_recvq =
                    sbused(&so->so_rcv);
            }
        }
        break;
    case AF_UNIX:
        if (so->so_pcb != NULL) {
            unpcb = (struct unpcb *)(so->so_pcb);
            if (unpcb->unp_conn) {
                kif->kf_un.kf_sock.kf_sock_unpconn =
                    (uintptr_t)unpcb->unp_conn;
                kif->kf_un.kf_sock.kf_sock_rcv_sb_state =
                    so->so_rcv.sb_state;
                kif->kf_un.kf_sock.kf_sock_snd_sb_state =
                    so->so_snd.sb_state;
                kif->kf_un.kf_sock.kf_sock_sendq =
                    sbused(&so->so_snd);
                kif->kf_un.kf_sock.kf_sock_recvq =
                    sbused(&so->so_rcv);
            }
        }
        break;
    }
    error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
    if (error == 0 &&
        sa->sa_len <= sizeof(kif->kf_un.kf_sock.kf_sa_local)) {
        bcopy(sa, &kif->kf_un.kf_sock.kf_sa_local, sa->sa_len);
        free(sa, M_SONAME);
    }
    error = so->so_proto->pr_usrreqs->pru_peeraddr(so, &sa);
    if (error == 0 &&
        sa->sa_len <= sizeof(kif->kf_un.kf_sock.kf_sa_peer)) {
        bcopy(sa, &kif->kf_un.kf_sock.kf_sa_peer, sa->sa_len);
        free(sa, M_SONAME);
    }
    strncpy(kif->kf_path, so->so_proto->pr_domain->dom_name,
        sizeof(kif->kf_path));
    CURVNET_RESTORE();
    return (0);
}

/*
 * Use the 'backend3' field in AIO jobs to store the amount of data
 * completed by the AIO job so far.
 */
#define aio_done backend3

static STAILQ_HEAD(, task) soaio_jobs;
static struct mtx soaio_jobs_lock;
static struct task soaio_kproc_task;
static int soaio_starting, soaio_idle, soaio_queued;
static struct unrhdr *soaio_kproc_unr;

static int soaio_max_procs = MAX_AIO_PROCS;
SYSCTL_INT(_kern_ipc_aio, OID_AUTO, max_procs, CTLFLAG_RW, &soaio_max_procs, 0,
    "Maximum number of kernel processes to use for async socket IO");

static int soaio_num_procs;
SYSCTL_INT(_kern_ipc_aio, OID_AUTO, num_procs, CTLFLAG_RD, &soaio_num_procs, 0,
    "Number of active kernel processes for async socket IO");

static int soaio_target_procs = TARGET_AIO_PROCS;
SYSCTL_INT(_kern_ipc_aio, OID_AUTO, target_procs, CTLFLAG_RD,
    &soaio_target_procs, 0,
    "Preferred number of ready kernel processes for async socket IO");

static int soaio_lifetime;
SYSCTL_INT(_kern_ipc_aio, OID_AUTO, lifetime, CTLFLAG_RW, &soaio_lifetime, 0,
    "Maximum lifetime for idle aiod");

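/*
 * soaio_kproc_loop() is the main loop of a "soaiod" kernel process.  It
 * drains the shared soaio_jobs queue, switches back to its own vmspace
 * between jobs, and sleeps when idle; if the sleep times out
 * (soaio_lifetime) and the pool is above soaio_target_procs, the
 * process exits.
 */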
static void
soaio_kproc_loop(void *arg)
{
    struct proc *p;
    struct vmspace *myvm;
    struct task *task;
    int error, id, pending;

    id = (intptr_t)arg;

    /*
     * Grab an extra reference on the daemon's vmspace so that it
     * doesn't get freed by jobs that switch to a different
     * vmspace.
     */
    p = curproc;
    myvm = vmspace_acquire_ref(p);

    mtx_lock(&soaio_jobs_lock);
    MPASS(soaio_starting > 0);
    soaio_starting--;
    for (;;) {
        while (!STAILQ_EMPTY(&soaio_jobs)) {
            task = STAILQ_FIRST(&soaio_jobs);
            STAILQ_REMOVE_HEAD(&soaio_jobs, ta_link);
            soaio_queued--;
            pending = task->ta_pending;
            task->ta_pending = 0;
            mtx_unlock(&soaio_jobs_lock);

            task->ta_func(task->ta_context, pending);

            mtx_lock(&soaio_jobs_lock);
        }
        MPASS(soaio_queued == 0);

        if (p->p_vmspace != myvm) {
            mtx_unlock(&soaio_jobs_lock);
            vmspace_switch_aio(myvm);
            mtx_lock(&soaio_jobs_lock);
            continue;
        }

        soaio_idle++;
        error = mtx_sleep(&soaio_idle, &soaio_jobs_lock, 0, "-",
            soaio_lifetime);
        soaio_idle--;
        if (error == EWOULDBLOCK && STAILQ_EMPTY(&soaio_jobs) &&
            soaio_num_procs > soaio_target_procs)
            break;
    }
    soaio_num_procs--;
    mtx_unlock(&soaio_jobs_lock);
    free_unr(soaio_kproc_unr, id);
    kproc_exit(0);
}

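/*
 * soaio_kproc_create() is a taskqueue callback that spawns soaiod
 * processes: unconditionally up to soaio_target_procs, then only while
 * queued jobs outnumber idle and starting processes, never exceeding
 * soaio_max_procs.
 */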
static void
soaio_kproc_create(void *context, int pending)
{
    struct proc *p;
    int error, id;

    mtx_lock(&soaio_jobs_lock);
    for (;;) {
        if (soaio_num_procs < soaio_target_procs) {
            /* Must create */
        } else if (soaio_num_procs >= soaio_max_procs) {
            /*
             * Hit the limit on kernel processes, don't
             * create another one.
             */
            break;
        } else if (soaio_queued <= soaio_idle + soaio_starting) {
            /*
             * No more AIO jobs waiting for a process to be
             * created, so stop.
             */
            break;
        }
        soaio_starting++;
        mtx_unlock(&soaio_jobs_lock);

        id = alloc_unr(soaio_kproc_unr);
        error = kproc_create(soaio_kproc_loop, (void *)(intptr_t)id,
            &p, 0, 0, "soaiod%d", id);
        if (error != 0) {
            free_unr(soaio_kproc_unr, id);
            mtx_lock(&soaio_jobs_lock);
            soaio_starting--;
            break;
        }

        mtx_lock(&soaio_jobs_lock);
        soaio_num_procs++;
    }
    mtx_unlock(&soaio_jobs_lock);
}

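/*
 * soaio_enqueue() queues a task for the soaiod pool, waking an idle
 * process if one can take the job and otherwise scheduling
 * soaio_kproc_task to grow the pool (subject to soaio_max_procs).
 */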
void
soaio_enqueue(struct task *task)
{

    mtx_lock(&soaio_jobs_lock);
    MPASS(task->ta_pending == 0);
    task->ta_pending++;
    STAILQ_INSERT_TAIL(&soaio_jobs, task, ta_link);
    soaio_queued++;
    if (soaio_queued <= soaio_idle)
        wakeup_one(&soaio_idle);
    else if (soaio_num_procs < soaio_max_procs)
        taskqueue_enqueue(taskqueue_thread, &soaio_kproc_task);
    mtx_unlock(&soaio_jobs_lock);
}

static void
soaio_init(void)
{

    soaio_lifetime = AIOD_LIFETIME_DEFAULT;
    STAILQ_INIT(&soaio_jobs);
    mtx_init(&soaio_jobs_lock, "soaio jobs", NULL, MTX_DEF);
    soaio_kproc_unr = new_unrhdr(1, INT_MAX, NULL);
    TASK_INIT(&soaio_kproc_task, 0, soaio_kproc_create, NULL);
    if (soaio_target_procs > 0)
        taskqueue_enqueue(taskqueue_thread, &soaio_kproc_task);
}
SYSINIT(soaio, SI_SUB_VFS, SI_ORDER_ANY, soaio_init, NULL);

static __inline int
soaio_ready(struct socket *so, struct sockbuf *sb)
{
    return (sb == &so->so_rcv ? soreadable(so) : sowriteable(so));
}

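/*
 * soaio_process_job() performs the actual I/O for one AIO request.  It
 * is called from a soaiod process with the socket buffer lock held; the
 * lock is dropped while the I/O runs and reacquired before returning.
 * A request that cannot make progress on a blocking socket is requeued
 * at the head of sb_aiojobq to be retried when the socket is ready.
 */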
static void
soaio_process_job(struct socket *so, struct sockbuf *sb, struct kaiocb *job)
{
    struct ucred *td_savedcred;
    struct thread *td;
    struct file *fp;
    struct uio uio;
    struct iovec iov;
    size_t cnt, done;
    long ru_before;
    int error, flags;

    SOCKBUF_UNLOCK(sb);
    aio_switch_vmspace(job);
    td = curthread;
    fp = job->fd_file;
retry:
    td_savedcred = td->td_ucred;
    td->td_ucred = job->cred;

    done = job->aio_done;
    cnt = job->uaiocb.aio_nbytes - done;
    iov.iov_base = (void *)((uintptr_t)job->uaiocb.aio_buf + done);
    iov.iov_len = cnt;
    uio.uio_iov = &iov;
    uio.uio_iovcnt = 1;
    uio.uio_offset = 0;
    uio.uio_resid = cnt;
    uio.uio_segflg = UIO_USERSPACE;
    uio.uio_td = td;
    flags = MSG_NBIO;

    /*
     * For resource usage accounting, only count a completed request
     * as a single message to avoid counting multiple calls to
     * sosend/soreceive on a blocking socket.
     */

    if (sb == &so->so_rcv) {
        uio.uio_rw = UIO_READ;
        ru_before = td->td_ru.ru_msgrcv;
#ifdef MAC
        error = mac_socket_check_receive(fp->f_cred, so);
        if (error == 0)
#endif
            error = soreceive(so, NULL, &uio, NULL, NULL, &flags);
        if (td->td_ru.ru_msgrcv != ru_before)
            job->msgrcv = 1;
    } else {
        if (!TAILQ_EMPTY(&sb->sb_aiojobq))
            flags |= MSG_MORETOCOME;
        uio.uio_rw = UIO_WRITE;
        ru_before = td->td_ru.ru_msgsnd;
#ifdef MAC
        error = mac_socket_check_send(fp->f_cred, so);
        if (error == 0)
#endif
            error = sosend(so, NULL, &uio, NULL, NULL, flags, td);
        if (td->td_ru.ru_msgsnd != ru_before)
            job->msgsnd = 1;
        if (error == EPIPE && (so->so_options & SO_NOSIGPIPE) == 0) {
            PROC_LOCK(job->userproc);
            kern_psignal(job->userproc, SIGPIPE);
            PROC_UNLOCK(job->userproc);
        }
    }

    done += cnt - uio.uio_resid;
    job->aio_done = done;
    td->td_ucred = td_savedcred;

    if (error == EWOULDBLOCK) {
        /*
         * The request was either partially completed or not
         * completed at all due to racing with a read() or
         * write() on the socket.  If the socket is
         * non-blocking, return with any partial completion.
         * If the socket is blocking or if no progress has
         * been made, requeue this request at the head of the
         * queue to try again when the socket is ready.
         */
        MPASS(done != job->uaiocb.aio_nbytes);
        SOCKBUF_LOCK(sb);
        if (done == 0 || !(so->so_state & SS_NBIO)) {
            empty_results++;
            if (soaio_ready(so, sb)) {
                empty_retries++;
                SOCKBUF_UNLOCK(sb);
                goto retry;
            }

            if (!aio_set_cancel_function(job, soo_aio_cancel)) {
                SOCKBUF_UNLOCK(sb);
                if (done != 0)
                    aio_complete(job, done, 0);
                else
                    aio_cancel(job);
                SOCKBUF_LOCK(sb);
            } else {
                TAILQ_INSERT_HEAD(&sb->sb_aiojobq, job, list);
            }
            return;
        }
        SOCKBUF_UNLOCK(sb);
    }
    if (done != 0 && (error == ERESTART || error == EINTR ||
        error == EWOULDBLOCK))
        error = 0;
    if (error)
        aio_complete(job, -1, error);
    else
        aio_complete(job, done, 0);
    SOCKBUF_LOCK(sb);
}

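/*
 * soaio_process_sb() runs as the socket buffer's AIO task: it completes
 * queued jobs while the socket stays ready, then either rearms SB_AIO
 * to request another wakeup or simply clears SB_AIO_RUNNING, and
 * finally drops the socket reference taken by sowakeup_aio().
 */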
static void
soaio_process_sb(struct socket *so, struct sockbuf *sb)
{
    struct kaiocb *job;

    CURVNET_SET(so->so_vnet);
    SOCKBUF_LOCK(sb);
    while (!TAILQ_EMPTY(&sb->sb_aiojobq) && soaio_ready(so, sb)) {
        job = TAILQ_FIRST(&sb->sb_aiojobq);
        TAILQ_REMOVE(&sb->sb_aiojobq, job, list);
        if (!aio_clear_cancel_function(job))
            continue;

        soaio_process_job(so, sb, job);
    }

    /*
     * If there are still pending requests, the socket must not be
     * ready so set SB_AIO to request a wakeup when the socket
     * becomes ready.
     */
    if (!TAILQ_EMPTY(&sb->sb_aiojobq))
        sb->sb_flags |= SB_AIO;
    sb->sb_flags &= ~SB_AIO_RUNNING;
    SOCKBUF_UNLOCK(sb);

    SOCK_LOCK(so);
    sorele(so);
    CURVNET_RESTORE();
}

void
soaio_rcv(void *context, int pending)
{
    struct socket *so;

    so = context;
    soaio_process_sb(so, &so->so_rcv);
}

void
soaio_snd(void *context, int pending)
{
    struct socket *so;

    so = context;
    soaio_process_sb(so, &so->so_snd);
}

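/*
 * sowakeup_aio() is invoked from the socket wakeup path when a buffer
 * with SB_AIO set becomes ready; it schedules the buffer's AIO task,
 * using SB_AIO_RUNNING to ensure only one instance runs at a time.
 */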
void
sowakeup_aio(struct socket *so, struct sockbuf *sb)
{

    SOCKBUF_LOCK_ASSERT(sb);
    sb->sb_flags &= ~SB_AIO;
    if (sb->sb_flags & SB_AIO_RUNNING)
        return;
    sb->sb_flags |= SB_AIO_RUNNING;
    soref(so);
    soaio_enqueue(&sb->sb_aiotask);
}

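/*
 * soo_aio_cancel() is the cancellation hook for jobs parked on
 * sb_aiojobq: it unlinks the job and reports partial completion if any
 * data was transferred, otherwise cancels the job outright.
 */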
static void
soo_aio_cancel(struct kaiocb *job)
{
    struct socket *so;
    struct sockbuf *sb;
    long done;
    int opcode;

    so = job->fd_file->f_data;
    opcode = job->uaiocb.aio_lio_opcode;
    if (opcode == LIO_READ)
        sb = &so->so_rcv;
    else {
        MPASS(opcode == LIO_WRITE);
        sb = &so->so_snd;
    }

    SOCKBUF_LOCK(sb);
    if (!aio_cancel_cleared(job))
        TAILQ_REMOVE(&sb->sb_aiojobq, job, list);
    if (TAILQ_EMPTY(&sb->sb_aiojobq))
        sb->sb_flags &= ~SB_AIO;
    SOCKBUF_UNLOCK(sb);

    done = job->aio_done;
    if (done != 0)
        aio_complete(job, done, 0);
    else
        aio_cancel(job);
}

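/*
 * soo_aio_queue() is the fileops entry for queueing an AIO request on a
 * socket.  The protocol gets first claim via pru_aio_queue(), which a
 * protocol (e.g. a TOE driver) may implement to take over the job;
 * otherwise the job is appended to the appropriate socket buffer's
 * queue and a wakeup is arranged.
 */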
static int
soo_aio_queue(struct file *fp, struct kaiocb *job)
{
    struct socket *so;
    struct sockbuf *sb;
    int error;

    so = fp->f_data;
    error = (*so->so_proto->pr_usrreqs->pru_aio_queue)(so, job);
    if (error == 0)
        return (0);

    switch (job->uaiocb.aio_lio_opcode) {
    case LIO_READ:
        sb = &so->so_rcv;
        break;
    case LIO_WRITE:
        sb = &so->so_snd;
        break;
    default:
        return (EINVAL);
    }

    SOCKBUF_LOCK(sb);
    if (!aio_set_cancel_function(job, soo_aio_cancel))
        panic("new job was cancelled");
    TAILQ_INSERT_TAIL(&sb->sb_aiojobq, job, list);
    if (!(sb->sb_flags & SB_AIO_RUNNING)) {
        if (soaio_ready(so, sb))
            sowakeup_aio(so, sb);
        else
            sb->sb_flags |= SB_AIO;
    }
    SOCKBUF_UNLOCK(sb);
    return (0);
}