/*-
 * Copyright (c) 2013-2015 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 1998, David Greenman. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kern_tls.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/ktls.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/rwlock.h>
#include <sys/sf_buf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vnode.h>

#include <net/vnet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

static MALLOC_DEFINE(M_SENDFILE, "sendfile", "sendfile dynamic memory");

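/*
 * sendfile() repurposes the mbuf ext_flags vendor bits:
 * EXT_FLAG_SYNC marks mbufs accounted for by an SF_SYNC waiter,
 * EXT_FLAG_NOCACHE requests that pages be freed (VPR_TRYFREE) when
 * released, and EXT_FLAG_CACHE_LAST exempts the last page of an
 * unmapped mbuf from the NOCACHE treatment.
 */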
#define	EXT_FLAG_SYNC		EXT_FLAG_VENDOR1
#define	EXT_FLAG_NOCACHE	EXT_FLAG_VENDOR2
#define	EXT_FLAG_CACHE_LAST	EXT_FLAG_VENDOR3

/*
 * Structure describing a single sendfile(2) I/O, which may consist of
 * several underlying pager I/Os.
 *
 * The syscall context allocates the structure and initializes 'nios'
 * to 1.  As sendfile_swapin() runs through pages and starts asynchronous
 * paging operations, it increments 'nios'.
 *
 * Every I/O completion calls sendfile_iodone(), which decrements 'nios',
 * and the syscall also calls sendfile_iodone() after allocating all mbufs,
 * linking them and sending them to the socket.  Whoever brings 'nios' to
 * zero is responsible for calling pru_ready on the socket, to notify it
 * of the readiness of the data.
 */
struct sf_io {
	volatile u_int	nios;
	u_int		error;
	int		npages;
	struct socket	*so;
	struct mbuf	*m;
	vm_object_t	obj;
	vm_pindex_t	pindex0;
#ifdef KERN_TLS
	struct ktls_session *tls;
#endif
	vm_page_t	pa[];
};
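
/*
 * An illustrative 'nios' walk-through: if sendfile_swapin() starts two
 * asynchronous pager I/Os, the syscall's initial reference plus the two
 * I/Os make nios == 3.  The two pager completions and the syscall's own
 * closing sendfile_iodone() call each decrement it once, in whatever
 * order they run; the call that brings 'nios' to zero issues pru_ready.
 */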

/*
 * Structure used to track requests with SF_SYNC flag.
 */
struct sendfile_sync {
	struct mtx	mtx;
	struct cv	cv;
	unsigned	count;
	bool		waiting;
};

static void
sendfile_sync_destroy(struct sendfile_sync *sfs)
{
	KASSERT(sfs->count == 0, ("sendfile sync %p still busy", sfs));

	cv_destroy(&sfs->cv);
	mtx_destroy(&sfs->mtx);
	free(sfs, M_SENDFILE);
}

static void
sendfile_sync_signal(struct sendfile_sync *sfs)
{
	mtx_lock(&sfs->mtx);
	KASSERT(sfs->count > 0, ("sendfile sync %p not busy", sfs));
	if (--sfs->count == 0) {
		if (!sfs->waiting) {
			/* The sendfile() waiter was interrupted by a signal. */
			sendfile_sync_destroy(sfs);
			return;
		} else {
			cv_signal(&sfs->cv);
		}
	}
	mtx_unlock(&sfs->mtx);
}

counter_u64_t sfstat[sizeof(struct sfstat) / sizeof(uint64_t)];

static void
sfstat_init(const void *unused)
{

	COUNTER_ARRAY_ALLOC(sfstat, sizeof(struct sfstat) / sizeof(uint64_t),
	    M_WAITOK);
}
SYSINIT(sfstat, SI_SUB_MBUF, SI_ORDER_FIRST, sfstat_init, NULL);

static int
sfstat_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct sfstat s;

	COUNTER_ARRAY_COPY(sfstat, &s, sizeof(s) / sizeof(uint64_t));
	if (req->newptr)
		COUNTER_ARRAY_ZERO(sfstat, sizeof(s) / sizeof(uint64_t));
	return (SYSCTL_OUT(req, &s, sizeof(s)));
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, sfstat,
    CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0,
    sfstat_sysctl, "I",
    "sendfile statistics");

static void
sendfile_free_mext(struct mbuf *m)
{
	struct sf_buf *sf;
	vm_page_t pg;
	int flags;

	KASSERT(m->m_flags & M_EXT && m->m_ext.ext_type == EXT_SFBUF,
	    ("%s: m %p !M_EXT or !EXT_SFBUF", __func__, m));

	sf = m->m_ext.ext_arg1;
	pg = sf_buf_page(sf);
	flags = (m->m_ext.ext_flags & EXT_FLAG_NOCACHE) != 0 ? VPR_TRYFREE : 0;

	sf_buf_free(sf);
	vm_page_release(pg, flags);

	if (m->m_ext.ext_flags & EXT_FLAG_SYNC) {
		struct sendfile_sync *sfs = m->m_ext.ext_arg2;
		sendfile_sync_signal(sfs);
	}
}

static void
sendfile_free_mext_pg(struct mbuf *m)
{
	vm_page_t pg;
	int flags, i;
	bool cache_last;

	M_ASSERTEXTPG(m);

	cache_last = m->m_ext.ext_flags & EXT_FLAG_CACHE_LAST;
	flags = (m->m_ext.ext_flags & EXT_FLAG_NOCACHE) != 0 ? VPR_TRYFREE : 0;

	for (i = 0; i < m->m_epg_npgs; i++) {
		if (cache_last && i == m->m_epg_npgs - 1)
			flags = 0;
		pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]);
		vm_page_release(pg, flags);
	}

	if (m->m_ext.ext_flags & EXT_FLAG_SYNC) {
		struct sendfile_sync *sfs = m->m_ext.ext_arg1;
		sendfile_sync_signal(sfs);
	}
}

/*
 * Helper function to calculate how much data to put into page i of n.
 * Only first and last pages are special.
 */
static inline off_t
xfsize(int i, int n, off_t off, off_t len)
{

	if (i == 0)
		return (omin(PAGE_SIZE - (off & PAGE_MASK), len));

	if (i == n - 1 && ((off + len) & PAGE_MASK) > 0)
		return ((off + len) & PAGE_MASK);

	return (PAGE_SIZE);
}
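
/*
 * Worked example, assuming 4 KB pages (PAGE_SIZE == 0x1000): a transfer
 * with off == 0x1800 and len == 0x2000 spans n == 3 pages.
 * xfsize(0, 3, off, len) == 0x1000 - 0x800 == 0x800 (the tail of the
 * first page), xfsize(1, 3, off, len) == 0x1000 (a full middle page),
 * and xfsize(2, 3, off, len) == (0x1800 + 0x2000) & PAGE_MASK == 0x800;
 * the three together sum to len.
 */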

/*
 * Helper function to get the offset within the object for page i.
 */
static inline vm_ooffset_t
vmoff(int i, off_t off)
{

	if (i == 0)
		return ((vm_ooffset_t)off);

	return (trunc_page(off + i * PAGE_SIZE));
}
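
/*
 * Continuing the example above: vmoff(0, 0x1800) == 0x1800, the exact
 * starting offset, while vmoff(1, 0x1800) == trunc_page(0x2800) ==
 * 0x2000 and vmoff(2, 0x1800) == 0x3000, the page-aligned offsets of
 * the subsequent pages.
 */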

/*
 * Helper function used when allocation of a page or sf_buf failed.
 * Pretend as if we don't have enough space: subtract xfsize() of
 * all pages that failed.
 */
static inline void
fixspace(int old, int new, off_t off, int *space)
{

	KASSERT(old > new, ("%s: old %d new %d", __func__, old, new));

	/* Subtract last one. */
	*space -= xfsize(old - 1, old, off, *space);
	old--;

	if (new == old)
		/* There was only one page. */
		return;

	/* Subtract first one. */
	if (new == 0) {
		*space -= xfsize(0, old, off, *space);
		new++;
	}

	/* Rest of pages are full sized. */
	*space -= (old - new) * PAGE_SIZE;

	KASSERT(*space >= 0, ("%s: space went backwards", __func__));
}
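
/*
 * Worked example, again with 4 KB pages: with off == 0x1800 and
 * *space == 0x2000 covering three pages, fixspace(3, 0, off, &space)
 * subtracts the last page's xfsize (0x800), then the first page's
 * (0x800), then one full middle page (0x1000), leaving *space == 0,
 * as expected when every page failed.
 */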

/*
 * Wait for all in-flight I/Os to complete; we must not unwire pages
 * under them.  The syscall context holds the final reference, so
 * nios == 1 means every pager I/O has finished.
 */
static void
sendfile_iowait(struct sf_io *sfio, const char *wmesg)
{
	while (atomic_load_int(&sfio->nios) != 1)
		pause(wmesg, 1);
}

/*
 * I/O completion callback.
 */
static void
sendfile_iodone(void *arg, vm_page_t *pa, int count, int error)
{
	struct sf_io *sfio = arg;
	struct socket *so;
	int i;

	if (error != 0)
		sfio->error = error;

	/*
	 * Restore the valid page pointers.  They are already
	 * unbusied, but still wired.
	 *
	 * XXXKIB since pages are only wired, and we do not
	 * own the object lock, other users might have
	 * invalidated them in meantime.  Similarly, after we
	 * unbusied the swapped-in pages, they can become
	 * invalid under us.
	 */
	MPASS(count == 0 || pa[0] != bogus_page);
	for (i = 0; i < count; i++) {
		if (pa[i] == bogus_page) {
			sfio->pa[(pa[0]->pindex - sfio->pindex0) + i] =
			    pa[i] = vm_page_relookup(sfio->obj,
			    pa[0]->pindex + i);
			KASSERT(pa[i] != NULL,
			    ("%s: page %p[%d] disappeared",
			    __func__, pa, i));
		} else {
			vm_page_xunbusy_unchecked(pa[i]);
		}
	}

	if (!refcount_release(&sfio->nios))
		return;

#ifdef INVARIANTS
	for (i = 0; i < sfio->npages; i++) {
		if (sfio->pa[i] == NULL)
			break;
		KASSERT(vm_page_wired(sfio->pa[i]),
		    ("sfio %p page %d %p not wired", sfio, i, sfio->pa[i]));
		if (i == 0)
			continue;
		KASSERT(sfio->pa[0]->object == sfio->pa[i]->object,
		    ("sfio %p page %d %p wrong owner %p %p", sfio, i,
		    sfio->pa[i], sfio->pa[0]->object, sfio->pa[i]->object));
		KASSERT(sfio->pa[0]->pindex + i == sfio->pa[i]->pindex,
		    ("sfio %p page %d %p wrong index %jx %jx", sfio, i,
		    sfio->pa[i], (uintmax_t)sfio->pa[0]->pindex,
		    (uintmax_t)sfio->pa[i]->pindex));
	}
#endif

	vm_object_pip_wakeup(sfio->obj);

	if (sfio->m == NULL) {
		/*
		 * Either the I/O operation failed, or we failed to allocate
		 * buffers, or we bailed out on the first busy page, or we
		 * succeeded in filling the request without any I/Os.  In
		 * any case pru_send hasn't been executed - nothing has been
		 * sent to the socket yet.
		 */
		MPASS((curthread->td_pflags & TDP_KTHREAD) == 0);
		free(sfio, M_SENDFILE);
		return;
	}

#if defined(KERN_TLS) && defined(INVARIANTS)
	if ((sfio->m->m_flags & M_EXTPG) != 0)
		KASSERT(sfio->tls == sfio->m->m_epg_tls,
		    ("TLS session mismatch"));
	else
		KASSERT(sfio->tls == NULL,
		    ("non-ext_pgs mbuf with TLS session"));
#endif
	so = sfio->so;
	CURVNET_SET(so->so_vnet);
	if (__predict_false(sfio->error)) {
		/*
		 * The I/O operation failed.  The state of data in the
		 * socket is now inconsistent, and all we can do is to
		 * tear it down.  The protocol abort method tears down the
		 * protocol state, frees all ready mbufs and detaches the
		 * not-ready ones.  We will free the mbufs corresponding
		 * to this I/O manually.
		 *
		 * The socket is marked with EIO and made available for
		 * read, so that the application receives EIO on the next
		 * syscall and eventually closes the socket.
		 */
		so->so_proto->pr_usrreqs->pru_abort(so);
		so->so_error = EIO;

		mb_free_notready(sfio->m, sfio->npages);
#ifdef KERN_TLS
	} else if (sfio->tls != NULL && sfio->tls->mode == TCP_TLS_MODE_SW) {
		/*
		 * The I/O operation is complete, but we still need to
		 * encrypt.  We cannot do this in the interrupt thread
		 * of the disk controller, so forward the mbufs to a
		 * different thread.
		 *
		 * Donate the socket reference from sfio rather than
		 * explicitly invoking soref().
		 */
		ktls_enqueue(sfio->m, so, sfio->npages);
		goto out_with_ref;
#endif
	} else
		(void)(so->so_proto->pr_usrreqs->pru_ready)(so, sfio->m,
		    sfio->npages);

	SOCK_LOCK(so);
	sorele(so);
#ifdef KERN_TLS
out_with_ref:
#endif
	CURVNET_RESTORE();
	free(sfio, M_SENDFILE);
}

/*
 * Iterate through pages vector and request paging for non-valid pages.
 */
static int
sendfile_swapin(vm_object_t obj, struct sf_io *sfio, int *nios, off_t off,
    off_t len, int rhpages, int flags)
{
	vm_page_t *pa;
	int a, count, count1, grabbed, i, j, npages, rv;

	pa = sfio->pa;
	npages = sfio->npages;
	*nios = 0;
	flags = (flags & SF_NODISKIO) ? VM_ALLOC_NOWAIT : 0;
	sfio->pindex0 = OFF_TO_IDX(off);

	/*
	 * First grab all the pages and wire them.  Note that we grab
	 * only required pages.  Readahead pages are dealt with later.
	 */
	grabbed = vm_page_grab_pages_unlocked(obj, OFF_TO_IDX(off),
	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | flags, pa, npages);
	if (grabbed < npages) {
		for (int i = grabbed; i < npages; i++)
			pa[i] = NULL;
		npages = grabbed;
		rhpages = 0;
	}

	for (i = 0; i < npages;) {
		/* Skip valid pages. */
		if (vm_page_is_valid(pa[i], vmoff(i, off) & PAGE_MASK,
		    xfsize(i, npages, off, len))) {
			vm_page_xunbusy(pa[i]);
			SFSTAT_INC(sf_pages_valid);
			i++;
			continue;
		}

		/*
		 * The next page is invalid.  Check if it belongs to the
		 * pager.  It may not be there, which is a regular situation
		 * for the shmem pager.  For the vnode pager this happens
		 * only in the case of a sparse file.
		 *
		 * An important feature of vm_pager_has_page() is the hint
		 * stored in 'a', about how many pages we can pagein after
		 * this page in a single I/O.
		 */
		VM_OBJECT_RLOCK(obj);
		if (!vm_pager_has_page(obj, OFF_TO_IDX(vmoff(i, off)), NULL,
		    &a)) {
			VM_OBJECT_RUNLOCK(obj);
			pmap_zero_page(pa[i]);
			vm_page_valid(pa[i]);
			MPASS(pa[i]->dirty == 0);
			vm_page_xunbusy(pa[i]);
			i++;
			continue;
		}
		VM_OBJECT_RUNLOCK(obj);

		/*
		 * We want to pagein as many pages as possible, limited only
		 * by the 'a' hint and the actual request.
		 */
		count = min(a + 1, npages - i);

		/*
		 * We should not pagein into a valid page because
		 * there might be a still unfinished write tracked by
		 * e.g. a buffer, thus we substitute any valid pages
		 * with the bogus one.
		 *
		 * We must not leave around xbusy pages which are not
		 * part of the run passed to vm_pager_getpages(),
		 * otherwise the pager might deadlock waiting for the busy
		 * status of the page, e.g. if it constitutes the
		 * buffer needed to validate another page.
		 *
		 * First trim the end of the run consisting of the
		 * valid pages, then replace the rest of the valid
		 * with bogus.
		 */
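		/*
		 * For example, for a run of five pages with validity
		 * [I V V I I] (I = invalid, V = valid), the trim below
		 * stops at the trailing invalid page and keeps count == 5,
		 * after which the two interior valid pages are substituted
		 * with bogus_page.  For a run [I I V V], the trim instead
		 * drops the two trailing valid pages, shrinking count to 2.
		 */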
		count1 = count;
		for (j = i + count - 1; j > i; j--) {
			if (vm_page_is_valid(pa[j], vmoff(j, off) & PAGE_MASK,
			    xfsize(j, npages, off, len))) {
				vm_page_xunbusy(pa[j]);
				SFSTAT_INC(sf_pages_valid);
				count--;
			} else {
				break;
			}
		}

		/*
		 * The last page in the run pa[i + count - 1] is
		 * guaranteed to be invalid by the trim above, so it
		 * is not replaced with bogus, thus -1 in the loop end
		 * condition.
		 */
		MPASS(pa[i + count - 1]->valid != VM_PAGE_BITS_ALL);
		for (j = i + 1; j < i + count - 1; j++) {
			if (vm_page_is_valid(pa[j], vmoff(j, off) & PAGE_MASK,
			    xfsize(j, npages, off, len))) {
				vm_page_xunbusy(pa[j]);
				SFSTAT_INC(sf_pages_valid);
				SFSTAT_INC(sf_pages_bogus);
				pa[j] = bogus_page;
			}
		}

		refcount_acquire(&sfio->nios);
		rv = vm_pager_get_pages_async(obj, pa + i, count, NULL,
		    i + count == npages ? &rhpages : NULL,
		    &sendfile_iodone, sfio);
		if (__predict_false(rv != VM_PAGER_OK)) {
			sendfile_iowait(sfio, "sferrio");

			/*
			 * Do remaining pages recovery before returning EIO.
			 * Pages from 0 to npages are wired.
			 * Pages from (i + count1) to npages are busied.
			 */
			for (j = 0; j < npages; j++) {
				if (j >= i + count1)
					vm_page_xunbusy(pa[j]);
				KASSERT(pa[j] != NULL && pa[j] != bogus_page,
				    ("%s: page %p[%d] I/O recovery failure",
				    __func__, pa, j));
				vm_page_unwire(pa[j], PQ_INACTIVE);
				pa[j] = NULL;
			}
			return (EIO);
		}

		SFSTAT_INC(sf_iocnt);
		SFSTAT_ADD(sf_pages_read, count);
		if (i + count == npages)
			SFSTAT_ADD(sf_rhpages_read, rhpages);

		i += count1;
		(*nios)++;
	}

	if (*nios == 0 && npages != 0)
		SFSTAT_INC(sf_noiocnt);

	return (0);
}

static int
sendfile_getobj(struct thread *td, struct file *fp, vm_object_t *obj_res,
    struct vnode **vp_res, struct shmfd **shmfd_res, off_t *obj_size,
    int *bsize)
{
	struct vattr va;
	vm_object_t obj;
	struct vnode *vp;
	struct shmfd *shmfd;
	int error;

	vp = *vp_res = NULL;
	obj = NULL;
	shmfd = *shmfd_res = NULL;
	*bsize = 0;

	/*
	 * The file descriptor must be a regular file and have a
	 * backing VM object.
	 */
	if (fp->f_type == DTYPE_VNODE) {
		vp = fp->f_vnode;
		vn_lock(vp, LK_SHARED | LK_RETRY);
		if (vp->v_type != VREG) {
			error = EINVAL;
			goto out;
		}
		*bsize = vp->v_mount->mnt_stat.f_iosize;
		obj = vp->v_object;
		if (obj == NULL) {
			error = EINVAL;
			goto out;
		}

		/*
		 * Use the pager size when available to simplify
		 * synchronization with filesystems, which otherwise must
		 * atomically update both the vnode pager size and file size.
		 */
		if (obj->type == OBJT_VNODE) {
			VM_OBJECT_RLOCK(obj);
			*obj_size = obj->un_pager.vnp.vnp_size;
		} else {
			error = VOP_GETATTR(vp, &va, td->td_ucred);
			if (error != 0)
				goto out;
			*obj_size = va.va_size;
			VM_OBJECT_RLOCK(obj);
		}
	} else if (fp->f_type == DTYPE_SHM) {
		error = 0;
		shmfd = fp->f_data;
		obj = shmfd->shm_object;
		VM_OBJECT_RLOCK(obj);
		*obj_size = shmfd->shm_size;
	} else {
		error = EINVAL;
		goto out;
	}

	if ((obj->flags & OBJ_DEAD) != 0) {
		VM_OBJECT_RUNLOCK(obj);
		error = EBADF;
		goto out;
	}

	/*
	 * Temporarily increase the backing VM object's reference
	 * count so that a forced reclamation of its vnode does not
	 * immediately destroy it.
	 */
	vm_object_reference_locked(obj);
	VM_OBJECT_RUNLOCK(obj);
	*obj_res = obj;
	*vp_res = vp;
	*shmfd_res = shmfd;

out:
	if (vp != NULL)
		VOP_UNLOCK(vp);
	return (error);
}

static int
sendfile_getsock(struct thread *td, int s, struct file **sock_fp,
    struct socket **so)
{
	int error;

	*sock_fp = NULL;
	*so = NULL;

	/*
	 * The socket must be a stream socket and connected.
	 */
	error = getsock_cap(td, s, &cap_send_rights,
	    sock_fp, NULL, NULL);
	if (error != 0)
		return (error);
	*so = (*sock_fp)->f_data;
	if ((*so)->so_type != SOCK_STREAM)
		return (EINVAL);
	/*
	 * SCTP one-to-one style sockets currently don't work with
	 * sendfile().  So indicate EINVAL for now.
	 */
	if ((*so)->so_proto->pr_protocol == IPPROTO_SCTP)
		return (EINVAL);
	if (SOLISTENING(*so))
		return (ENOTCONN);
	return (0);
}

int
vn_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio,
    struct uio *trl_uio, off_t offset, size_t nbytes, off_t *sent, int flags,
    struct thread *td)
{
	struct file *sock_fp;
	struct vnode *vp;
	struct vm_object *obj;
	vm_page_t pga;
	struct socket *so;
#ifdef KERN_TLS
	struct ktls_session *tls;
#endif
	struct mbuf *m, *mh, *mhtail;
	struct sf_buf *sf;
	struct shmfd *shmfd;
	struct sendfile_sync *sfs;
	struct vattr va;
	off_t off, sbytes, rem, obj_size, nobj_size;
	int bsize, error, ext_pgs_idx, hdrlen, max_pgs, softerr;
#ifdef KERN_TLS
	int tls_enq_cnt;
#endif
	bool use_ext_pgs;

	obj = NULL;
	so = NULL;
	m = mh = NULL;
	sfs = NULL;
#ifdef KERN_TLS
	tls = NULL;
#endif
	hdrlen = sbytes = 0;
	softerr = 0;
	use_ext_pgs = false;

	error = sendfile_getobj(td, fp, &obj, &vp, &shmfd, &obj_size, &bsize);
	if (error != 0)
		return (error);

	error = sendfile_getsock(td, sockfd, &sock_fp, &so);
	if (error != 0)
		goto out;

#ifdef MAC
	error = mac_socket_check_send(td->td_ucred, so);
	if (error != 0)
		goto out;
#endif

	SFSTAT_INC(sf_syscalls);
	SFSTAT_ADD(sf_rhpages_requested, SF_READAHEAD(flags));

	if (flags & SF_SYNC) {
		sfs = malloc(sizeof(*sfs), M_SENDFILE, M_WAITOK | M_ZERO);
		mtx_init(&sfs->mtx, "sendfile", NULL, MTX_DEF);
		cv_init(&sfs->cv, "sendfile");
		sfs->waiting = true;
	}

	rem = nbytes ? omin(nbytes, obj_size - offset) : obj_size - offset;

	/*
	 * Protect against multiple writers to the socket.
	 *
	 * XXXRW: Historically this has assumed non-interruptibility, so now
	 * we implement that, but possibly shouldn't.
	 */
	(void)sblock(&so->so_snd, SBL_WAIT | SBL_NOINTR);
#ifdef KERN_TLS
	tls = ktls_hold(so->so_snd.sb_tls_info);
#endif

	/*
	 * Loop through the pages of the file, starting with the requested
	 * offset.  Get a file page (do I/O if necessary), map the file page
	 * into an sf_buf, attach an mbuf header to the sf_buf, and queue
	 * it on the socket.
	 * This is done in two loops.  The inner loop turns as many pages
	 * as it can into mbufs, up to the available socket buffer space,
	 * without blocking, so that they can be bulk delivered into the
	 * socket send buffer.  The outer loop checks the state and available
	 * space of the socket and takes care of the overall progress.
	 */
	for (off = offset; rem > 0; ) {
		struct sf_io *sfio;
		vm_page_t *pa;
		struct mbuf *m0, *mtail;
		int nios, space, npages, rhpages;

		mtail = NULL;
		/*
		 * Check the socket state for an ongoing connection,
		 * no errors and space in the socket buffer.
		 * If space is low, allow for the remainder of the
		 * file to be processed if it fits the socket buffer.
		 * Otherwise block waiting for sufficient space to
		 * proceed, or if the socket is nonblocking, return
		 * to userland with EAGAIN while reporting how far
		 * we've come.
		 * We wait until the socket buffer has significant free
		 * space to do bulk sends.  This makes good use of file
		 * system read ahead and allows packet segmentation
		 * offloading hardware to take over lots of work.  If
		 * we were not careful here we would send off only one
		 * sfbuf at a time.
		 */
		SOCKBUF_LOCK(&so->so_snd);
		if (so->so_snd.sb_lowat < so->so_snd.sb_hiwat / 2)
			so->so_snd.sb_lowat = so->so_snd.sb_hiwat / 2;
retry_space:
		if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
			error = EPIPE;
			SOCKBUF_UNLOCK(&so->so_snd);
			goto done;
		} else if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			SOCKBUF_UNLOCK(&so->so_snd);
			goto done;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			SOCKBUF_UNLOCK(&so->so_snd);
			error = ENOTCONN;
			goto done;
		}

		space = sbspace(&so->so_snd);
		if (space < rem &&
		    (space <= 0 ||
		     space < so->so_snd.sb_lowat)) {
			if (so->so_state & SS_NBIO) {
				SOCKBUF_UNLOCK(&so->so_snd);
				error = EAGAIN;
				goto done;
			}
			/*
			 * sbwait() drops the lock while sleeping.
			 * When we loop back to retry_space the
			 * state may have changed and we retest
			 * for it.
			 */
			error = sbwait(&so->so_snd);
			/*
			 * An error from sbwait() usually indicates that
			 * we've been interrupted by a signal.  If we've
			 * sent anything then return bytes sent, otherwise
			 * return the error.
			 */
			if (error != 0) {
				SOCKBUF_UNLOCK(&so->so_snd);
				goto done;
			}
			goto retry_space;
		}
		SOCKBUF_UNLOCK(&so->so_snd);

		/*
		 * At the beginning of the first loop check if any headers
		 * are specified and copy them into mbufs.  Reduce space in
		 * the socket buffer by the size of the header mbuf chain.
		 * Clear hdr_uio here and hdrlen at the end of the first loop.
		 */
		if (hdr_uio != NULL && hdr_uio->uio_resid > 0) {
			hdr_uio->uio_td = td;
			hdr_uio->uio_rw = UIO_WRITE;
#ifdef KERN_TLS
			if (tls != NULL)
				mh = m_uiotombuf(hdr_uio, M_WAITOK, space,
				    tls->params.max_frame_len, M_EXTPG);
			else
#endif
				mh = m_uiotombuf(hdr_uio, M_WAITOK,
				    space, 0, 0);
			hdrlen = m_length(mh, &mhtail);
			space -= hdrlen;
			/*
			 * If the header consumed all the socket buffer
			 * space, don't waste CPU cycles and jump to the end.
			 */
			if (space == 0) {
				sfio = NULL;
				nios = 0;
				goto prepend_header;
			}
			hdr_uio = NULL;
		}

		if (vp != NULL) {
			error = vn_lock(vp, LK_SHARED);
			if (error != 0)
				goto done;

			/*
			 * Check to see if the file size has changed.
			 */
			if (obj->type == OBJT_VNODE) {
				VM_OBJECT_RLOCK(obj);
				nobj_size = obj->un_pager.vnp.vnp_size;
				VM_OBJECT_RUNLOCK(obj);
			} else {
				error = VOP_GETATTR(vp, &va, td->td_ucred);
				if (error != 0) {
					VOP_UNLOCK(vp);
					goto done;
				}
				nobj_size = va.va_size;
			}
			if (off >= nobj_size) {
				VOP_UNLOCK(vp);
				goto done;
			}
			if (nobj_size != obj_size) {
				obj_size = nobj_size;
				rem = nbytes ? omin(nbytes + offset, obj_size) :
				    obj_size;
				rem -= off;
			}
		}

		if (space > rem)
			space = rem;
		else if (space > PAGE_SIZE) {
			/*
			 * Use page boundaries when possible for large
			 * requests.
			 */
			if (off & PAGE_MASK)
				space -= (PAGE_SIZE - (off & PAGE_MASK));
			space = trunc_page(space);
			if (off & PAGE_MASK)
				space += (PAGE_SIZE - (off & PAGE_MASK));
		}
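
		/*
		 * E.g. with (off & PAGE_MASK) == 0x800 and space == 0x2c00
		 * on 4 KB pages, the adjustment above computes 0x2c00 -
		 * 0x800 == 0x2400, trunc_page() -> 0x2000, plus 0x800 ==
		 * 0x2800: the partial first page plus whole pages, so the
		 * chunk ends exactly on a page boundary.
		 */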

		npages = howmany(space + (off & PAGE_MASK), PAGE_SIZE);

		/*
		 * Calculate maximum allowed number of pages for readahead
		 * at this iteration.  If SF_USER_READAHEAD was set, we don't
		 * do any heuristics and use exactly the value supplied by
		 * application.  Otherwise, we allow readahead up to "rem".
		 * If application wants more, let it be, but there is no
		 * reason to go above maxphys.  Also check against "obj_size",
		 * since vm_pager_has_page() can hint beyond EOF.
		 */
		if (flags & SF_USER_READAHEAD) {
			rhpages = SF_READAHEAD(flags);
		} else {
			rhpages = howmany(rem + (off & PAGE_MASK), PAGE_SIZE) -
			    npages;
			rhpages += SF_READAHEAD(flags);
		}
		rhpages = min(howmany(maxphys, PAGE_SIZE), rhpages);
		rhpages = min(howmany(obj_size - trunc_page(off), PAGE_SIZE) -
		    npages, rhpages);
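
		/*
		 * Continuing the example: space == 0x2800 starting 0x800
		 * into a page covers howmany(0x2800 + 0x800, 0x1000) == 3
		 * pages, and with rem == 0x4800 the heuristic above allows
		 * howmany(0x4800 + 0x800, 0x1000) - 3 == 2 readahead pages
		 * plus SF_READAHEAD(flags), clamped by maxphys and EOF.
		 */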

		sfio = malloc(sizeof(struct sf_io) +
		    npages * sizeof(vm_page_t), M_SENDFILE, M_WAITOK);
		refcount_init(&sfio->nios, 1);
		sfio->obj = obj;
		sfio->error = 0;
		sfio->m = NULL;
		sfio->npages = npages;
#ifdef KERN_TLS
		/*
		 * This doesn't use ktls_hold() because sfio->m will
		 * also have a reference on 'tls' that will be valid
		 * for all of sfio's lifetime.
		 */
		sfio->tls = tls;
#endif
		vm_object_pip_add(obj, 1);
		error = sendfile_swapin(obj, sfio, &nios, off, space, rhpages,
		    flags);
		if (error != 0) {
			if (vp != NULL)
				VOP_UNLOCK(vp);
			sendfile_iodone(sfio, NULL, 0, error);
			goto done;
		}

		/*
		 * Loop and construct maximum sized mbuf chain to be bulk
		 * dumped into socket buffer.
		 */
		pa = sfio->pa;

		/*
		 * Use unmapped mbufs if enabled for TCP.  Unmapped
		 * bufs are restricted to TCP as that is what has been
		 * tested.  In particular, unmapped mbufs have not
		 * been tested with UNIX-domain sockets.
		 *
		 * TLS frames always require unmapped mbufs.
		 */
		if ((mb_use_ext_pgs &&
		    so->so_proto->pr_protocol == IPPROTO_TCP)
#ifdef KERN_TLS
		    || tls != NULL
#endif
		    ) {
			use_ext_pgs = true;
#ifdef KERN_TLS
			if (tls != NULL)
				max_pgs = num_pages(tls->params.max_frame_len);
			else
#endif
				max_pgs = MBUF_PEXT_MAX_PGS;

			/* Start at last index, to wrap on first use. */
			ext_pgs_idx = max_pgs - 1;
		}

		for (int i = 0; i < npages; i++) {
			/*
			 * If a page wasn't grabbed successfully, then
			 * trim the array.  This can happen only with
			 * SF_NODISKIO.
			 */
			if (pa[i] == NULL) {
				SFSTAT_INC(sf_busy);
				fixspace(npages, i, off, &space);
				sfio->npages = i;
				softerr = EBUSY;
				break;
			}
			pga = pa[i];
			if (pga == bogus_page)
				pga = vm_page_relookup(obj, sfio->pindex0 + i);

			if (use_ext_pgs) {
				off_t xfs;

				ext_pgs_idx++;
				if (ext_pgs_idx == max_pgs) {
					m0 = mb_alloc_ext_pgs(M_WAITOK,
					    sendfile_free_mext_pg);

					if (flags & SF_NOCACHE) {
						m0->m_ext.ext_flags |=
						    EXT_FLAG_NOCACHE;

						/*
						 * See comment below regarding
						 * ignoring SF_NOCACHE for the
						 * last page.
						 */
						if ((npages - i <= max_pgs) &&
						    ((off + space) & PAGE_MASK) &&
						    (rem > space || rhpages > 0))
							m0->m_ext.ext_flags |=
							    EXT_FLAG_CACHE_LAST;
					}
					if (sfs != NULL) {
						m0->m_ext.ext_flags |=
						    EXT_FLAG_SYNC;
						m0->m_ext.ext_arg1 = sfs;
						mtx_lock(&sfs->mtx);
						sfs->count++;
						mtx_unlock(&sfs->mtx);
					}
					ext_pgs_idx = 0;

					/* Append to mbuf chain. */
					if (mtail != NULL)
						mtail->m_next = m0;
					else
						m = m0;
					mtail = m0;
					m0->m_epg_1st_off =
					    vmoff(i, off) & PAGE_MASK;
				}
				if (nios) {
					mtail->m_flags |= M_NOTREADY;
					m0->m_epg_nrdy++;
				}

				m0->m_epg_pa[ext_pgs_idx] = VM_PAGE_TO_PHYS(pga);
				m0->m_epg_npgs++;
				xfs = xfsize(i, npages, off, space);
				m0->m_epg_last_len = xfs;
				MBUF_EXT_PGS_ASSERT_SANITY(m0);
				mtail->m_len += xfs;
				mtail->m_ext.ext_size += PAGE_SIZE;
				continue;
			}

			/*
			 * Get a sendfile buf.  When allocating the
			 * first buffer for the mbuf chain, we usually
			 * wait as long as necessary, but this wait
			 * can be interrupted.  For subsequent
			 * buffers, do not sleep, since several
			 * threads might exhaust the buffers and then
			 * deadlock.
			 */
			sf = sf_buf_alloc(pga,
			    m != NULL ? SFB_NOWAIT : SFB_CATCH);
			if (sf == NULL) {
				SFSTAT_INC(sf_allocfail);
				sendfile_iowait(sfio, "sfnosf");
				for (int j = i; j < npages; j++) {
					vm_page_unwire(pa[j], PQ_INACTIVE);
					pa[j] = NULL;
				}
				if (m == NULL)
					softerr = ENOBUFS;
				fixspace(npages, i, off, &space);
				sfio->npages = i;
				break;
			}

			m0 = m_get(M_WAITOK, MT_DATA);
			m0->m_ext.ext_buf = (char *)sf_buf_kva(sf);
			m0->m_ext.ext_size = PAGE_SIZE;
			m0->m_ext.ext_arg1 = sf;
			m0->m_ext.ext_type = EXT_SFBUF;
			m0->m_ext.ext_flags = EXT_FLAG_EMBREF;
			m0->m_ext.ext_free = sendfile_free_mext;
			/*
			 * SF_NOCACHE sets the page as being freed upon send.
			 * However, we ignore it for the last page in 'space',
			 * if the page is truncated, and we got more data to
			 * send (rem > space), or if we have readahead
			 * configured (rhpages > 0).
			 */
			if ((flags & SF_NOCACHE) &&
			    (i != npages - 1 ||
			    !((off + space) & PAGE_MASK) ||
			    !(rem > space || rhpages > 0)))
				m0->m_ext.ext_flags |= EXT_FLAG_NOCACHE;
			if (sfs != NULL) {
				m0->m_ext.ext_flags |= EXT_FLAG_SYNC;
				m0->m_ext.ext_arg2 = sfs;
				mtx_lock(&sfs->mtx);
				sfs->count++;
				mtx_unlock(&sfs->mtx);
			}
			m0->m_ext.ext_count = 1;
			m0->m_flags |= (M_EXT | M_RDONLY);
			if (nios)
				m0->m_flags |= M_NOTREADY;
			m0->m_data = (char *)sf_buf_kva(sf) +
			    (vmoff(i, off) & PAGE_MASK);
			m0->m_len = xfsize(i, npages, off, space);

			/* Append to mbuf chain. */
			if (mtail != NULL)
				mtail->m_next = m0;
			else
				m = m0;
			mtail = m0;
		}

		if (vp != NULL)
			VOP_UNLOCK(vp);

		/* Keep track of bytes processed. */
		off += space;
		rem -= space;

		/*
		 * Prepend header, if any.  Save pointer to first mbuf
		 * with a page.
		 */
		if (hdrlen) {
prepend_header:
			m0 = mhtail->m_next = m;
			m = mh;
			mh = NULL;
		} else
			m0 = m;

		if (m == NULL) {
			KASSERT(softerr, ("%s: m NULL, no error", __func__));
			error = softerr;
			sendfile_iodone(sfio, NULL, 0, 0);
			goto done;
		}

		/* Add the buffer chain to the socket buffer. */
		KASSERT(m_length(m, NULL) == space + hdrlen,
		    ("%s: mlen %u space %d hdrlen %d",
		    __func__, m_length(m, NULL), space, hdrlen));

		CURVNET_SET(so->so_vnet);
#ifdef KERN_TLS
		if (tls != NULL)
			ktls_frame(m, tls, &tls_enq_cnt, TLS_RLTYPE_APP);
#endif
		if (nios == 0) {
			/*
			 * If sendfile_swapin() didn't initiate any I/Os,
			 * which happens if all data is cached in VM, or if
			 * the header consumed all socket buffer space and
			 * sfio is NULL, then we can send data right now
			 * without the PRUS_NOTREADY flag.
			 */
			if (sfio != NULL)
				sendfile_iodone(sfio, NULL, 0, 0);
#ifdef KERN_TLS
			if (tls != NULL && tls->mode == TCP_TLS_MODE_SW) {
				error = (*so->so_proto->pr_usrreqs->pru_send)
				    (so, PRUS_NOTREADY, m, NULL, NULL, td);
				soref(so);
				ktls_enqueue(m, so, tls_enq_cnt);
			} else
#endif
				error = (*so->so_proto->pr_usrreqs->pru_send)
				    (so, 0, m, NULL, NULL, td);
		} else {
			sfio->so = so;
			sfio->m = m0;
			soref(so);
			error = (*so->so_proto->pr_usrreqs->pru_send)
			    (so, PRUS_NOTREADY, m, NULL, NULL, td);
			sendfile_iodone(sfio, NULL, 0, 0);
		}
		CURVNET_RESTORE();

		m = NULL;	/* pru_send always consumes */
		if (error)
			goto done;
		sbytes += space + hdrlen;
		if (hdrlen)
			hdrlen = 0;
		if (softerr) {
			error = softerr;
			goto done;
		}
	}

	/*
	 * Send trailers.  Wimp out and use writev(2).
	 */
	if (trl_uio != NULL) {
		sbunlock(&so->so_snd);
		error = kern_writev(td, sockfd, trl_uio);
		if (error == 0)
			sbytes += td->td_retval[0];
		goto out;
	}

done:
	sbunlock(&so->so_snd);
out:
	/*
	 * If there was no error we have to clear td->td_retval[0]
	 * because it may have been set by writev.
	 */
	if (error == 0) {
		td->td_retval[0] = 0;
	}
	if (sent != NULL) {
		(*sent) = sbytes;
	}
	if (obj != NULL)
		vm_object_deallocate(obj);
	if (so)
		fdrop(sock_fp, td);
	if (m)
		m_freem(m);
	if (mh)
		m_freem(mh);

	if (sfs != NULL) {
		mtx_lock(&sfs->mtx);
		if (sfs->count != 0)
			error = cv_wait_sig(&sfs->cv, &sfs->mtx);
		if (sfs->count == 0) {
			sendfile_sync_destroy(sfs);
		} else {
			sfs->waiting = false;
			mtx_unlock(&sfs->mtx);
		}
	}
#ifdef KERN_TLS
	if (tls != NULL)
		ktls_free(tls);
#endif

	if (error == ERESTART)
		error = EINTR;

	return (error);
}

static int
sendfile(struct thread *td, struct sendfile_args *uap, int compat)
{
	struct sf_hdtr hdtr;
	struct uio *hdr_uio, *trl_uio;
	struct file *fp;
	off_t sbytes;
	int error;

	/*
	 * The file offset must be non-negative.  If it points beyond EOF
	 * we send only the header/trailer and no payload data.
	 */
	if (uap->offset < 0)
		return (EINVAL);

	sbytes = 0;
	hdr_uio = trl_uio = NULL;

	if (uap->hdtr != NULL) {
		error = copyin(uap->hdtr, &hdtr, sizeof(hdtr));
		if (error != 0)
			goto out;
		if (hdtr.headers != NULL) {
			error = copyinuio(hdtr.headers, hdtr.hdr_cnt,
			    &hdr_uio);
			if (error != 0)
				goto out;
#ifdef COMPAT_FREEBSD4
			/*
			 * In FreeBSD < 5.0 the nbytes to send also included
			 * the header.  If compat is specified subtract the
			 * header size from nbytes.
			 */
			if (compat) {
				if (uap->nbytes > hdr_uio->uio_resid)
					uap->nbytes -= hdr_uio->uio_resid;
				else
					uap->nbytes = 0;
			}
#endif
		}
		if (hdtr.trailers != NULL) {
			error = copyinuio(hdtr.trailers, hdtr.trl_cnt,
			    &trl_uio);
			if (error != 0)
				goto out;
		}
	}

	AUDIT_ARG_FD(uap->fd);

	/*
	 * sendfile(2) can start at any offset within a file so we require
	 * CAP_READ+CAP_SEEK = CAP_PREAD.
	 */
	if ((error = fget_read(td, uap->fd, &cap_pread_rights, &fp)) != 0)
		goto out;

	error = fo_sendfile(fp, uap->s, hdr_uio, trl_uio, uap->offset,
	    uap->nbytes, &sbytes, uap->flags, td);
	fdrop(fp, td);

	if (uap->sbytes != NULL)
		copyout(&sbytes, uap->sbytes, sizeof(off_t));

out:
	free(hdr_uio, M_IOV);
	free(trl_uio, M_IOV);
	return (error);
}

/*
 * sendfile(2)
 *
 * int sendfile(int fd, int s, off_t offset, size_t nbytes,
 *     struct sf_hdtr *hdtr, off_t *sbytes, int flags)
 *
 * Send a file specified by 'fd' and starting at 'offset' to a socket
 * specified by 's'.  Send only 'nbytes' of the file or until EOF if
 * nbytes == 0.  Optionally add a header and/or trailer to the socket
 * output.  If specified, write the total number of bytes sent into
 * *sbytes.
 */
int
sys_sendfile(struct thread *td, struct sendfile_args *uap)
{

	return (sendfile(td, uap, 0));
}
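
/*
 * Minimal userland usage sketch (illustrative only; error handling and
 * partial-transfer handling elided).  With nbytes == 0 this sends 'fd'
 * from offset 0 through EOF over the connected stream socket 's':
 *
 *	off_t sbytes;
 *
 *	if (sendfile(fd, s, 0, 0, NULL, &sbytes, 0) == -1)
 *		err(1, "sendfile after %jd bytes", (intmax_t)sbytes);
 */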

#ifdef COMPAT_FREEBSD4
int
freebsd4_sendfile(struct thread *td, struct freebsd4_sendfile_args *uap)
{
	struct sendfile_args args;

	args.fd = uap->fd;
	args.s = uap->s;
	args.offset = uap->offset;
	args.nbytes = uap->nbytes;
	args.hdtr = uap->hdtr;
	args.sbytes = uap->sbytes;
	args.flags = uap->flags;

	return (sendfile(td, &args, 1));
}
#endif /* COMPAT_FREEBSD4 */