sys/kern/vfs_vnops.c
1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1982, 1986, 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
11 *
12 * Copyright (c) 2012 Konstantin Belousov <kib@FreeBSD.org>
13 * Copyright (c) 2013, 2014 The FreeBSD Foundation
14 *
15 * Portions of this software were developed by Konstantin Belousov
16 * under sponsorship from the FreeBSD Foundation.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions
20 * are met:
21 * 1. Redistributions of source code must retain the above copyright
22 * notice, this list of conditions and the following disclaimer.
23 * 2. Redistributions in binary form must reproduce the above copyright
24 * notice, this list of conditions and the following disclaimer in the
25 * documentation and/or other materials provided with the distribution.
26 * 3. Neither the name of the University nor the names of its contributors
27 * may be used to endorse or promote products derived from this software
28 * without specific prior written permission.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
31 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
34 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
35 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
36 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
37 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
38 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
39 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40 * SUCH DAMAGE.
41 *
42 * @(#)vfs_vnops.c 8.2 (Berkeley) 1/21/94
43 */
44
45 #include <sys/cdefs.h>
46 __FBSDID("$FreeBSD$");
47
48 #include "opt_hwpmc_hooks.h"
49
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/disk.h>
53 #include <sys/fail.h>
54 #include <sys/fcntl.h>
55 #include <sys/file.h>
56 #include <sys/kdb.h>
57 #include <sys/stat.h>
58 #include <sys/priv.h>
59 #include <sys/proc.h>
60 #include <sys/limits.h>
61 #include <sys/lock.h>
62 #include <sys/mman.h>
63 #include <sys/mount.h>
64 #include <sys/mutex.h>
65 #include <sys/namei.h>
66 #include <sys/vnode.h>
67 #include <sys/bio.h>
68 #include <sys/buf.h>
69 #include <sys/filio.h>
70 #include <sys/resourcevar.h>
71 #include <sys/rwlock.h>
72 #include <sys/sx.h>
73 #include <sys/sysctl.h>
74 #include <sys/ttycom.h>
75 #include <sys/conf.h>
76 #include <sys/syslog.h>
77 #include <sys/unistd.h>
78 #include <sys/user.h>
79
80 #include <security/audit/audit.h>
81 #include <security/mac/mac_framework.h>
82
83 #include <vm/vm.h>
84 #include <vm/vm_extern.h>
85 #include <vm/pmap.h>
86 #include <vm/vm_map.h>
87 #include <vm/vm_object.h>
88 #include <vm/vm_page.h>
89 #include <vm/vm_pager.h>
90
91 #ifdef HWPMC_HOOKS
92 #include <sys/pmckern.h>
93 #endif
94
95 static fo_rdwr_t vn_read;
96 static fo_rdwr_t vn_write;
97 static fo_rdwr_t vn_io_fault;
98 static fo_truncate_t vn_truncate;
99 static fo_ioctl_t vn_ioctl;
100 static fo_poll_t vn_poll;
101 static fo_kqfilter_t vn_kqfilter;
102 static fo_stat_t vn_statfile;
103 static fo_close_t vn_closefile;
104 static fo_mmap_t vn_mmap;
105
106 struct fileops vnops = {
107 .fo_read = vn_io_fault,
108 .fo_write = vn_io_fault,
109 .fo_truncate = vn_truncate,
110 .fo_ioctl = vn_ioctl,
111 .fo_poll = vn_poll,
112 .fo_kqfilter = vn_kqfilter,
113 .fo_stat = vn_statfile,
114 .fo_close = vn_closefile,
115 .fo_chmod = vn_chmod,
116 .fo_chown = vn_chown,
117 .fo_sendfile = vn_sendfile,
118 .fo_seek = vn_seek,
119 .fo_fill_kinfo = vn_fill_kinfo,
120 .fo_mmap = vn_mmap,
121 .fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
122 };
123
124 static const int io_hold_cnt = 16;
125 static int vn_io_fault_enable = 1;
126 SYSCTL_INT(_debug, OID_AUTO, vn_io_fault_enable, CTLFLAG_RW,
127 &vn_io_fault_enable, 0, "Enable vn_io_fault lock avoidance");
128 static int vn_io_fault_prefault = 0;
129 SYSCTL_INT(_debug, OID_AUTO, vn_io_fault_prefault, CTLFLAG_RW,
130 &vn_io_fault_prefault, 0, "Enable vn_io_fault prefaulting");
131 static u_long vn_io_faults_cnt;
132 SYSCTL_ULONG(_debug, OID_AUTO, vn_io_faults, CTLFLAG_RD,
133 &vn_io_faults_cnt, 0, "Count of vn_io_fault lock avoidance triggers");
134
135 static int vfs_allow_read_dir = 0;
136 SYSCTL_INT(_security_bsd, OID_AUTO, allow_read_dir, CTLFLAG_RW,
137 &vfs_allow_read_dir, 0,
138 "Enable read(2) of directory for filesystems that support it");
139
140 /*
141  * Returns true if the vn_io_fault mode of handling the i/o request
142  * should be used.
143 */
144 static bool
145 do_vn_io_fault(struct vnode *vp, struct uio *uio)
146 {
147 struct mount *mp;
148
149 return (uio->uio_segflg == UIO_USERSPACE && vp->v_type == VREG &&
150 (mp = vp->v_mount) != NULL &&
151 (mp->mnt_kern_flag & MNTK_NO_IOPF) != 0 && vn_io_fault_enable);
152 }
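/*
 * Illustrative annotation (not part of this file): a filesystem opts into
 * vn_io_fault handling by setting MNTK_NO_IOPF at mount time; UFS does
 * this in ffs_mountfs():
 *
 *	MNT_ILOCK(mp);
 *	mp->mnt_kern_flag |= MNTK_NO_IOPF;
 *	MNT_IUNLOCK(mp);
 */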
153
154 /*
155 * Structure used to pass arguments to vn_io_fault1(), to do either
156 * file- or vnode-based I/O calls.
157 */
158 struct vn_io_fault_args {
159 enum {
160 VN_IO_FAULT_FOP,
161 VN_IO_FAULT_VOP
162 } kind;
163 struct ucred *cred;
164 int flags;
165 union {
166 struct fop_args_tag {
167 struct file *fp;
168 fo_rdwr_t *doio;
169 } fop_args;
170 struct vop_args_tag {
171 struct vnode *vp;
172 } vop_args;
173 } args;
174 };
175
176 static int vn_io_fault1(struct vnode *vp, struct uio *uio,
177 struct vn_io_fault_args *args, struct thread *td);
178
179 int
180 vn_open(struct nameidata *ndp, int *flagp, int cmode, struct file *fp)
181 {
182 struct thread *td = ndp->ni_cnd.cn_thread;
183
184 return (vn_open_cred(ndp, flagp, cmode, 0, td->td_ucred, fp));
185 }
186
187 static uint64_t
188 open2nameif(int fmode, u_int vn_open_flags)
189 {
190 uint64_t res;
191
192 res = ISOPEN | LOCKLEAF;
193 if ((fmode & O_RESOLVE_BENEATH) != 0)
194 res |= RBENEATH;
195 if ((vn_open_flags & VN_OPEN_NOAUDIT) == 0)
196 res |= AUDITVNODE1;
197 if ((vn_open_flags & VN_OPEN_NOCAPCHECK) != 0)
198 res |= NOCAPCHECK;
199 return (res);
200 }
201
202 /*
203 * Common code for vnode open operations via a name lookup.
204  * Look up the vnode, check permissions, and invoke the VOP_OPEN or
205  * VOP_CREATE routine as appropriate.
206  *
207  * Note that this does NOT free the nameidata on success, since the
208  * NDINIT was done by the caller.
209 */
210 int
211 vn_open_cred(struct nameidata *ndp, int *flagp, int cmode, u_int vn_open_flags,
212 struct ucred *cred, struct file *fp)
213 {
214 struct vnode *vp;
215 struct mount *mp;
216 struct thread *td = ndp->ni_cnd.cn_thread;
217 struct vattr vat;
218 struct vattr *vap = &vat;
219 int fmode, error;
220
221 restart:
222 fmode = *flagp;
223 if ((fmode & (O_CREAT | O_EXCL | O_DIRECTORY)) == (O_CREAT |
224 O_EXCL | O_DIRECTORY))
225 return (EINVAL);
226 else if ((fmode & (O_CREAT | O_DIRECTORY)) == O_CREAT) {
227 ndp->ni_cnd.cn_nameiop = CREATE;
228 ndp->ni_cnd.cn_flags = open2nameif(fmode, vn_open_flags);
229 /*
230 * Set NOCACHE to avoid flushing the cache when
231 * rolling in many files at once.
232 */
233 ndp->ni_cnd.cn_flags |= LOCKPARENT | NOCACHE;
234 if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
235 ndp->ni_cnd.cn_flags |= FOLLOW;
236 if ((vn_open_flags & VN_OPEN_INVFS) == 0)
237 bwillwrite();
238 if ((error = namei(ndp)) != 0)
239 return (error);
240 if (ndp->ni_vp == NULL) {
241 VATTR_NULL(vap);
242 vap->va_type = VREG;
243 vap->va_mode = cmode;
244 if (fmode & O_EXCL)
245 vap->va_vaflags |= VA_EXCLUSIVE;
246 if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
247 NDFREE(ndp, NDF_ONLY_PNBUF);
248 vput(ndp->ni_dvp);
249 if ((error = vn_start_write(NULL, &mp,
250 V_XSLEEP | PCATCH)) != 0)
251 return (error);
252 goto restart;
253 }
254 if ((vn_open_flags & VN_OPEN_NAMECACHE) != 0)
255 ndp->ni_cnd.cn_flags |= MAKEENTRY;
256 #ifdef MAC
257 error = mac_vnode_check_create(cred, ndp->ni_dvp,
258 &ndp->ni_cnd, vap);
259 if (error == 0)
260 #endif
261 error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
262 &ndp->ni_cnd, vap);
263 vput(ndp->ni_dvp);
264 vn_finished_write(mp);
265 if (error) {
266 NDFREE(ndp, NDF_ONLY_PNBUF);
267 return (error);
268 }
269 fmode &= ~O_TRUNC;
270 vp = ndp->ni_vp;
271 } else {
272 if (ndp->ni_dvp == ndp->ni_vp)
273 vrele(ndp->ni_dvp);
274 else
275 vput(ndp->ni_dvp);
276 ndp->ni_dvp = NULL;
277 vp = ndp->ni_vp;
278 if (fmode & O_EXCL) {
279 error = EEXIST;
280 goto bad;
281 }
282 if (vp->v_type == VDIR) {
283 error = EISDIR;
284 goto bad;
285 }
286 fmode &= ~O_CREAT;
287 }
288 } else {
289 ndp->ni_cnd.cn_nameiop = LOOKUP;
290 ndp->ni_cnd.cn_flags = open2nameif(fmode, vn_open_flags);
291 ndp->ni_cnd.cn_flags |= (fmode & O_NOFOLLOW) != 0 ? NOFOLLOW :
292 FOLLOW;
293 if ((fmode & FWRITE) == 0)
294 ndp->ni_cnd.cn_flags |= LOCKSHARED;
295 if ((error = namei(ndp)) != 0)
296 return (error);
297 vp = ndp->ni_vp;
298 }
299 error = vn_open_vnode(vp, fmode, cred, td, fp);
300 if (error)
301 goto bad;
302 *flagp = fmode;
303 return (0);
304 bad:
305 NDFREE(ndp, NDF_ONLY_PNBUF);
306 vput(vp);
307 *flagp = fmode;
308 ndp->ni_vp = NULL;
309 return (error);
310 }
311
312 static int
313 vn_open_vnode_advlock(struct vnode *vp, int fmode, struct file *fp)
314 {
315 struct flock lf;
316 int error, lock_flags, type;
317
318 ASSERT_VOP_LOCKED(vp, "vn_open_vnode_advlock");
319 if ((fmode & (O_EXLOCK | O_SHLOCK)) == 0)
320 return (0);
321 KASSERT(fp != NULL, ("open with flock requires fp"));
322 if (fp->f_type != DTYPE_NONE && fp->f_type != DTYPE_VNODE)
323 return (EOPNOTSUPP);
324
325 lock_flags = VOP_ISLOCKED(vp);
326 VOP_UNLOCK(vp, 0);
327
328 lf.l_whence = SEEK_SET;
329 lf.l_start = 0;
330 lf.l_len = 0;
331 lf.l_type = (fmode & O_EXLOCK) != 0 ? F_WRLCK : F_RDLCK;
332 type = F_FLOCK;
333 if ((fmode & FNONBLOCK) == 0)
334 type |= F_WAIT;
335 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type);
336 if (error == 0)
337 fp->f_flag |= FHASLOCK;
338
339 vn_lock(vp, lock_flags | LK_RETRY);
340 if (error == 0 && (vp->v_iflag & VI_DOOMED) != 0)
341 error = ENOENT;
342 return (error);
343 }
344
345 /*
346 * Common code for vnode open operations once a vnode is located.
347 * Check permissions, and call the VOP_OPEN routine.
348 */
349 int
350 vn_open_vnode(struct vnode *vp, int fmode, struct ucred *cred,
351 struct thread *td, struct file *fp)
352 {
353 accmode_t accmode;
354 int error;
355
356 if (vp->v_type == VLNK)
357 return (EMLINK);
358 if (vp->v_type == VSOCK)
359 return (EOPNOTSUPP);
360 if (vp->v_type != VDIR && fmode & O_DIRECTORY)
361 return (ENOTDIR);
362 accmode = 0;
363 if (fmode & (FWRITE | O_TRUNC)) {
364 if (vp->v_type == VDIR)
365 return (EISDIR);
366 accmode |= VWRITE;
367 }
368 if (fmode & FREAD)
369 accmode |= VREAD;
370 if (fmode & FEXEC)
371 accmode |= VEXEC;
372 if ((fmode & O_APPEND) && (fmode & FWRITE))
373 accmode |= VAPPEND;
374 #ifdef MAC
375 if (fmode & O_CREAT)
376 accmode |= VCREAT;
377 if (fmode & O_VERIFY)
378 accmode |= VVERIFY;
379 error = mac_vnode_check_open(cred, vp, accmode);
380 if (error)
381 return (error);
382
383 accmode &= ~(VCREAT | VVERIFY);
384 #endif
385 if ((fmode & O_CREAT) == 0 && accmode != 0) {
386 error = VOP_ACCESS(vp, accmode, cred, td);
387 if (error != 0)
388 return (error);
389 }
390 if (vp->v_type == VFIFO && VOP_ISLOCKED(vp) != LK_EXCLUSIVE)
391 vn_lock(vp, LK_UPGRADE | LK_RETRY);
392 error = VOP_OPEN(vp, fmode, cred, td, fp);
393 if (error != 0)
394 return (error);
395
396 error = vn_open_vnode_advlock(vp, fmode, fp);
397 if (error == 0 && (fmode & FWRITE) != 0) {
398 error = VOP_ADD_WRITECOUNT(vp, 1);
399 if (error == 0) {
400 CTR3(KTR_VFS, "%s: vp %p v_writecount increased to %d",
401 __func__, vp, vp->v_writecount);
402 }
403 }
404
405 /*
406 * Error from advlock or VOP_ADD_WRITECOUNT() still requires
407 * calling VOP_CLOSE() to pair with earlier VOP_OPEN().
408 */
409 if (error != 0) {
410 if (fp != NULL) {
411 /*
412  * Arrange for fdrop() to call vn_closefile().
413  * This is done to satisfy
414 * filesystems like devfs or tmpfs, which
415 * override fo_close().
416 */
417 fp->f_flag |= FOPENFAILED;
418 fp->f_vnode = vp;
419 if (fp->f_ops == &badfileops) {
420 fp->f_type = DTYPE_VNODE;
421 fp->f_ops = &vnops;
422 }
423 vref(vp);
424 } else {
425 /*
426 * If there is no fp, due to kernel-mode open,
427 * we can call VOP_CLOSE() now.
428 */
429 if (vp->v_type != VFIFO && (fmode & FWRITE) != 0 &&
430 !MNT_EXTENDED_SHARED(vp->v_mount) &&
431 VOP_ISLOCKED(vp) != LK_EXCLUSIVE)
432 vn_lock(vp, LK_UPGRADE | LK_RETRY);
433 (void)VOP_CLOSE(vp, fmode & (FREAD | FWRITE | FEXEC),
434 cred, td);
435 }
436 }
437
438 ASSERT_VOP_LOCKED(vp, "vn_open_vnode");
439 return (error);
440
441 }
442
443 /*
444  * Check for write permission on the specified vnode.
445  * Prototype text segments cannot be written.
446  * The check is inherently racy.
447 */
448 int
449 vn_writechk(struct vnode *vp)
450 {
451
452 ASSERT_VOP_LOCKED(vp, "vn_writechk");
453 /*
454 * If there's shared text associated with
455 * the vnode, try to free it up once. If
456 * we fail, we can't allow writing.
457 */
458 if (VOP_IS_TEXT(vp))
459 return (ETXTBSY);
460
461 return (0);
462 }
463
464 /*
465 * Vnode close call
466 */
467 static int
468 vn_close1(struct vnode *vp, int flags, struct ucred *file_cred,
469 struct thread *td, bool keep_ref)
470 {
471 struct mount *mp;
472 int error, lock_flags;
473
474 if (vp->v_type != VFIFO && (flags & FWRITE) == 0 &&
475 MNT_EXTENDED_SHARED(vp->v_mount))
476 lock_flags = LK_SHARED;
477 else
478 lock_flags = LK_EXCLUSIVE;
479
480 vn_start_write(vp, &mp, V_WAIT);
481 vn_lock(vp, lock_flags | LK_RETRY);
482 AUDIT_ARG_VNODE1(vp);
483 if ((flags & (FWRITE | FOPENFAILED)) == FWRITE) {
484 VOP_ADD_WRITECOUNT_CHECKED(vp, -1);
485 CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
486 __func__, vp, vp->v_writecount);
487 }
488 error = VOP_CLOSE(vp, flags, file_cred, td);
489 if (keep_ref)
490 VOP_UNLOCK(vp, 0);
491 else
492 vput(vp);
493 vn_finished_write(mp);
494 return (error);
495 }
496
497 int
498 vn_close(struct vnode *vp, int flags, struct ucred *file_cred,
499 struct thread *td)
500 {
501
502 return (vn_close1(vp, flags, file_cred, td, false));
503 }
504
505 /*
506 * Heuristic to detect sequential operation.
507 */
508 static int
509 sequential_heuristic(struct uio *uio, struct file *fp)
510 {
511
512 ASSERT_VOP_LOCKED(fp->f_vnode, __func__);
513 if (fp->f_flag & FRDAHEAD)
514 return (fp->f_seqcount << IO_SEQSHIFT);
515
516 /*
517 * Offset 0 is handled specially. open() sets f_seqcount to 1 so
518 * that the first I/O is normally considered to be slightly
519 * sequential. Seeking to offset 0 doesn't change sequentiality
520 * unless previous seeks have reduced f_seqcount to 0, in which
521 * case offset 0 is not special.
522 */
523 if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
524 uio->uio_offset == fp->f_nextoff) {
525 /*
526 * f_seqcount is in units of fixed-size blocks so that it
527 * depends mainly on the amount of sequential I/O and not
528 * much on the number of sequential I/O's. The fixed size
529 * of 16384 is hard-coded here since it is (not quite) just
530 * a magic size that works well here. This size is more
531 * closely related to the best I/O size for real disks than
532 * to any block size used by software.
533 */
534 if (uio->uio_resid >= IO_SEQMAX * 16384)
535 fp->f_seqcount = IO_SEQMAX;
536 else {
537 fp->f_seqcount += howmany(uio->uio_resid, 16384);
538 if (fp->f_seqcount > IO_SEQMAX)
539 fp->f_seqcount = IO_SEQMAX;
540 }
541 return (fp->f_seqcount << IO_SEQSHIFT);
542 }
543
544 	/* Not sequential. Quickly draw down sequentiality. */
545 if (fp->f_seqcount > 1)
546 fp->f_seqcount = 1;
547 else
548 fp->f_seqcount = 0;
549 return (0);
550 }
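/*
 * Worked example (annotation, not original source): a 64kB read(2)
 * arriving at the predicted offset advances f_seqcount by
 * howmany(65536, 16384) == 4, and the value returned to the caller is
 * that count shifted into the high ioflag bits via IO_SEQSHIFT, from
 * which VOP_READ implementations size their cluster read-ahead.
 */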
551
552 /*
553 * Package up an I/O request on a vnode into a uio and do it.
554 */
555 int
556 vn_rdwr(enum uio_rw rw, struct vnode *vp, void *base, int len, off_t offset,
557 enum uio_seg segflg, int ioflg, struct ucred *active_cred,
558 struct ucred *file_cred, ssize_t *aresid, struct thread *td)
559 {
560 struct uio auio;
561 struct iovec aiov;
562 struct mount *mp;
563 struct ucred *cred;
564 void *rl_cookie;
565 struct vn_io_fault_args args;
566 int error, lock_flags;
567
568 if (offset < 0 && vp->v_type != VCHR)
569 return (EINVAL);
570 auio.uio_iov = &aiov;
571 auio.uio_iovcnt = 1;
572 aiov.iov_base = base;
573 aiov.iov_len = len;
574 auio.uio_resid = len;
575 auio.uio_offset = offset;
576 auio.uio_segflg = segflg;
577 auio.uio_rw = rw;
578 auio.uio_td = td;
579 error = 0;
580
581 if ((ioflg & IO_NODELOCKED) == 0) {
582 if ((ioflg & IO_RANGELOCKED) == 0) {
583 if (rw == UIO_READ) {
584 rl_cookie = vn_rangelock_rlock(vp, offset,
585 offset + len);
586 } else {
587 rl_cookie = vn_rangelock_wlock(vp, offset,
588 offset + len);
589 }
590 } else
591 rl_cookie = NULL;
592 mp = NULL;
593 if (rw == UIO_WRITE) {
594 if (vp->v_type != VCHR &&
595 (error = vn_start_write(vp, &mp, V_WAIT | PCATCH))
596 != 0)
597 goto out;
598 if (MNT_SHARED_WRITES(mp) ||
599 ((mp == NULL) && MNT_SHARED_WRITES(vp->v_mount)))
600 lock_flags = LK_SHARED;
601 else
602 lock_flags = LK_EXCLUSIVE;
603 } else
604 lock_flags = LK_SHARED;
605 vn_lock(vp, lock_flags | LK_RETRY);
606 } else
607 rl_cookie = NULL;
608
609 ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
610 #ifdef MAC
611 if ((ioflg & IO_NOMACCHECK) == 0) {
612 if (rw == UIO_READ)
613 error = mac_vnode_check_read(active_cred, file_cred,
614 vp);
615 else
616 error = mac_vnode_check_write(active_cred, file_cred,
617 vp);
618 }
619 #endif
620 if (error == 0) {
621 if (file_cred != NULL)
622 cred = file_cred;
623 else
624 cred = active_cred;
625 if (do_vn_io_fault(vp, &auio)) {
626 args.kind = VN_IO_FAULT_VOP;
627 args.cred = cred;
628 args.flags = ioflg;
629 args.args.vop_args.vp = vp;
630 error = vn_io_fault1(vp, &auio, &args, td);
631 } else if (rw == UIO_READ) {
632 error = VOP_READ(vp, &auio, ioflg, cred);
633 } else /* if (rw == UIO_WRITE) */ {
634 error = VOP_WRITE(vp, &auio, ioflg, cred);
635 }
636 }
637 if (aresid)
638 *aresid = auio.uio_resid;
639 else
640 if (auio.uio_resid && error == 0)
641 error = EIO;
642 if ((ioflg & IO_NODELOCKED) == 0) {
643 VOP_UNLOCK(vp, 0);
644 if (mp != NULL)
645 vn_finished_write(mp);
646 }
647 out:
648 if (rl_cookie != NULL)
649 vn_rangelock_unlock(vp, rl_cookie);
650 return (error);
651 }
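/*
 * Usage sketch (not built; names are illustrative): the common in-kernel
 * pattern around vn_rdwr(), as used e.g. by the kernel linker: open a
 * file by path, read into a kernel buffer while the vnode lock from
 * vn_open() is still held, then unlock and close.
 */
#if 0
static int
example_read_file(const char *path, void *buf, int len, struct thread *td)
{
	struct nameidata nd;
	ssize_t resid;
	int error, flags;

	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, td);
	flags = FREAD;
	error = vn_open(&nd, &flags, 0, NULL);
	if (error != 0)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	/* vn_open() returned with the vnode locked; keep the lock held. */
	error = vn_rdwr(UIO_READ, nd.ni_vp, buf, len, 0, UIO_SYSSPACE,
	    IO_NODELOCKED, td->td_ucred, NOCRED, &resid, td);
	VOP_UNLOCK(nd.ni_vp, 0);
	/* vn_close() relocks internally and drops our reference. */
	(void)vn_close(nd.ni_vp, FREAD, td->td_ucred, td);
	return (error);
}
#endif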
652
653 /*
654 * Package up an I/O request on a vnode into a uio and do it. The I/O
655 * request is split up into smaller chunks and we try to avoid saturating
656 * the buffer cache while potentially holding a vnode locked, so we
657  * call bwillwrite() before calling vn_rdwr(). We also call kern_yield()
658 * to give other processes a chance to lock the vnode (either other processes
659 * core'ing the same binary, or unrelated processes scanning the directory).
660 */
661 int
662 vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, void *base, size_t len,
663 off_t offset, enum uio_seg segflg, int ioflg, struct ucred *active_cred,
664 struct ucred *file_cred, size_t *aresid, struct thread *td)
665 {
666 int error = 0;
667 ssize_t iaresid;
668
669 do {
670 int chunk;
671
672 /*
673 * Force `offset' to a multiple of MAXBSIZE except possibly
674 * for the first chunk, so that filesystems only need to
675 * write full blocks except possibly for the first and last
676 * chunks.
677 */
678 chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;
679
680 if (chunk > len)
681 chunk = len;
682 if (rw != UIO_READ && vp->v_type == VREG)
683 bwillwrite();
684 iaresid = 0;
685 error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
686 ioflg, active_cred, file_cred, &iaresid, td);
687 len -= chunk; /* aresid calc already includes length */
688 if (error)
689 break;
690 offset += chunk;
691 base = (char *)base + chunk;
692 kern_yield(PRI_USER);
693 } while (len);
694 if (aresid)
695 *aresid = len + iaresid;
696 return (error);
697 }
698
699 off_t
700 foffset_lock(struct file *fp, int flags)
701 {
702 struct mtx *mtxp;
703 off_t res;
704
705 KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed"));
706
707 #if OFF_MAX <= LONG_MAX
708 /*
709 * Caller only wants the current f_offset value. Assume that
710  * reads of long and shorter integer types are atomic.
711 */
712 if ((flags & FOF_NOLOCK) != 0)
713 return (fp->f_offset);
714 #endif
715
716 /*
717 * According to McKusick the vn lock was protecting f_offset here.
718 * It is now protected by the FOFFSET_LOCKED flag.
719 */
720 mtxp = mtx_pool_find(mtxpool_sleep, fp);
721 mtx_lock(mtxp);
722 if ((flags & FOF_NOLOCK) == 0) {
723 while (fp->f_vnread_flags & FOFFSET_LOCKED) {
724 fp->f_vnread_flags |= FOFFSET_LOCK_WAITING;
725 			msleep(&fp->f_vnread_flags, mtxp, PUSER - 1,
726 "vofflock", 0);
727 }
728 fp->f_vnread_flags |= FOFFSET_LOCKED;
729 }
730 res = fp->f_offset;
731 mtx_unlock(mtxp);
732 return (res);
733 }
734
735 void
736 foffset_unlock(struct file *fp, off_t val, int flags)
737 {
738 struct mtx *mtxp;
739
740 KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed"));
741
742 #if OFF_MAX <= LONG_MAX
743 if ((flags & FOF_NOLOCK) != 0) {
744 if ((flags & FOF_NOUPDATE) == 0)
745 fp->f_offset = val;
746 if ((flags & FOF_NEXTOFF) != 0)
747 fp->f_nextoff = val;
748 return;
749 }
750 #endif
751
752 mtxp = mtx_pool_find(mtxpool_sleep, fp);
753 mtx_lock(mtxp);
754 if ((flags & FOF_NOUPDATE) == 0)
755 fp->f_offset = val;
756 if ((flags & FOF_NEXTOFF) != 0)
757 fp->f_nextoff = val;
758 if ((flags & FOF_NOLOCK) == 0) {
759 KASSERT((fp->f_vnread_flags & FOFFSET_LOCKED) != 0,
760 ("Lost FOFFSET_LOCKED"));
761 if (fp->f_vnread_flags & FOFFSET_LOCK_WAITING)
762 wakeup(&fp->f_vnread_flags);
763 fp->f_vnread_flags = 0;
764 }
765 mtx_unlock(mtxp);
766 }
767
768 void
769 foffset_lock_uio(struct file *fp, struct uio *uio, int flags)
770 {
771
772 if ((flags & FOF_OFFSET) == 0)
773 uio->uio_offset = foffset_lock(fp, flags);
774 }
775
776 void
777 foffset_unlock_uio(struct file *fp, struct uio *uio, int flags)
778 {
779
780 if ((flags & FOF_OFFSET) == 0)
781 foffset_unlock(fp, uio->uio_offset, flags);
782 }
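/*
 * Usage sketch: fo_rdwr-style methods bracket the i/o with these two
 * helpers unless the caller passed an explicit offset via FOF_OFFSET:
 *
 *	foffset_lock_uio(fp, uio, flags);
 *	error = doio(fp, uio, active_cred, flags | FOF_OFFSET, td);
 *	foffset_unlock_uio(fp, uio, flags);
 *
 * vn_io_fault() below follows exactly this pattern.
 */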
783
784 static int
785 get_advice(struct file *fp, struct uio *uio)
786 {
787 struct mtx *mtxp;
788 int ret;
789
790 ret = POSIX_FADV_NORMAL;
791 if (fp->f_advice == NULL || fp->f_vnode->v_type != VREG)
792 return (ret);
793
794 mtxp = mtx_pool_find(mtxpool_sleep, fp);
795 mtx_lock(mtxp);
796 if (fp->f_advice != NULL &&
797 uio->uio_offset >= fp->f_advice->fa_start &&
798 uio->uio_offset + uio->uio_resid <= fp->f_advice->fa_end)
799 ret = fp->f_advice->fa_advice;
800 mtx_unlock(mtxp);
801 return (ret);
802 }
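/*
 * Illustrative: the advice window consulted above is installed from
 * userland with posix_fadvise(2), e.g.
 *
 *	posix_fadvise(fd, 0, 0, POSIX_FADV_RANDOM);
 *
 * after which vn_read()/vn_write() below skip sequential_heuristic()
 * and its read-ahead for i/o falling inside the window.
 */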
803
804 /*
805 * File table vnode read routine.
806 */
807 static int
808 vn_read(struct file *fp, struct uio *uio, struct ucred *active_cred, int flags,
809 struct thread *td)
810 {
811 struct vnode *vp;
812 off_t orig_offset;
813 int error, ioflag;
814 int advice;
815
816 KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
817 uio->uio_td, td));
818 KASSERT(flags & FOF_OFFSET, ("No FOF_OFFSET"));
819 vp = fp->f_vnode;
820 ioflag = 0;
821 if (fp->f_flag & FNONBLOCK)
822 ioflag |= IO_NDELAY;
823 if (fp->f_flag & O_DIRECT)
824 ioflag |= IO_DIRECT;
825 advice = get_advice(fp, uio);
826 vn_lock(vp, LK_SHARED | LK_RETRY);
827
828 switch (advice) {
829 case POSIX_FADV_NORMAL:
830 case POSIX_FADV_SEQUENTIAL:
831 case POSIX_FADV_NOREUSE:
832 ioflag |= sequential_heuristic(uio, fp);
833 break;
834 case POSIX_FADV_RANDOM:
835 /* Disable read-ahead for random I/O. */
836 break;
837 }
838 orig_offset = uio->uio_offset;
839
840 #ifdef MAC
841 error = mac_vnode_check_read(active_cred, fp->f_cred, vp);
842 if (error == 0)
843 #endif
844 error = VOP_READ(vp, uio, ioflag, fp->f_cred);
845 fp->f_nextoff = uio->uio_offset;
846 VOP_UNLOCK(vp, 0);
847 if (error == 0 && advice == POSIX_FADV_NOREUSE &&
848 orig_offset != uio->uio_offset)
849 /*
850 * Use POSIX_FADV_DONTNEED to flush pages and buffers
851 * for the backing file after a POSIX_FADV_NOREUSE
852 * read(2).
853 */
854 error = VOP_ADVISE(vp, orig_offset, uio->uio_offset - 1,
855 POSIX_FADV_DONTNEED);
856 return (error);
857 }
858
859 /*
860 * File table vnode write routine.
861 */
862 static int
863 vn_write(struct file *fp, struct uio *uio, struct ucred *active_cred, int flags,
864 struct thread *td)
865 {
866 struct vnode *vp;
867 struct mount *mp;
868 off_t orig_offset;
869 int error, ioflag, lock_flags;
870 int advice;
871
872 KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
873 uio->uio_td, td));
874 KASSERT(flags & FOF_OFFSET, ("No FOF_OFFSET"));
875 vp = fp->f_vnode;
876 if (vp->v_type == VREG)
877 bwillwrite();
878 ioflag = IO_UNIT;
879 if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
880 ioflag |= IO_APPEND;
881 if (fp->f_flag & FNONBLOCK)
882 ioflag |= IO_NDELAY;
883 if (fp->f_flag & O_DIRECT)
884 ioflag |= IO_DIRECT;
885 if ((fp->f_flag & O_FSYNC) ||
886 (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
887 ioflag |= IO_SYNC;
888 mp = NULL;
889 if (vp->v_type != VCHR &&
890 (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
891 goto unlock;
892
893 advice = get_advice(fp, uio);
894
895 if (MNT_SHARED_WRITES(mp) ||
896 (mp == NULL && MNT_SHARED_WRITES(vp->v_mount))) {
897 lock_flags = LK_SHARED;
898 } else {
899 lock_flags = LK_EXCLUSIVE;
900 }
901
902 vn_lock(vp, lock_flags | LK_RETRY);
903 switch (advice) {
904 case POSIX_FADV_NORMAL:
905 case POSIX_FADV_SEQUENTIAL:
906 case POSIX_FADV_NOREUSE:
907 ioflag |= sequential_heuristic(uio, fp);
908 break;
909 case POSIX_FADV_RANDOM:
910 /* XXX: Is this correct? */
911 break;
912 }
913 orig_offset = uio->uio_offset;
914
915 #ifdef MAC
916 error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
917 if (error == 0)
918 #endif
919 error = VOP_WRITE(vp, uio, ioflag, fp->f_cred);
920 fp->f_nextoff = uio->uio_offset;
921 VOP_UNLOCK(vp, 0);
922 if (vp->v_type != VCHR)
923 vn_finished_write(mp);
924 if (error == 0 && advice == POSIX_FADV_NOREUSE &&
925 orig_offset != uio->uio_offset)
926 /*
927 * Use POSIX_FADV_DONTNEED to flush pages and buffers
928 * for the backing file after a POSIX_FADV_NOREUSE
929 * write(2).
930 */
931 error = VOP_ADVISE(vp, orig_offset, uio->uio_offset - 1,
932 POSIX_FADV_DONTNEED);
933 unlock:
934 return (error);
935 }
936
937 /*
938 * The vn_io_fault() is a wrapper around vn_read() and vn_write() to
939 * prevent the following deadlock:
940 *
941  * Assume that thread A reads from vnode vp1 into the userspace
942  * buffer buf1 backed by the pages of vnode vp2. If a page in buf1 is
943  * not currently resident, then the system ends up with the call chain
944 * vn_read() -> VOP_READ(vp1) -> uiomove() -> [Page Fault] ->
945 * vm_fault(buf1) -> vnode_pager_getpages(vp2) -> VOP_GETPAGES(vp2)
946 * which establishes lock order vp1->vn_lock, then vp2->vn_lock.
947 * If, at the same time, thread B reads from vnode vp2 into buffer buf2
948 * backed by the pages of vnode vp1, and some page in buf2 is not
949 * resident, we get a reversed order vp2->vn_lock, then vp1->vn_lock.
950 *
951 * To prevent the lock order reversal and deadlock, vn_io_fault() does
952 * not allow page faults to happen during VOP_READ() or VOP_WRITE().
953 * Instead, it first tries to do the whole range i/o with pagefaults
954 * disabled. If all pages in the i/o buffer are resident and mapped,
955  * the VOP will succeed (barring genuine filesystem errors).
956 * Otherwise, we get back EFAULT, and vn_io_fault() falls back to do
957 * i/o in chunks, with all pages in the chunk prefaulted and held
958 * using vm_fault_quick_hold_pages().
959 *
960 * Filesystems using this deadlock avoidance scheme should use the
961 * array of the held pages from uio, saved in the curthread->td_ma,
962 * instead of doing uiomove(). A helper function
963 * vn_io_fault_uiomove() converts uiomove request into
964 * uiomove_fromphys() over td_ma array.
965 *
966 * Since vnode locks do not cover the whole i/o anymore, rangelocks
967 * make the current i/o request atomic with respect to other i/os and
968 * truncations.
969 */
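/*
 * A minimal sketch (hypothetical "myfs", not built) of a VOP_READ for a
 * filesystem that sets MNTK_NO_IOPF and serves data from an in-memory
 * buffer: the copyout goes through vn_io_fault_uiomove(), which uses
 * the td_ma held-page array when vn_io_fault() prepared one and falls
 * back to plain uiomove() otherwise.
 */
#if 0
struct myfs_node {		/* hypothetical per-vnode data */
	char	*n_data;
	off_t	 n_size;
};

static int
myfs_read(struct vop_read_args *ap)
{
	struct uio *uio;
	struct myfs_node *np;
	off_t todo;

	uio = ap->a_uio;
	np = ap->a_vp->v_data;
	if (uio->uio_offset < 0)
		return (EINVAL);
	if (uio->uio_offset >= np->n_size)
		return (0);
	todo = omin(uio->uio_resid, np->n_size - uio->uio_offset);
	return (vn_io_fault_uiomove(np->n_data + uio->uio_offset,
	    (int)todo, uio));
}
#endif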
970
971 /*
972 * Decode vn_io_fault_args and perform the corresponding i/o.
973 */
974 static int
975 vn_io_fault_doio(struct vn_io_fault_args *args, struct uio *uio,
976 struct thread *td)
977 {
978 int error, save;
979
980 error = 0;
981 save = vm_fault_disable_pagefaults();
982 switch (args->kind) {
983 case VN_IO_FAULT_FOP:
984 error = (args->args.fop_args.doio)(args->args.fop_args.fp,
985 uio, args->cred, args->flags, td);
986 break;
987 case VN_IO_FAULT_VOP:
988 if (uio->uio_rw == UIO_READ) {
989 error = VOP_READ(args->args.vop_args.vp, uio,
990 args->flags, args->cred);
991 } else if (uio->uio_rw == UIO_WRITE) {
992 error = VOP_WRITE(args->args.vop_args.vp, uio,
993 args->flags, args->cred);
994 }
995 break;
996 default:
997 panic("vn_io_fault_doio: unknown kind of io %d %d",
998 args->kind, uio->uio_rw);
999 }
1000 vm_fault_enable_pagefaults(save);
1001 return (error);
1002 }
1003
1004 static int
1005 vn_io_fault_touch(char *base, const struct uio *uio)
1006 {
1007 int r;
1008
1009 r = fubyte(base);
1010 if (r == -1 || (uio->uio_rw == UIO_READ && subyte(base, r) == -1))
1011 return (EFAULT);
1012 return (0);
1013 }
1014
1015 static int
1016 vn_io_fault_prefault_user(const struct uio *uio)
1017 {
1018 char *base;
1019 const struct iovec *iov;
1020 size_t len;
1021 ssize_t resid;
1022 int error, i;
1023
1024 KASSERT(uio->uio_segflg == UIO_USERSPACE,
1025 ("vn_io_fault_prefault userspace"));
1026
1027 error = i = 0;
1028 iov = uio->uio_iov;
1029 resid = uio->uio_resid;
1030 base = iov->iov_base;
1031 len = iov->iov_len;
1032 while (resid > 0) {
1033 error = vn_io_fault_touch(base, uio);
1034 if (error != 0)
1035 break;
1036 if (len < PAGE_SIZE) {
1037 if (len != 0) {
1038 error = vn_io_fault_touch(base + len - 1, uio);
1039 if (error != 0)
1040 break;
1041 resid -= len;
1042 }
1043 if (++i >= uio->uio_iovcnt)
1044 break;
1045 iov = uio->uio_iov + i;
1046 base = iov->iov_base;
1047 len = iov->iov_len;
1048 } else {
1049 len -= PAGE_SIZE;
1050 base += PAGE_SIZE;
1051 resid -= PAGE_SIZE;
1052 }
1053 }
1054 return (error);
1055 }
1056
1057 /*
1058 * Common code for vn_io_fault(), agnostic to the kind of i/o request.
1059 * Uses vn_io_fault_doio() to make the call to an actual i/o function.
1060 * Used from vn_rdwr() and vn_io_fault(), which encode the i/o request
1061 * into args and call vn_io_fault1() to handle faults during the user
1062 * mode buffer accesses.
1063 */
1064 static int
1065 vn_io_fault1(struct vnode *vp, struct uio *uio, struct vn_io_fault_args *args,
1066 struct thread *td)
1067 {
1068 vm_page_t ma[io_hold_cnt + 2];
1069 struct uio *uio_clone, short_uio;
1070 struct iovec short_iovec[1];
1071 vm_page_t *prev_td_ma;
1072 vm_prot_t prot;
1073 vm_offset_t addr, end;
1074 size_t len, resid;
1075 ssize_t adv;
1076 int error, cnt, saveheld, prev_td_ma_cnt;
1077
1078 if (vn_io_fault_prefault) {
1079 error = vn_io_fault_prefault_user(uio);
1080 if (error != 0)
1081 return (error); /* Or ignore ? */
1082 }
1083
1084 prot = uio->uio_rw == UIO_READ ? VM_PROT_WRITE : VM_PROT_READ;
1085
1086 /*
1087  * UFS follows the IO_UNIT directive and rolls back both
1088  * uio_offset and uio_resid if an error is encountered during the
1089  * operation. But since the iovec may already be advanced, the
1090  * uio is still in an inconsistent state.
1091 *
1092 * Cache a copy of the original uio, which is advanced to the redo
1093 * point using UIO_NOCOPY below.
1094 */
1095 uio_clone = cloneuio(uio);
1096 resid = uio->uio_resid;
1097
1098 short_uio.uio_segflg = UIO_USERSPACE;
1099 short_uio.uio_rw = uio->uio_rw;
1100 short_uio.uio_td = uio->uio_td;
1101
1102 error = vn_io_fault_doio(args, uio, td);
1103 if (error != EFAULT)
1104 goto out;
1105
1106 atomic_add_long(&vn_io_faults_cnt, 1);
1107 uio_clone->uio_segflg = UIO_NOCOPY;
1108 uiomove(NULL, resid - uio->uio_resid, uio_clone);
1109 uio_clone->uio_segflg = uio->uio_segflg;
1110
1111 saveheld = curthread_pflags_set(TDP_UIOHELD);
1112 prev_td_ma = td->td_ma;
1113 prev_td_ma_cnt = td->td_ma_cnt;
1114
1115 while (uio_clone->uio_resid != 0) {
1116 len = uio_clone->uio_iov->iov_len;
1117 if (len == 0) {
1118 KASSERT(uio_clone->uio_iovcnt >= 1,
1119 ("iovcnt underflow"));
1120 uio_clone->uio_iov++;
1121 uio_clone->uio_iovcnt--;
1122 continue;
1123 }
1124 if (len > io_hold_cnt * PAGE_SIZE)
1125 len = io_hold_cnt * PAGE_SIZE;
1126 addr = (uintptr_t)uio_clone->uio_iov->iov_base;
1127 end = round_page(addr + len);
1128 if (end < addr) {
1129 error = EFAULT;
1130 break;
1131 }
1132 cnt = atop(end - trunc_page(addr));
1133 /*
1134 * A perfectly misaligned address and length could cause
1135 		 * both the start and the end of the chunk to use a partial
1136 		 * page. The +2 accounts for such a situation.
1137 */
1138 cnt = vm_fault_quick_hold_pages(&td->td_proc->p_vmspace->vm_map,
1139 addr, len, prot, ma, io_hold_cnt + 2);
1140 if (cnt == -1) {
1141 error = EFAULT;
1142 break;
1143 }
1144 short_uio.uio_iov = &short_iovec[0];
1145 short_iovec[0].iov_base = (void *)addr;
1146 short_uio.uio_iovcnt = 1;
1147 short_uio.uio_resid = short_iovec[0].iov_len = len;
1148 short_uio.uio_offset = uio_clone->uio_offset;
1149 td->td_ma = ma;
1150 td->td_ma_cnt = cnt;
1151
1152 error = vn_io_fault_doio(args, &short_uio, td);
1153 vm_page_unhold_pages(ma, cnt);
1154 adv = len - short_uio.uio_resid;
1155
1156 uio_clone->uio_iov->iov_base =
1157 (char *)uio_clone->uio_iov->iov_base + adv;
1158 uio_clone->uio_iov->iov_len -= adv;
1159 uio_clone->uio_resid -= adv;
1160 uio_clone->uio_offset += adv;
1161
1162 uio->uio_resid -= adv;
1163 uio->uio_offset += adv;
1164
1165 if (error != 0 || adv == 0)
1166 break;
1167 }
1168 td->td_ma = prev_td_ma;
1169 td->td_ma_cnt = prev_td_ma_cnt;
1170 curthread_pflags_restore(saveheld);
1171 out:
1172 free(uio_clone, M_IOV);
1173 return (error);
1174 }
1175
1176 static int
1177 vn_io_fault(struct file *fp, struct uio *uio, struct ucred *active_cred,
1178 int flags, struct thread *td)
1179 {
1180 fo_rdwr_t *doio;
1181 struct vnode *vp;
1182 void *rl_cookie;
1183 struct vn_io_fault_args args;
1184 int error;
1185
1186 doio = uio->uio_rw == UIO_READ ? vn_read : vn_write;
1187 vp = fp->f_vnode;
1188
1189 /*
1190 * The ability to read(2) on a directory has historically been
1191  * allowed for all users, but it has been the source of at least
1192  * one security issue in the past. As such, it is now hidden away
1193  * behind a sysctl for those that actually need to use it.
1194 */
1195 if (vp->v_type == VDIR) {
1196 KASSERT(uio->uio_rw == UIO_READ,
1197 ("illegal write attempted on a directory"));
1198 if (!vfs_allow_read_dir)
1199 return (EISDIR);
1200 }
1201
1202 foffset_lock_uio(fp, uio, flags);
1203 if (do_vn_io_fault(vp, uio)) {
1204 args.kind = VN_IO_FAULT_FOP;
1205 args.args.fop_args.fp = fp;
1206 args.args.fop_args.doio = doio;
1207 args.cred = active_cred;
1208 args.flags = flags | FOF_OFFSET;
1209 if (uio->uio_rw == UIO_READ) {
1210 rl_cookie = vn_rangelock_rlock(vp, uio->uio_offset,
1211 uio->uio_offset + uio->uio_resid);
1212 } else if ((fp->f_flag & O_APPEND) != 0 ||
1213 (flags & FOF_OFFSET) == 0) {
1214 /* For appenders, punt and lock the whole range. */
1215 rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
1216 } else {
1217 rl_cookie = vn_rangelock_wlock(vp, uio->uio_offset,
1218 uio->uio_offset + uio->uio_resid);
1219 }
1220 error = vn_io_fault1(vp, uio, &args, td);
1221 vn_rangelock_unlock(vp, rl_cookie);
1222 } else {
1223 error = doio(fp, uio, active_cred, flags | FOF_OFFSET, td);
1224 }
1225 foffset_unlock_uio(fp, uio, flags);
1226 return (error);
1227 }
1228
1229 /*
1230 * Helper function to perform the requested uiomove operation using
1231  * the held pages for the uio->uio_iov[0].iov_base buffer instead of
1232 * copyin/copyout. Access to the pages with uiomove_fromphys()
1233 * instead of iov_base prevents page faults that could occur due to
1234 * pmap_collect() invalidating the mapping created by
1235 * vm_fault_quick_hold_pages(), or pageout daemon, page laundry or
1236 * object cleanup revoking the write access from page mappings.
1237 *
1238  * Filesystems that set MNTK_NO_IOPF shall use vn_io_fault_uiomove()
1239 * instead of plain uiomove().
1240 */
1241 int
1242 vn_io_fault_uiomove(char *data, int xfersize, struct uio *uio)
1243 {
1244 struct uio transp_uio;
1245 struct iovec transp_iov[1];
1246 struct thread *td;
1247 size_t adv;
1248 int error, pgadv;
1249
1250 td = curthread;
1251 if ((td->td_pflags & TDP_UIOHELD) == 0 ||
1252 uio->uio_segflg != UIO_USERSPACE)
1253 return (uiomove(data, xfersize, uio));
1254
1255 KASSERT(uio->uio_iovcnt == 1, ("uio_iovcnt %d", uio->uio_iovcnt));
1256 transp_iov[0].iov_base = data;
1257 transp_uio.uio_iov = &transp_iov[0];
1258 transp_uio.uio_iovcnt = 1;
1259 if (xfersize > uio->uio_resid)
1260 xfersize = uio->uio_resid;
1261 transp_uio.uio_resid = transp_iov[0].iov_len = xfersize;
1262 transp_uio.uio_offset = 0;
1263 transp_uio.uio_segflg = UIO_SYSSPACE;
1264 /*
1265 	 * Since transp_iov points to data, and the td_ma page array
1266 	 * corresponds to the original uio->uio_iov, we need to invert the
1267 * direction of the i/o operation as passed to
1268 * uiomove_fromphys().
1269 */
1270 switch (uio->uio_rw) {
1271 case UIO_WRITE:
1272 transp_uio.uio_rw = UIO_READ;
1273 break;
1274 case UIO_READ:
1275 transp_uio.uio_rw = UIO_WRITE;
1276 break;
1277 }
1278 transp_uio.uio_td = uio->uio_td;
1279 error = uiomove_fromphys(td->td_ma,
1280 ((vm_offset_t)uio->uio_iov->iov_base) & PAGE_MASK,
1281 xfersize, &transp_uio);
1282 adv = xfersize - transp_uio.uio_resid;
1283 pgadv =
1284 (((vm_offset_t)uio->uio_iov->iov_base + adv) >> PAGE_SHIFT) -
1285 (((vm_offset_t)uio->uio_iov->iov_base) >> PAGE_SHIFT);
1286 td->td_ma += pgadv;
1287 KASSERT(td->td_ma_cnt >= pgadv, ("consumed pages %d %d", td->td_ma_cnt,
1288 pgadv));
1289 td->td_ma_cnt -= pgadv;
1290 uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + adv;
1291 uio->uio_iov->iov_len -= adv;
1292 uio->uio_resid -= adv;
1293 uio->uio_offset += adv;
1294 return (error);
1295 }
1296
1297 int
1298 vn_io_fault_pgmove(vm_page_t ma[], vm_offset_t offset, int xfersize,
1299 struct uio *uio)
1300 {
1301 struct thread *td;
1302 vm_offset_t iov_base;
1303 int cnt, pgadv;
1304
1305 td = curthread;
1306 if ((td->td_pflags & TDP_UIOHELD) == 0 ||
1307 uio->uio_segflg != UIO_USERSPACE)
1308 return (uiomove_fromphys(ma, offset, xfersize, uio));
1309
1310 KASSERT(uio->uio_iovcnt == 1, ("uio_iovcnt %d", uio->uio_iovcnt));
1311 cnt = xfersize > uio->uio_resid ? uio->uio_resid : xfersize;
1312 iov_base = (vm_offset_t)uio->uio_iov->iov_base;
1313 switch (uio->uio_rw) {
1314 case UIO_WRITE:
1315 pmap_copy_pages(td->td_ma, iov_base & PAGE_MASK, ma,
1316 offset, cnt);
1317 break;
1318 case UIO_READ:
1319 pmap_copy_pages(ma, offset, td->td_ma, iov_base & PAGE_MASK,
1320 cnt);
1321 break;
1322 }
1323 pgadv = ((iov_base + cnt) >> PAGE_SHIFT) - (iov_base >> PAGE_SHIFT);
1324 td->td_ma += pgadv;
1325 KASSERT(td->td_ma_cnt >= pgadv, ("consumed pages %d %d", td->td_ma_cnt,
1326 pgadv));
1327 td->td_ma_cnt -= pgadv;
1328 uio->uio_iov->iov_base = (char *)(iov_base + cnt);
1329 uio->uio_iov->iov_len -= cnt;
1330 uio->uio_resid -= cnt;
1331 uio->uio_offset += cnt;
1332 return (0);
1333 }
1334
1335
1336 /*
1337 * File table truncate routine.
1338 */
1339 static int
1340 vn_truncate(struct file *fp, off_t length, struct ucred *active_cred,
1341 struct thread *td)
1342 {
1343 struct vattr vattr;
1344 struct mount *mp;
1345 struct vnode *vp;
1346 void *rl_cookie;
1347 int error;
1348
1349 vp = fp->f_vnode;
1350
1351 /*
1352 * Lock the whole range for truncation. Otherwise split i/o
1353 * might happen partly before and partly after the truncation.
1354 */
1355 rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
1356 error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
1357 if (error)
1358 goto out1;
1359 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1360 AUDIT_ARG_VNODE1(vp);
1361 if (vp->v_type == VDIR) {
1362 error = EISDIR;
1363 goto out;
1364 }
1365 #ifdef MAC
1366 error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
1367 if (error)
1368 goto out;
1369 #endif
1370 error = VOP_ADD_WRITECOUNT(vp, 1);
1371 if (error == 0) {
1372 VATTR_NULL(&vattr);
1373 vattr.va_size = length;
1374 if ((fp->f_flag & O_FSYNC) != 0)
1375 vattr.va_vaflags |= VA_SYNC;
1376 error = VOP_SETATTR(vp, &vattr, fp->f_cred);
1377 VOP_ADD_WRITECOUNT_CHECKED(vp, -1);
1378 }
1379 out:
1380 VOP_UNLOCK(vp, 0);
1381 vn_finished_write(mp);
1382 out1:
1383 vn_rangelock_unlock(vp, rl_cookie);
1384 return (error);
1385 }
1386
1387 /*
1388 * File table vnode stat routine.
1389 */
1390 static int
1391 vn_statfile(struct file *fp, struct stat *sb, struct ucred *active_cred,
1392 struct thread *td)
1393 {
1394 struct vnode *vp = fp->f_vnode;
1395 int error;
1396
1397 vn_lock(vp, LK_SHARED | LK_RETRY);
1398 error = vn_stat(vp, sb, active_cred, fp->f_cred, td);
1399 VOP_UNLOCK(vp, 0);
1400
1401 return (error);
1402 }
1403
1404 /*
1405 * Stat a vnode; implementation for the stat syscall
1406 */
1407 int
1408 vn_stat(struct vnode *vp, struct stat *sb, struct ucred *active_cred,
1409 struct ucred *file_cred, struct thread *td)
1410 {
1411 struct vattr vattr;
1412 struct vattr *vap;
1413 int error;
1414 u_short mode;
1415
1416 AUDIT_ARG_VNODE1(vp);
1417 #ifdef MAC
1418 error = mac_vnode_check_stat(active_cred, file_cred, vp);
1419 if (error)
1420 return (error);
1421 #endif
1422
1423 vap = &vattr;
1424
1425 /*
1426 * Initialize defaults for new and unusual fields, so that file
1427 * systems which don't support these fields don't need to know
1428 * about them.
1429 */
1430 vap->va_birthtime.tv_sec = -1;
1431 vap->va_birthtime.tv_nsec = 0;
1432 vap->va_fsid = VNOVAL;
1433 vap->va_rdev = NODEV;
1434
1435 error = VOP_GETATTR(vp, vap, active_cred);
1436 if (error)
1437 return (error);
1438
1439 /*
1440 * Zero the spare stat fields
1441 */
1442 bzero(sb, sizeof *sb);
1443
1444 /*
1445 * Copy from vattr table
1446 */
1447 if (vap->va_fsid != VNOVAL)
1448 sb->st_dev = vap->va_fsid;
1449 else
1450 sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
1451 sb->st_ino = vap->va_fileid;
1452 mode = vap->va_mode;
1453 switch (vap->va_type) {
1454 case VREG:
1455 mode |= S_IFREG;
1456 break;
1457 case VDIR:
1458 mode |= S_IFDIR;
1459 break;
1460 case VBLK:
1461 mode |= S_IFBLK;
1462 break;
1463 case VCHR:
1464 mode |= S_IFCHR;
1465 break;
1466 case VLNK:
1467 mode |= S_IFLNK;
1468 break;
1469 case VSOCK:
1470 mode |= S_IFSOCK;
1471 break;
1472 case VFIFO:
1473 mode |= S_IFIFO;
1474 break;
1475 default:
1476 return (EBADF);
1477 }
1478 sb->st_mode = mode;
1479 sb->st_nlink = vap->va_nlink;
1480 sb->st_uid = vap->va_uid;
1481 sb->st_gid = vap->va_gid;
1482 sb->st_rdev = vap->va_rdev;
1483 if (vap->va_size > OFF_MAX)
1484 return (EOVERFLOW);
1485 sb->st_size = vap->va_size;
1486 sb->st_atim = vap->va_atime;
1487 sb->st_mtim = vap->va_mtime;
1488 sb->st_ctim = vap->va_ctime;
1489 sb->st_birthtim = vap->va_birthtime;
1490
1491 /*
1492 * According to www.opengroup.org, the meaning of st_blksize is
1493 * "a filesystem-specific preferred I/O block size for this
1494 * object. In some filesystem types, this may vary from file
1495 * to file"
1496 * Use minimum/default of PAGE_SIZE (e.g. for VCHR).
1497 */
1498
1499 sb->st_blksize = max(PAGE_SIZE, vap->va_blocksize);
1500
1501 sb->st_flags = vap->va_flags;
1502 if (priv_check(td, PRIV_VFS_GENERATION))
1503 sb->st_gen = 0;
1504 else
1505 sb->st_gen = vap->va_gen;
1506
1507 sb->st_blocks = vap->va_bytes / S_BLKSIZE;
1508 return (0);
1509 }
1510
1511 /*
1512 * File table vnode ioctl routine.
1513 */
1514 static int
1515 vn_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
1516 struct thread *td)
1517 {
1518 struct vattr vattr;
1519 struct vnode *vp;
1520 struct fiobmap2_arg *bmarg;
1521 int error;
1522
1523 vp = fp->f_vnode;
1524 switch (vp->v_type) {
1525 case VDIR:
1526 case VREG:
1527 switch (com) {
1528 case FIONREAD:
1529 vn_lock(vp, LK_SHARED | LK_RETRY);
1530 error = VOP_GETATTR(vp, &vattr, active_cred);
1531 VOP_UNLOCK(vp, 0);
1532 if (error == 0)
1533 *(int *)data = vattr.va_size - fp->f_offset;
1534 return (error);
1535 case FIOBMAP2:
1536 bmarg = (struct fiobmap2_arg *)data;
1537 vn_lock(vp, LK_SHARED | LK_RETRY);
1538 #ifdef MAC
1539 error = mac_vnode_check_read(active_cred, fp->f_cred,
1540 vp);
1541 if (error == 0)
1542 #endif
1543 error = VOP_BMAP(vp, bmarg->bn, NULL,
1544 &bmarg->bn, &bmarg->runp, &bmarg->runb);
1545 VOP_UNLOCK(vp, 0);
1546 return (error);
1547 case FIONBIO:
1548 case FIOASYNC:
1549 return (0);
1550 default:
1551 return (VOP_IOCTL(vp, com, data, fp->f_flag,
1552 active_cred, td));
1553 }
1554 break;
1555 case VCHR:
1556 return (VOP_IOCTL(vp, com, data, fp->f_flag,
1557 active_cred, td));
1558 default:
1559 return (ENOTTY);
1560 }
1561 }
1562
1563 /*
1564 * File table vnode poll routine.
1565 */
1566 static int
1567 vn_poll(struct file *fp, int events, struct ucred *active_cred,
1568 struct thread *td)
1569 {
1570 struct vnode *vp;
1571 int error;
1572
1573 vp = fp->f_vnode;
1574 #ifdef MAC
1575 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1576 AUDIT_ARG_VNODE1(vp);
1577 error = mac_vnode_check_poll(active_cred, fp->f_cred, vp);
1578 VOP_UNLOCK(vp, 0);
1579 if (!error)
1580 #endif
1581
1582 error = VOP_POLL(vp, events, fp->f_cred, td);
1583 return (error);
1584 }
1585
1586 /*
1587 * Acquire the requested lock and then check for validity. LK_RETRY
1588 * permits vn_lock to return doomed vnodes.
1589 */
1590 int
1591 _vn_lock(struct vnode *vp, int flags, char *file, int line)
1592 {
1593 int error;
1594
1595 VNASSERT((flags & LK_TYPE_MASK) != 0, vp,
1596 ("vn_lock: no locktype"));
1597 VNASSERT(vp->v_holdcnt != 0, vp, ("vn_lock: zero hold count"));
1598 retry:
1599 error = VOP_LOCK1(vp, flags, file, line);
1600 flags &= ~LK_INTERLOCK; /* Interlock is always dropped. */
1601 KASSERT((flags & LK_RETRY) == 0 || error == 0,
1602 ("vn_lock: error %d incompatible with flags %#x", error, flags));
1603
1604 if ((flags & LK_RETRY) == 0) {
1605 if (error == 0 && (vp->v_iflag & VI_DOOMED) != 0) {
1606 VOP_UNLOCK(vp, 0);
1607 error = ENOENT;
1608 }
1609 } else if (error != 0)
1610 goto retry;
1611 return (error);
1612 }
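/*
 * Usage sketch: without LK_RETRY a caller must handle the vnode having
 * been doomed while it slept for the lock:
 *
 *	if ((error = vn_lock(vp, LK_EXCLUSIVE)) != 0)
 *		return (error);	/* ENOENT if vp was reclaimed */
 */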
1613
1614 /*
1615 * File table vnode close routine.
1616 */
1617 static int
1618 vn_closefile(struct file *fp, struct thread *td)
1619 {
1620 struct vnode *vp;
1621 struct flock lf;
1622 int error;
1623 bool ref;
1624
1625 vp = fp->f_vnode;
1626 fp->f_ops = &badfileops;
1627 ref = (fp->f_flag & FHASLOCK) != 0 && fp->f_type == DTYPE_VNODE;
1628
1629 error = vn_close1(vp, fp->f_flag, fp->f_cred, td, ref);
1630
1631 if (__predict_false(ref)) {
1632 lf.l_whence = SEEK_SET;
1633 lf.l_start = 0;
1634 lf.l_len = 0;
1635 lf.l_type = F_UNLCK;
1636 (void) VOP_ADVLOCK(vp, fp, F_UNLCK, &lf, F_FLOCK);
1637 vrele(vp);
1638 }
1639 return (error);
1640 }
1641
1642 static bool
1643 vn_suspendable(struct mount *mp)
1644 {
1645
1646 return (mp->mnt_op->vfs_susp_clean != NULL);
1647 }
1648
1649 /*
1650  * Prepare to start a filesystem write operation. If the operation is
1651 * permitted, then we bump the count of operations in progress and
1652 * proceed. If a suspend request is in progress, we wait until the
1653 * suspension is over, and then proceed.
1654 */
1655 static int
1656 vn_start_write_locked(struct mount *mp, int flags)
1657 {
1658 int error, mflags;
1659
1660 mtx_assert(MNT_MTX(mp), MA_OWNED);
1661 error = 0;
1662
1663 /*
1664 * Check on status of suspension.
1665 */
1666 if ((curthread->td_pflags & TDP_IGNSUSP) == 0 ||
1667 mp->mnt_susp_owner != curthread) {
1668 mflags = ((mp->mnt_vfc->vfc_flags & VFCF_SBDRY) != 0 ?
1669 (flags & PCATCH) : 0) | (PUSER - 1);
1670 while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
1671 if (flags & V_NOWAIT) {
1672 error = EWOULDBLOCK;
1673 goto unlock;
1674 }
1675 error = msleep(&mp->mnt_flag, MNT_MTX(mp), mflags,
1676 "suspfs", 0);
1677 if (error)
1678 goto unlock;
1679 }
1680 }
1681 if (flags & V_XSLEEP)
1682 goto unlock;
1683 mp->mnt_writeopcount++;
1684 unlock:
1685 if (error != 0 || (flags & V_XSLEEP) != 0)
1686 MNT_REL(mp);
1687 MNT_IUNLOCK(mp);
1688 return (error);
1689 }
1690
1691 int
1692 vn_start_write(struct vnode *vp, struct mount **mpp, int flags)
1693 {
1694 struct mount *mp;
1695 int error;
1696
1697 KASSERT((flags & V_MNTREF) == 0 || (*mpp != NULL && vp == NULL),
1698 ("V_MNTREF requires mp"));
1699
1700 error = 0;
1701 /*
1702 	 * If a vnode is provided, get and return the mount point
1703 	 * to which it will write.
1704 */
1705 if (vp != NULL) {
1706 if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
1707 *mpp = NULL;
1708 if (error != EOPNOTSUPP)
1709 return (error);
1710 return (0);
1711 }
1712 }
1713 if ((mp = *mpp) == NULL)
1714 return (0);
1715
1716 if (!vn_suspendable(mp)) {
1717 if (vp != NULL || (flags & V_MNTREF) != 0)
1718 vfs_rel(mp);
1719 return (0);
1720 }
1721
1722 /*
1723 * VOP_GETWRITEMOUNT() returns with the mp refcount held through
1724 * a vfs_ref().
1725 	 * If a vnode is not provided, we need to acquire a
1726 * refcount for the provided mountpoint too, in order to
1727 * emulate a vfs_ref().
1728 */
1729 MNT_ILOCK(mp);
1730 if (vp == NULL && (flags & V_MNTREF) == 0)
1731 MNT_REF(mp);
1732
1733 return (vn_start_write_locked(mp, flags));
1734 }
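/*
 * Usage sketch of the canonical bracket around a write-type VOP; compare
 * vn_truncate() above:
 *
 *	error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
 *	if (error != 0)
 *		return (error);
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	error = VOP_SETATTR(vp, &vattr, cred);
 *	VOP_UNLOCK(vp, 0);
 *	vn_finished_write(mp);
 */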
1735
1736 /*
1737 * Secondary suspension. Used by operations such as vop_inactive
1738 * routines that are needed by the higher level functions. These
1739 * are allowed to proceed until all the higher level functions have
1740 * completed (indicated by mnt_writeopcount dropping to zero). At that
1741 * time, these operations are halted until the suspension is over.
1742 */
1743 int
1744 vn_start_secondary_write(struct vnode *vp, struct mount **mpp, int flags)
1745 {
1746 struct mount *mp;
1747 int error;
1748
1749 KASSERT((flags & V_MNTREF) == 0 || (*mpp != NULL && vp == NULL),
1750 ("V_MNTREF requires mp"));
1751
1752 retry:
1753 if (vp != NULL) {
1754 if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
1755 *mpp = NULL;
1756 if (error != EOPNOTSUPP)
1757 return (error);
1758 return (0);
1759 }
1760 }
1761 /*
1762 * If we are not suspended or have not yet reached suspended
1763 * mode, then let the operation proceed.
1764 */
1765 if ((mp = *mpp) == NULL)
1766 return (0);
1767
1768 if (!vn_suspendable(mp)) {
1769 if (vp != NULL || (flags & V_MNTREF) != 0)
1770 vfs_rel(mp);
1771 return (0);
1772 }
1773
1774 /*
1775 * VOP_GETWRITEMOUNT() returns with the mp refcount held through
1776 * a vfs_ref().
1777 	 * If a vnode is not provided, we need to acquire a
1778 * refcount for the provided mountpoint too, in order to
1779 * emulate a vfs_ref().
1780 */
1781 MNT_ILOCK(mp);
1782 if (vp == NULL && (flags & V_MNTREF) == 0)
1783 MNT_REF(mp);
1784 if ((mp->mnt_kern_flag & (MNTK_SUSPENDED | MNTK_SUSPEND2)) == 0) {
1785 mp->mnt_secondary_writes++;
1786 mp->mnt_secondary_accwrites++;
1787 MNT_IUNLOCK(mp);
1788 return (0);
1789 }
1790 if (flags & V_NOWAIT) {
1791 MNT_REL(mp);
1792 MNT_IUNLOCK(mp);
1793 return (EWOULDBLOCK);
1794 }
1795 /*
1796 * Wait for the suspension to finish.
1797 */
1798 error = msleep(&mp->mnt_flag, MNT_MTX(mp), (PUSER - 1) | PDROP |
1799 ((mp->mnt_vfc->vfc_flags & VFCF_SBDRY) != 0 ? (flags & PCATCH) : 0),
1800 "suspfs", 0);
1801 vfs_rel(mp);
1802 if (error == 0)
1803 goto retry;
1804 return (error);
1805 }
1806
1807 /*
1808 * Filesystem write operation has completed. If we are suspending and this
1809 * operation is the last one, notify the suspender that the suspension is
1810 * now in effect.
1811 */
1812 void
1813 vn_finished_write(struct mount *mp)
1814 {
1815 if (mp == NULL || !vn_suspendable(mp))
1816 return;
1817 MNT_ILOCK(mp);
1818 MNT_REL(mp);
1819 mp->mnt_writeopcount--;
1820 if (mp->mnt_writeopcount < 0)
1821 panic("vn_finished_write: neg cnt");
1822 if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
1823 mp->mnt_writeopcount <= 0)
1824 wakeup(&mp->mnt_writeopcount);
1825 MNT_IUNLOCK(mp);
1826 }
1827
1828
1829 /*
1830 * Filesystem secondary write operation has completed. If we are
1831 * suspending and this operation is the last one, notify the suspender
1832 * that the suspension is now in effect.
1833 */
1834 void
1835 vn_finished_secondary_write(struct mount *mp)
1836 {
1837 if (mp == NULL || !vn_suspendable(mp))
1838 return;
1839 MNT_ILOCK(mp);
1840 MNT_REL(mp);
1841 mp->mnt_secondary_writes--;
1842 if (mp->mnt_secondary_writes < 0)
1843 panic("vn_finished_secondary_write: neg cnt");
1844 if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
1845 mp->mnt_secondary_writes <= 0)
1846 wakeup(&mp->mnt_secondary_writes);
1847 MNT_IUNLOCK(mp);
1848 }
1849
1850
1851
1852 /*
1853 * Request a filesystem to suspend write operations.
1854 */
1855 int
1856 vfs_write_suspend(struct mount *mp, int flags)
1857 {
1858 int error;
1859
1860 MPASS(vn_suspendable(mp));
1861
1862 MNT_ILOCK(mp);
1863 if (mp->mnt_susp_owner == curthread) {
1864 MNT_IUNLOCK(mp);
1865 return (EALREADY);
1866 }
1867 while (mp->mnt_kern_flag & MNTK_SUSPEND)
1868 msleep(&mp->mnt_flag, MNT_MTX(mp), PUSER - 1, "wsuspfs", 0);
1869
1870 /*
1871 * Unmount holds a write reference on the mount point. If we
1872 	 * own a busy reference and drain for writers, we deadlock with
1873 * the reference draining in the unmount path. Callers of
1874 * vfs_write_suspend() must specify VS_SKIP_UNMOUNT if
1875 * vfs_busy() reference is owned and caller is not in the
1876 * unmount context.
1877 */
1878 if ((flags & VS_SKIP_UNMOUNT) != 0 &&
1879 (mp->mnt_kern_flag & MNTK_UNMOUNT) != 0) {
1880 MNT_IUNLOCK(mp);
1881 return (EBUSY);
1882 }
1883
1884 mp->mnt_kern_flag |= MNTK_SUSPEND;
1885 mp->mnt_susp_owner = curthread;
1886 if (mp->mnt_writeopcount > 0)
1887 (void) msleep(&mp->mnt_writeopcount,
1888 MNT_MTX(mp), (PUSER - 1)|PDROP, "suspwt", 0);
1889 else
1890 MNT_IUNLOCK(mp);
1891 if ((error = VFS_SYNC(mp, MNT_SUSPEND)) != 0)
1892 vfs_write_resume(mp, 0);
1893 return (error);
1894 }
1895
1896 /*
1897 * Request a filesystem to resume write operations.
1898 */
1899 void
1900 vfs_write_resume(struct mount *mp, int flags)
1901 {
1902
1903 MPASS(vn_suspendable(mp));
1904
1905 MNT_ILOCK(mp);
1906 if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
1907 KASSERT(mp->mnt_susp_owner == curthread, ("mnt_susp_owner"));
1908 mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPEND2 |
1909 MNTK_SUSPENDED);
1910 mp->mnt_susp_owner = NULL;
1911 wakeup(&mp->mnt_writeopcount);
1912 wakeup(&mp->mnt_flag);
1913 curthread->td_pflags &= ~TDP_IGNSUSP;
1914 if ((flags & VR_START_WRITE) != 0) {
1915 MNT_REF(mp);
1916 mp->mnt_writeopcount++;
1917 }
1918 MNT_IUNLOCK(mp);
1919 if ((flags & VR_NO_SUSPCLR) == 0)
1920 VFS_SUSP_CLEAN(mp);
1921 } else if ((flags & VR_START_WRITE) != 0) {
1922 MNT_REF(mp);
1923 vn_start_write_locked(mp, 0);
1924 } else {
1925 MNT_IUNLOCK(mp);
1926 }
1927 }
1928
1929 /*
1930 * Helper loop around vfs_write_suspend() for filesystem unmount VFS
1931 * methods.
1932 */
1933 int
1934 vfs_write_suspend_umnt(struct mount *mp)
1935 {
1936 int error;
1937
1938 MPASS(vn_suspendable(mp));
1939 KASSERT((curthread->td_pflags & TDP_IGNSUSP) == 0,
1940 ("vfs_write_suspend_umnt: recursed"));
1941
1942 /* dounmount() already called vn_start_write(). */
1943 for (;;) {
1944 vn_finished_write(mp);
1945 error = vfs_write_suspend(mp, 0);
1946 if (error != 0) {
1947 vn_start_write(NULL, &mp, V_WAIT);
1948 return (error);
1949 }
1950 MNT_ILOCK(mp);
1951 if ((mp->mnt_kern_flag & MNTK_SUSPENDED) != 0)
1952 break;
1953 MNT_IUNLOCK(mp);
1954 vn_start_write(NULL, &mp, V_WAIT);
1955 }
1956 mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2);
1957 wakeup(&mp->mnt_flag);
1958 MNT_IUNLOCK(mp);
1959 curthread->td_pflags |= TDP_IGNSUSP;
1960 return (0);
1961 }
1962
1963 /*
1964  * Implement kqueues for files by translating them to the vnode operation.
1965 */
1966 static int
1967 vn_kqfilter(struct file *fp, struct knote *kn)
1968 {
1969
1970 return (VOP_KQFILTER(fp->f_vnode, kn));
1971 }
1972
1973 /*
1974 * Simplified in-kernel wrapper calls for extended attribute access.
1975 * Both calls pass in a NULL credential, authorizing as "kernel" access.
1976 * Set IO_NODELOCKED in ioflg if the vnode is already locked.
1977 */
1978 int
1979 vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
1980 const char *attrname, int *buflen, char *buf, struct thread *td)
1981 {
1982 struct uio auio;
1983 struct iovec iov;
1984 int error;
1985
1986 iov.iov_len = *buflen;
1987 iov.iov_base = buf;
1988
1989 auio.uio_iov = &iov;
1990 auio.uio_iovcnt = 1;
1991 auio.uio_rw = UIO_READ;
1992 auio.uio_segflg = UIO_SYSSPACE;
1993 auio.uio_td = td;
1994 auio.uio_offset = 0;
1995 auio.uio_resid = *buflen;
1996
1997 if ((ioflg & IO_NODELOCKED) == 0)
1998 vn_lock(vp, LK_SHARED | LK_RETRY);
1999
2000 ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
2001
2002 /* authorize attribute retrieval as kernel */
2003 error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
2004 td);
2005
2006 if ((ioflg & IO_NODELOCKED) == 0)
2007 VOP_UNLOCK(vp, 0);
2008
2009 if (error == 0) {
2010 *buflen = *buflen - auio.uio_resid;
2011 }
2012
2013 return (error);
2014 }
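/*
 * Illustrative sketch of in-kernel use of vn_extattr_get(); the
 * attribute name "xxx.label" and buffer size are made up, and
 * EXTATTR_NAMESPACE_SYSTEM comes from <sys/extattr.h>.
 */
#if 0
static int
xxx_load_label(struct vnode *vp, struct thread *td)
{
	char buf[64];
	int buflen, error;

	buflen = sizeof(buf);
	/* ioflg 0: let the wrapper take the shared vnode lock itself. */
	error = vn_extattr_get(vp, 0, EXTATTR_NAMESPACE_SYSTEM,
	    "xxx.label", &buflen, buf, td);
	/* On success buflen holds the number of bytes actually read. */
	return (error);
}
#endif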
2015
2016 /*
2017 * XXX failure mode if partially written?
2018 */
2019 int
2020 vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
2021 const char *attrname, int buflen, char *buf, struct thread *td)
2022 {
2023 struct uio auio;
2024 struct iovec iov;
2025 struct mount *mp;
2026 int error;
2027
2028 iov.iov_len = buflen;
2029 iov.iov_base = buf;
2030
2031 auio.uio_iov = &iov;
2032 auio.uio_iovcnt = 1;
2033 auio.uio_rw = UIO_WRITE;
2034 auio.uio_segflg = UIO_SYSSPACE;
2035 auio.uio_td = td;
2036 auio.uio_offset = 0;
2037 auio.uio_resid = buflen;
2038
2039 if ((ioflg & IO_NODELOCKED) == 0) {
2040 if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
2041 return (error);
2042 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2043 }
2044
2045 ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
2046
2047 /* authorize attribute setting as kernel */
2048 error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);
2049
2050 if ((ioflg & IO_NODELOCKED) == 0) {
2051 vn_finished_write(mp);
2052 VOP_UNLOCK(vp, 0);
2053 }
2054
2055 return (error);
2056 }
2057
2058 int
2059 vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
2060 const char *attrname, struct thread *td)
2061 {
2062 struct mount *mp;
2063 int error;
2064
2065 if ((ioflg & IO_NODELOCKED) == 0) {
2066 if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
2067 return (error);
2068 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2069 }
2070
2071 ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
2072
2073 /* authorize attribute removal as kernel */
2074 error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, td);
2075 if (error == EOPNOTSUPP)
2076 error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
2077 NULL, td);
2078
2079 if ((ioflg & IO_NODELOCKED) == 0) {
2080 vn_finished_write(mp);
2081 VOP_UNLOCK(vp, 0);
2082 }
2083
2084 return (error);
2085 }
2086
2087 static int
2088 vn_get_ino_alloc_vget(struct mount *mp, void *arg, int lkflags,
2089 struct vnode **rvp)
2090 {
2091
2092 return (VFS_VGET(mp, *(ino_t *)arg, lkflags, rvp));
2093 }
2094
2095 int
2096 vn_vget_ino(struct vnode *vp, ino_t ino, int lkflags, struct vnode **rvp)
2097 {
2098
2099 return (vn_vget_ino_gen(vp, vn_get_ino_alloc_vget, &ino,
2100 lkflags, rvp));
2101 }
2102
2103 int
2104 vn_vget_ino_gen(struct vnode *vp, vn_get_ino_t alloc, void *alloc_arg,
2105 int lkflags, struct vnode **rvp)
2106 {
2107 struct mount *mp;
2108 int ltype, error;
2109
2110 	ASSERT_VOP_LOCKED(vp, "vn_vget_ino_gen");
2111 mp = vp->v_mount;
2112 ltype = VOP_ISLOCKED(vp);
2113 KASSERT(ltype == LK_EXCLUSIVE || ltype == LK_SHARED,
2114 ("vn_vget_ino: vp not locked"));
2115 error = vfs_busy(mp, MBF_NOWAIT);
2116 if (error != 0) {
2117 vfs_ref(mp);
2118 VOP_UNLOCK(vp, 0);
2119 error = vfs_busy(mp, 0);
2120 vn_lock(vp, ltype | LK_RETRY);
2121 vfs_rel(mp);
2122 if (error != 0)
2123 return (ENOENT);
2124 if (vp->v_iflag & VI_DOOMED) {
2125 vfs_unbusy(mp);
2126 return (ENOENT);
2127 }
2128 }
2129 VOP_UNLOCK(vp, 0);
2130 error = alloc(mp, alloc_arg, lkflags, rvp);
2131 vfs_unbusy(mp);
2132 if (error != 0 || *rvp != vp)
2133 vn_lock(vp, ltype | LK_RETRY);
2134 if (vp->v_iflag & VI_DOOMED) {
2135 if (error == 0) {
2136 if (*rvp == vp)
2137 vunref(vp);
2138 else
2139 vput(*rvp);
2140 }
2141 error = ENOENT;
2142 }
2143 return (error);
2144 }
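/*
 * Illustrative sketch: the classic consumer of vn_vget_ino() is a
 * filesystem resolving ".." by inode number while the child directory
 * vnode is locked; xxx_lookup_dotdot() is hypothetical.
 */
#if 0
static int
xxx_lookup_dotdot(struct vnode *dvp, ino_t parent_ino, struct vnode **vpp)
{
	/*
	 * dvp's lock is dropped and re-taken internally; a doomed dvp
	 * is reported as ENOENT instead of being used further.
	 */
	return (vn_vget_ino(dvp, parent_ino, LK_EXCLUSIVE, vpp));
}
#endif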
2145
2146 int
2147 vn_rlimit_fsize(const struct vnode *vp, const struct uio *uio,
2148 struct thread *td)
2149 {
2150
2151 if (vp->v_type != VREG || td == NULL)
2152 return (0);
2153 if ((uoff_t)uio->uio_offset + uio->uio_resid >
2154 lim_cur(td, RLIMIT_FSIZE)) {
2155 PROC_LOCK(td->td_proc);
2156 kern_psignal(td->td_proc, SIGXFSZ);
2157 PROC_UNLOCK(td->td_proc);
2158 return (EFBIG);
2159 }
2160 return (0);
2161 }
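/*
 * Illustrative sketch of a write path consulting the RLIMIT_FSIZE
 * check before starting I/O; xxx_write() is hypothetical.
 */
#if 0
static int
xxx_write(struct vnode *vp, struct uio *uio, struct thread *td)
{
	int error;

	/* Posts SIGXFSZ and returns EFBIG if the limit would be hit. */
	error = vn_rlimit_fsize(vp, uio, td);
	if (error != 0)
		return (error);
	/* ... proceed with VOP_WRITE() ... */
	return (0);
}
#endif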
2162
2163 int
2164 vn_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
2165 struct thread *td)
2166 {
2167 struct vnode *vp;
2168
2169 vp = fp->f_vnode;
2170 #ifdef AUDIT
2171 vn_lock(vp, LK_SHARED | LK_RETRY);
2172 AUDIT_ARG_VNODE1(vp);
2173 VOP_UNLOCK(vp, 0);
2174 #endif
2175 return (setfmode(td, active_cred, vp, mode));
2176 }
2177
2178 int
2179 vn_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
2180 struct thread *td)
2181 {
2182 struct vnode *vp;
2183
2184 vp = fp->f_vnode;
2185 #ifdef AUDIT
2186 vn_lock(vp, LK_SHARED | LK_RETRY);
2187 AUDIT_ARG_VNODE1(vp);
2188 VOP_UNLOCK(vp, 0);
2189 #endif
2190 return (setfown(td, active_cred, vp, uid, gid));
2191 }
2192
2193 void
2194 vn_pages_remove(struct vnode *vp, vm_pindex_t start, vm_pindex_t end)
2195 {
2196 vm_object_t object;
2197
2198 if ((object = vp->v_object) == NULL)
2199 return;
2200 VM_OBJECT_WLOCK(object);
2201 vm_object_page_remove(object, start, end, 0);
2202 VM_OBJECT_WUNLOCK(object);
2203 }
2204
2205 int
2206 vn_bmap_seekhole(struct vnode *vp, u_long cmd, off_t *off, struct ucred *cred)
2207 {
2208 struct vattr va;
2209 daddr_t bn, bnp;
2210 uint64_t bsize;
2211 off_t noff;
2212 int error;
2213
2214 KASSERT(cmd == FIOSEEKHOLE || cmd == FIOSEEKDATA,
2215 ("Wrong command %lu", cmd));
2216
2217 if (vn_lock(vp, LK_SHARED) != 0)
2218 return (EBADF);
2219 if (vp->v_type != VREG) {
2220 error = ENOTTY;
2221 goto unlock;
2222 }
2223 error = VOP_GETATTR(vp, &va, cred);
2224 if (error != 0)
2225 goto unlock;
2226 noff = *off;
2227 if (noff >= va.va_size) {
2228 error = ENXIO;
2229 goto unlock;
2230 }
2231 bsize = vp->v_mount->mnt_stat.f_iosize;
2232 for (bn = noff / bsize; noff < va.va_size; bn++, noff += bsize -
2233 noff % bsize) {
2234 error = VOP_BMAP(vp, bn, NULL, &bnp, NULL, NULL);
2235 if (error == EOPNOTSUPP) {
2236 error = ENOTTY;
2237 goto unlock;
2238 }
2239 if ((bnp == -1 && cmd == FIOSEEKHOLE) ||
2240 (bnp != -1 && cmd == FIOSEEKDATA)) {
2241 noff = bn * bsize;
2242 if (noff < *off)
2243 noff = *off;
2244 goto unlock;
2245 }
2246 }
2247 if (noff > va.va_size)
2248 noff = va.va_size;
2249 /* noff == va.va_size. There is an implicit hole at the end of file. */
2250 if (cmd == FIOSEEKDATA)
2251 error = ENXIO;
2252 unlock:
2253 VOP_UNLOCK(vp, 0);
2254 if (error == 0)
2255 *off = noff;
2256 return (error);
2257 }
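/*
 * Illustrative sketch: a block-map based filesystem can answer
 * FIOSEEKHOLE/FIOSEEKDATA from its VOP_IOCTL by delegating to
 * vn_bmap_seekhole(); xxx_ioctl() is hypothetical.
 */
#if 0
static int
xxx_ioctl(struct vop_ioctl_args *ap)
{
	switch (ap->a_command) {
	case FIOSEEKDATA:
	case FIOSEEKHOLE:
		return (vn_bmap_seekhole(ap->a_vp, ap->a_command,
		    (off_t *)ap->a_data, ap->a_cred));
	default:
		return (ENOTTY);
	}
}
#endif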
2258
2259 int
2260 vn_seek(struct file *fp, off_t offset, int whence, struct thread *td)
2261 {
2262 struct ucred *cred;
2263 struct vnode *vp;
2264 struct vattr vattr;
2265 off_t foffset, size;
2266 int error, noneg;
2267
2268 cred = td->td_ucred;
2269 vp = fp->f_vnode;
2270 foffset = foffset_lock(fp, 0);
2271 noneg = (vp->v_type != VCHR);
2272 error = 0;
2273 switch (whence) {
2274 case L_INCR:
2275 if (noneg &&
2276 (foffset < 0 ||
2277 (offset > 0 && foffset > OFF_MAX - offset))) {
2278 error = EOVERFLOW;
2279 break;
2280 }
2281 offset += foffset;
2282 break;
2283 case L_XTND:
2284 vn_lock(vp, LK_SHARED | LK_RETRY);
2285 error = VOP_GETATTR(vp, &vattr, cred);
2286 VOP_UNLOCK(vp, 0);
2287 if (error)
2288 break;
2289
2290 /*
2291 * If the file references a disk device, then fetch
2292 * the media size and use that to determine the ending
2293 * offset.
2294 */
2295 if (vattr.va_size == 0 && vp->v_type == VCHR &&
2296 fo_ioctl(fp, DIOCGMEDIASIZE, &size, cred, td) == 0)
2297 vattr.va_size = size;
2298 if (noneg &&
2299 (vattr.va_size > OFF_MAX ||
2300 (offset > 0 && vattr.va_size > OFF_MAX - offset))) {
2301 error = EOVERFLOW;
2302 break;
2303 }
2304 offset += vattr.va_size;
2305 break;
2306 case L_SET:
2307 break;
2308 case SEEK_DATA:
2309 error = fo_ioctl(fp, FIOSEEKDATA, &offset, cred, td);
2310 break;
2311 case SEEK_HOLE:
2312 error = fo_ioctl(fp, FIOSEEKHOLE, &offset, cred, td);
2313 break;
2314 default:
2315 error = EINVAL;
2316 }
2317 if (error == 0 && noneg && offset < 0)
2318 error = EINVAL;
2319 if (error != 0)
2320 goto drop;
2321 VFS_KNOTE_UNLOCKED(vp, 0);
2322 td->td_uretoff.tdu_off = offset;
2323 drop:
2324 foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
2325 return (error);
2326 }
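/*
 * Illustrative note: the L_INCR guard above rejects, e.g.,
 * foffset == OFF_MAX with offset == 1, because foffset > OFF_MAX -
 * offset; adding first could wrap to a negative offset, which is only
 * tolerated for character devices (noneg == 0).
 */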
2327
2328 int
2329 vn_utimes_perm(struct vnode *vp, struct vattr *vap, struct ucred *cred,
2330 struct thread *td)
2331 {
2332 int error;
2333
2334 /*
2335 * Grant permission if the caller is the owner of the file, or
2336 	 * the super-user, or has ACL_WRITE_ATTRIBUTES permission
2337 	 * on the file.  If the time pointer is null, then write
2338 * permission on the file is also sufficient.
2339 *
2340 * From NFSv4.1, draft 21, 6.2.1.3.1, Discussion of Mask Attributes:
2341 * A user having ACL_WRITE_DATA or ACL_WRITE_ATTRIBUTES
2342 * will be allowed to set the times [..] to the current
2343 * server time.
2344 */
2345 error = VOP_ACCESSX(vp, VWRITE_ATTRIBUTES, cred, td);
2346 if (error != 0 && (vap->va_vaflags & VA_UTIMES_NULL) != 0)
2347 error = VOP_ACCESS(vp, VWRITE, cred, td);
2348 return (error);
2349 }
2350
2351 int
2352 vn_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
2353 {
2354 struct vnode *vp;
2355 int error;
2356
2357 if (fp->f_type == DTYPE_FIFO)
2358 kif->kf_type = KF_TYPE_FIFO;
2359 else
2360 kif->kf_type = KF_TYPE_VNODE;
2361 vp = fp->f_vnode;
2362 vref(vp);
2363 FILEDESC_SUNLOCK(fdp);
2364 error = vn_fill_kinfo_vnode(vp, kif);
2365 vrele(vp);
2366 FILEDESC_SLOCK(fdp);
2367 return (error);
2368 }
2369
2370 static inline void
2371 vn_fill_junk(struct kinfo_file *kif)
2372 {
2373 size_t len, olen;
2374
2375 /*
2376 	 * Simulate vn_fullpath() returning different values for the same
2377 	 * vp across calls, as can happen during e.g. a coredump.
2378 */
2379 len = (arc4random() % (sizeof(kif->kf_path) - 2)) + 1;
2380 olen = strlen(kif->kf_path);
2381 if (len < olen)
2382 strcpy(&kif->kf_path[len - 1], "$");
2383 else
2384 for (; olen < len; olen++)
2385 strcpy(&kif->kf_path[olen], "A");
2386 }
2387
2388 int
2389 vn_fill_kinfo_vnode(struct vnode *vp, struct kinfo_file *kif)
2390 {
2391 struct vattr va;
2392 char *fullpath, *freepath;
2393 int error;
2394
2395 kif->kf_un.kf_file.kf_file_type = vntype_to_kinfo(vp->v_type);
2396 freepath = NULL;
2397 fullpath = "-";
2398 error = vn_fullpath(curthread, vp, &fullpath, &freepath);
2399 if (error == 0) {
2400 strlcpy(kif->kf_path, fullpath, sizeof(kif->kf_path));
2401 }
2402 if (freepath != NULL)
2403 free(freepath, M_TEMP);
2404
2405 KFAIL_POINT_CODE(DEBUG_FP, fill_kinfo_vnode__random_path,
2406 vn_fill_junk(kif);
2407 );
2408
2409 /*
2410 * Retrieve vnode attributes.
2411 */
2412 va.va_fsid = VNOVAL;
2413 va.va_rdev = NODEV;
2414 vn_lock(vp, LK_SHARED | LK_RETRY);
2415 error = VOP_GETATTR(vp, &va, curthread->td_ucred);
2416 VOP_UNLOCK(vp, 0);
2417 if (error != 0)
2418 return (error);
2419 if (va.va_fsid != VNOVAL)
2420 kif->kf_un.kf_file.kf_file_fsid = va.va_fsid;
2421 else
2422 kif->kf_un.kf_file.kf_file_fsid =
2423 vp->v_mount->mnt_stat.f_fsid.val[0];
2424 kif->kf_un.kf_file.kf_file_fsid_freebsd11 =
2425 kif->kf_un.kf_file.kf_file_fsid; /* truncate */
2426 kif->kf_un.kf_file.kf_file_fileid = va.va_fileid;
2427 kif->kf_un.kf_file.kf_file_mode = MAKEIMODE(va.va_type, va.va_mode);
2428 kif->kf_un.kf_file.kf_file_size = va.va_size;
2429 kif->kf_un.kf_file.kf_file_rdev = va.va_rdev;
2430 kif->kf_un.kf_file.kf_file_rdev_freebsd11 =
2431 kif->kf_un.kf_file.kf_file_rdev; /* truncate */
2432 return (0);
2433 }
2434
2435 int
2436 vn_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size,
2437 vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff,
2438 struct thread *td)
2439 {
2440 #ifdef HWPMC_HOOKS
2441 struct pmckern_map_in pkm;
2442 #endif
2443 struct mount *mp;
2444 struct vnode *vp;
2445 vm_object_t object;
2446 vm_prot_t maxprot;
2447 boolean_t writecounted;
2448 int error;
2449
2450 #if defined(COMPAT_FREEBSD7) || defined(COMPAT_FREEBSD6) || \
2451 defined(COMPAT_FREEBSD5) || defined(COMPAT_FREEBSD4)
2452 /*
2453 * POSIX shared-memory objects are defined to have
2454 * kernel persistence, and are not defined to support
2455 * read(2)/write(2) -- or even open(2). Thus, we can
2456 	 * use MAP_NOSYNC to trade on-disk coherence for speed.
2457 * The shm_open(3) library routine turns on the FPOSIXSHM
2458 * flag to request this behavior.
2459 */
2460 if ((fp->f_flag & FPOSIXSHM) != 0)
2461 flags |= MAP_NOSYNC;
2462 #endif
2463 vp = fp->f_vnode;
2464
2465 /*
2466 * Ensure that file and memory protections are
2467 * compatible. Note that we only worry about
2468 * writability if mapping is shared; in this case,
2469 * current and max prot are dictated by the open file.
2470 * XXX use the vnode instead? Problem is: what
2471 * credentials do we use for determination? What if
2472 * proc does a setuid?
2473 */
2474 mp = vp->v_mount;
2475 if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) {
2476 maxprot = VM_PROT_NONE;
2477 if ((prot & VM_PROT_EXECUTE) != 0)
2478 return (EACCES);
2479 } else
2480 maxprot = VM_PROT_EXECUTE;
2481 if ((fp->f_flag & FREAD) != 0)
2482 maxprot |= VM_PROT_READ;
2483 else if ((prot & VM_PROT_READ) != 0)
2484 return (EACCES);
2485
2486 /*
2487 	 * If we are sharing potential changes via MAP_SHARED and are
2488 	 * asking for write permission although the file was opened
2489 	 * without it, bail out.
2490 */
2491 if ((flags & MAP_SHARED) != 0) {
2492 if ((fp->f_flag & FWRITE) != 0)
2493 maxprot |= VM_PROT_WRITE;
2494 else if ((prot & VM_PROT_WRITE) != 0)
2495 return (EACCES);
2496 } else {
2497 maxprot |= VM_PROT_WRITE;
2498 cap_maxprot |= VM_PROT_WRITE;
2499 }
2500 maxprot &= cap_maxprot;
2501
2502 /*
2503 * For regular files and shared memory, POSIX requires that
2504 * the value of foff be a legitimate offset within the data
2505 * object. In particular, negative offsets are invalid.
2506 * Blocking negative offsets and overflows here avoids
2507 * possible wraparound or user-level access into reserved
2508 * ranges of the data object later. In contrast, POSIX does
2509 * not dictate how offsets are used by device drivers, so in
2510 * the case of a device mapping a negative offset is passed
2511 * on.
2512 */
2513 if (
2514 #ifdef _LP64
2515 size > OFF_MAX ||
2516 #endif
2517 foff < 0 || foff > OFF_MAX - size)
2518 return (EINVAL);
2519
2520 writecounted = FALSE;
2521 error = vm_mmap_vnode(td, size, prot, &maxprot, &flags, vp,
2522 &foff, &object, &writecounted);
2523 if (error != 0)
2524 return (error);
2525 error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object,
2526 foff, writecounted, td);
2527 if (error != 0) {
2528 /*
2529 * If this mapping was accounted for in the vnode's
2530 * writecount, then undo that now.
2531 */
2532 if (writecounted)
2533 vm_pager_release_writecount(object, 0, size);
2534 vm_object_deallocate(object);
2535 }
2536 #ifdef HWPMC_HOOKS
2537 /* Inform hwpmc(4) if an executable is being mapped. */
2538 if (PMC_HOOK_INSTALLED(PMC_FN_MMAP)) {
2539 if ((prot & VM_PROT_EXECUTE) != 0 && error == 0) {
2540 pkm.pm_file = vp;
2541 pkm.pm_address = (uintptr_t) *addr;
2542 PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_MMAP, (void *) &pkm);
2543 }
2544 }
2545 #endif
2546 return (error);
2547 }
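/*
 * Illustrative note: under the rules above, a file opened O_RDONLY
 * and mapped MAP_SHARED gets a maxprot without VM_PROT_WRITE, so a
 * later mprotect(PROT_WRITE) on the mapping fails; the same file
 * mapped MAP_PRIVATE does get VM_PROT_WRITE in maxprot, since private
 * changes never reach the vnode.
 */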
2548
2549 void
2550 vn_fsid(struct vnode *vp, struct vattr *va)
2551 {
2552 fsid_t *f;
2553
2554 f = &vp->v_mount->mnt_stat.f_fsid;
2555 va->va_fsid = (uint32_t)f->val[1];
2556 va->va_fsid <<= sizeof(f->val[1]) * NBBY;
2557 va->va_fsid += (uint32_t)f->val[0];
2558 }
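/*
 * Illustrative note: vn_fsid() packs f_fsid.val[1] into the high 32
 * bits of va_fsid and val[0] into the low 32 bits, e.g.
 * val = { 0x11223344, 0xaabbccdd } yields 0xaabbccdd11223344.
 */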
2559
2560 int
2561 vn_fsync_buf(struct vnode *vp, int waitfor)
2562 {
2563 struct buf *bp, *nbp;
2564 struct bufobj *bo;
2565 struct mount *mp;
2566 int error, maxretry;
2567
2568 error = 0;
2569 maxretry = 10000; /* large, arbitrarily chosen */
2570 mp = NULL;
2571 if (vp->v_type == VCHR) {
2572 VI_LOCK(vp);
2573 mp = vp->v_rdev->si_mountpt;
2574 VI_UNLOCK(vp);
2575 }
2576 bo = &vp->v_bufobj;
2577 BO_LOCK(bo);
2578 loop1:
2579 /*
2580 * MARK/SCAN initialization to avoid infinite loops.
2581 */
2582 TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
2583 bp->b_vflags &= ~BV_SCANNED;
2584 bp->b_error = 0;
2585 }
2586
2587 /*
2588 * Flush all dirty buffers associated with a vnode.
2589 */
2590 loop2:
2591 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
2592 if ((bp->b_vflags & BV_SCANNED) != 0)
2593 continue;
2594 bp->b_vflags |= BV_SCANNED;
2595 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
2596 if (waitfor != MNT_WAIT)
2597 continue;
2598 if (BUF_LOCK(bp,
2599 LK_EXCLUSIVE | LK_INTERLOCK | LK_SLEEPFAIL,
2600 BO_LOCKPTR(bo)) != 0) {
2601 BO_LOCK(bo);
2602 goto loop1;
2603 }
2604 BO_LOCK(bo);
2605 }
2606 BO_UNLOCK(bo);
2607 KASSERT(bp->b_bufobj == bo,
2608 ("bp %p wrong b_bufobj %p should be %p",
2609 bp, bp->b_bufobj, bo));
2610 if ((bp->b_flags & B_DELWRI) == 0)
2611 panic("fsync: not dirty");
2612 if ((vp->v_object != NULL) && (bp->b_flags & B_CLUSTEROK)) {
2613 vfs_bio_awrite(bp);
2614 } else {
2615 bremfree(bp);
2616 bawrite(bp);
2617 }
2618 if (maxretry < 1000)
2619 pause("dirty", hz < 1000 ? 1 : hz / 1000);
2620 BO_LOCK(bo);
2621 goto loop2;
2622 }
2623
2624 /*
2625 	 * If synchronous, the caller expects us to completely resolve all
2626 * dirty buffers in the system. Wait for in-progress I/O to
2627 * complete (which could include background bitmap writes), then
2628 * retry if dirty blocks still exist.
2629 */
2630 if (waitfor == MNT_WAIT) {
2631 bufobj_wwait(bo, 0, 0);
2632 if (bo->bo_dirty.bv_cnt > 0) {
2633 /*
2634 * If we are unable to write any of these buffers
2635 * then we fail now rather than trying endlessly
2636 * to write them out.
2637 */
2638 TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
2639 if ((error = bp->b_error) != 0)
2640 break;
2641 if ((mp != NULL && mp->mnt_secondary_writes > 0) ||
2642 (error == 0 && --maxretry >= 0))
2643 goto loop1;
2644 if (error == 0)
2645 error = EAGAIN;
2646 }
2647 }
2648 BO_UNLOCK(bo);
2649 if (error != 0)
2650 vn_printf(vp, "fsync: giving up on dirty (error = %d) ", error);
2651
2652 return (error);
2653 }