FreeBSD/Linux Kernel Cross Reference
sys/kern/vfs_vnops.c
1 /*-
2 * Copyright (c) 1982, 1986, 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Copyright (c) 2012 Konstantin Belousov <kib@FreeBSD.org>
11 * Copyright (c) 2013 The FreeBSD Foundation
12 *
13 * Portions of this software were developed by Konstantin Belousov
14 * under sponsorship from the FreeBSD Foundation.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 * 4. Neither the name of the University nor the names of its contributors
25 * may be used to endorse or promote products derived from this software
26 * without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 *
40 * @(#)vfs_vnops.c 8.2 (Berkeley) 1/21/94
41 */
42
43 #include <sys/cdefs.h>
44 __FBSDID("$FreeBSD: releng/10.0/sys/kern/vfs_vnops.c 255510 2013-09-13 06:52:23Z kib $");
45
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/disk.h>
49 #include <sys/fcntl.h>
50 #include <sys/file.h>
51 #include <sys/kdb.h>
52 #include <sys/stat.h>
53 #include <sys/priv.h>
54 #include <sys/proc.h>
55 #include <sys/limits.h>
56 #include <sys/lock.h>
57 #include <sys/mount.h>
58 #include <sys/mutex.h>
59 #include <sys/namei.h>
60 #include <sys/vnode.h>
61 #include <sys/bio.h>
62 #include <sys/buf.h>
63 #include <sys/filio.h>
64 #include <sys/resourcevar.h>
65 #include <sys/rwlock.h>
66 #include <sys/sx.h>
67 #include <sys/sysctl.h>
68 #include <sys/ttycom.h>
69 #include <sys/conf.h>
70 #include <sys/syslog.h>
71 #include <sys/unistd.h>
72
73 #include <security/audit/audit.h>
74 #include <security/mac/mac_framework.h>
75
76 #include <vm/vm.h>
77 #include <vm/vm_extern.h>
78 #include <vm/pmap.h>
79 #include <vm/vm_map.h>
80 #include <vm/vm_object.h>
81 #include <vm/vm_page.h>
82
83 static fo_rdwr_t vn_read;
84 static fo_rdwr_t vn_write;
85 static fo_rdwr_t vn_io_fault;
86 static fo_truncate_t vn_truncate;
87 static fo_ioctl_t vn_ioctl;
88 static fo_poll_t vn_poll;
89 static fo_kqfilter_t vn_kqfilter;
90 static fo_stat_t vn_statfile;
91 static fo_close_t vn_closefile;
92
93 struct fileops vnops = {
94 .fo_read = vn_io_fault,
95 .fo_write = vn_io_fault,
96 .fo_truncate = vn_truncate,
97 .fo_ioctl = vn_ioctl,
98 .fo_poll = vn_poll,
99 .fo_kqfilter = vn_kqfilter,
100 .fo_stat = vn_statfile,
101 .fo_close = vn_closefile,
102 .fo_chmod = vn_chmod,
103 .fo_chown = vn_chown,
104 .fo_sendfile = vn_sendfile,
105 .fo_seek = vn_seek,
106 .fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
107 };
108
109 int
110 vn_open(ndp, flagp, cmode, fp)
111 struct nameidata *ndp;
112 int *flagp, cmode;
113 struct file *fp;
114 {
115 struct thread *td = ndp->ni_cnd.cn_thread;
116
117 return (vn_open_cred(ndp, flagp, cmode, 0, td->td_ucred, fp));
118 }
119
120 /*
121 * Common code for vnode open operations via a name lookup.
122 * Lookup the vnode and invoke VOP_CREATE if needed.
123 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
124 *
125 * Note that this does NOT free nameidata for the successful case,
126 * due to the NDINIT being done elsewhere.
127 */
128 int
129 vn_open_cred(struct nameidata *ndp, int *flagp, int cmode, u_int vn_open_flags,
130 struct ucred *cred, struct file *fp)
131 {
132 struct vnode *vp;
133 struct mount *mp;
134 struct thread *td = ndp->ni_cnd.cn_thread;
135 struct vattr vat;
136 struct vattr *vap = &vat;
137 int fmode, error;
138
139 restart:
140 fmode = *flagp;
141 if (fmode & O_CREAT) {
142 ndp->ni_cnd.cn_nameiop = CREATE;
143 ndp->ni_cnd.cn_flags = ISOPEN | LOCKPARENT | LOCKLEAF;
144 if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
145 ndp->ni_cnd.cn_flags |= FOLLOW;
146 if (!(vn_open_flags & VN_OPEN_NOAUDIT))
147 ndp->ni_cnd.cn_flags |= AUDITVNODE1;
148 if (vn_open_flags & VN_OPEN_NOCAPCHECK)
149 ndp->ni_cnd.cn_flags |= NOCAPCHECK;
150 bwillwrite();
151 if ((error = namei(ndp)) != 0)
152 return (error);
153 if (ndp->ni_vp == NULL) {
154 VATTR_NULL(vap);
155 vap->va_type = VREG;
156 vap->va_mode = cmode;
157 if (fmode & O_EXCL)
158 vap->va_vaflags |= VA_EXCLUSIVE;
159 if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
160 NDFREE(ndp, NDF_ONLY_PNBUF);
161 vput(ndp->ni_dvp);
162 if ((error = vn_start_write(NULL, &mp,
163 V_XSLEEP | PCATCH)) != 0)
164 return (error);
165 goto restart;
166 }
167 #ifdef MAC
168 error = mac_vnode_check_create(cred, ndp->ni_dvp,
169 &ndp->ni_cnd, vap);
170 if (error == 0)
171 #endif
172 error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
173 &ndp->ni_cnd, vap);
174 vput(ndp->ni_dvp);
175 vn_finished_write(mp);
176 if (error) {
177 NDFREE(ndp, NDF_ONLY_PNBUF);
178 return (error);
179 }
180 fmode &= ~O_TRUNC;
181 vp = ndp->ni_vp;
182 } else {
183 if (ndp->ni_dvp == ndp->ni_vp)
184 vrele(ndp->ni_dvp);
185 else
186 vput(ndp->ni_dvp);
187 ndp->ni_dvp = NULL;
188 vp = ndp->ni_vp;
189 if (fmode & O_EXCL) {
190 error = EEXIST;
191 goto bad;
192 }
193 fmode &= ~O_CREAT;
194 }
195 } else {
196 ndp->ni_cnd.cn_nameiop = LOOKUP;
197 ndp->ni_cnd.cn_flags = ISOPEN |
198 ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) | LOCKLEAF;
199 if (!(fmode & FWRITE))
200 ndp->ni_cnd.cn_flags |= LOCKSHARED;
201 if (!(vn_open_flags & VN_OPEN_NOAUDIT))
202 ndp->ni_cnd.cn_flags |= AUDITVNODE1;
203 if (vn_open_flags & VN_OPEN_NOCAPCHECK)
204 ndp->ni_cnd.cn_flags |= NOCAPCHECK;
205 if ((error = namei(ndp)) != 0)
206 return (error);
207 vp = ndp->ni_vp;
208 }
209 error = vn_open_vnode(vp, fmode, cred, td, fp);
210 if (error)
211 goto bad;
212 *flagp = fmode;
213 return (0);
214 bad:
215 NDFREE(ndp, NDF_ONLY_PNBUF);
216 vput(vp);
217 *flagp = fmode;
218 ndp->ni_vp = NULL;
219 return (error);
220 }
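
A minimal sketch of how an in-kernel caller typically drives vn_open(): set up a nameidata with NDINIT(), call vn_open(), then free the pathname buffer and drop the vnode lock that vn_open() returns held. The "path" and "td" variables, the FREAD-only flags, and the error handling are illustrative assumptions, not taken from this file.

	struct nameidata nd;
	int flags, error;

	flags = FREAD;
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, td);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error != 0)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	VOP_UNLOCK(nd.ni_vp, 0);
	/* ... use nd.ni_vp; vn_close(nd.ni_vp, flags, td->td_ucred, td) when done. */
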
221
222 /*
223 * Common code for vnode open operations once a vnode is located.
224 * Check permissions, and call the VOP_OPEN routine.
225 */
226 int
227 vn_open_vnode(struct vnode *vp, int fmode, struct ucred *cred,
228 struct thread *td, struct file *fp)
229 {
230 struct mount *mp;
231 accmode_t accmode;
232 struct flock lf;
233 int error, have_flock, lock_flags, type;
234
235 if (vp->v_type == VLNK)
236 return (EMLINK);
237 if (vp->v_type == VSOCK)
238 return (EOPNOTSUPP);
239 if (vp->v_type != VDIR && fmode & O_DIRECTORY)
240 return (ENOTDIR);
241 accmode = 0;
242 if (fmode & (FWRITE | O_TRUNC)) {
243 if (vp->v_type == VDIR)
244 return (EISDIR);
245 accmode |= VWRITE;
246 }
247 if (fmode & FREAD)
248 accmode |= VREAD;
249 if (fmode & FEXEC)
250 accmode |= VEXEC;
251 if ((fmode & O_APPEND) && (fmode & FWRITE))
252 accmode |= VAPPEND;
253 #ifdef MAC
254 error = mac_vnode_check_open(cred, vp, accmode);
255 if (error)
256 return (error);
257 #endif
258 if ((fmode & O_CREAT) == 0) {
259 if (accmode & VWRITE) {
260 error = vn_writechk(vp);
261 if (error)
262 return (error);
263 }
264 if (accmode) {
265 error = VOP_ACCESS(vp, accmode, cred, td);
266 if (error)
267 return (error);
268 }
269 }
270 if (vp->v_type == VFIFO && VOP_ISLOCKED(vp) != LK_EXCLUSIVE)
271 vn_lock(vp, LK_UPGRADE | LK_RETRY);
272 if ((error = VOP_OPEN(vp, fmode, cred, td, fp)) != 0)
273 return (error);
274
275 if (fmode & (O_EXLOCK | O_SHLOCK)) {
276 KASSERT(fp != NULL, ("open with flock requires fp"));
277 lock_flags = VOP_ISLOCKED(vp);
278 VOP_UNLOCK(vp, 0);
279 lf.l_whence = SEEK_SET;
280 lf.l_start = 0;
281 lf.l_len = 0;
282 if (fmode & O_EXLOCK)
283 lf.l_type = F_WRLCK;
284 else
285 lf.l_type = F_RDLCK;
286 type = F_FLOCK;
287 if ((fmode & FNONBLOCK) == 0)
288 type |= F_WAIT;
289 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type);
290 have_flock = (error == 0);
291 vn_lock(vp, lock_flags | LK_RETRY);
292 if (error == 0 && vp->v_iflag & VI_DOOMED)
293 error = ENOENT;
294 /*
295 * Another thread might have used this vnode as an
296 * executable while the vnode lock was dropped.
297 * Ensure the vnode is still able to be opened for
298 * writing after the lock has been obtained.
299 */
300 if (error == 0 && accmode & VWRITE)
301 error = vn_writechk(vp);
302 if (error) {
303 VOP_UNLOCK(vp, 0);
304 if (have_flock) {
305 lf.l_whence = SEEK_SET;
306 lf.l_start = 0;
307 lf.l_len = 0;
308 lf.l_type = F_UNLCK;
309 (void) VOP_ADVLOCK(vp, fp, F_UNLCK, &lf,
310 F_FLOCK);
311 }
312 vn_start_write(vp, &mp, V_WAIT);
313 vn_lock(vp, lock_flags | LK_RETRY);
314 (void)VOP_CLOSE(vp, fmode, cred, td);
315 vn_finished_write(mp);
316 return (error);
317 }
318 fp->f_flag |= FHASLOCK;
319 }
320 if (fmode & FWRITE) {
321 VOP_ADD_WRITECOUNT(vp, 1);
322 CTR3(KTR_VFS, "%s: vp %p v_writecount increased to %d",
323 __func__, vp, vp->v_writecount);
324 }
325 ASSERT_VOP_LOCKED(vp, "vn_open_vnode");
326 return (0);
327 }
328
329 /*
330 * Check for write permissions on the specified vnode.
331 * Prototype text segments cannot be written.
332 */
333 int
334 vn_writechk(vp)
335 register struct vnode *vp;
336 {
337
338 ASSERT_VOP_LOCKED(vp, "vn_writechk");
339 /*
340 * If there's shared text associated with
341 * the vnode, try to free it up once. If
342 * we fail, we can't allow writing.
343 */
344 if (VOP_IS_TEXT(vp))
345 return (ETXTBSY);
346
347 return (0);
348 }
349
350 /*
351 * Vnode close call
352 */
353 int
354 vn_close(vp, flags, file_cred, td)
355 register struct vnode *vp;
356 int flags;
357 struct ucred *file_cred;
358 struct thread *td;
359 {
360 struct mount *mp;
361 int error, lock_flags;
362
363 if (vp->v_type != VFIFO && !(flags & FWRITE) && vp->v_mount != NULL &&
364 vp->v_mount->mnt_kern_flag & MNTK_EXTENDED_SHARED)
365 lock_flags = LK_SHARED;
366 else
367 lock_flags = LK_EXCLUSIVE;
368
369 vn_start_write(vp, &mp, V_WAIT);
370 vn_lock(vp, lock_flags | LK_RETRY);
371 if (flags & FWRITE) {
372 VNASSERT(vp->v_writecount > 0, vp,
373 ("vn_close: negative writecount"));
374 VOP_ADD_WRITECOUNT(vp, -1);
375 CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
376 __func__, vp, vp->v_writecount);
377 }
378 error = VOP_CLOSE(vp, flags, file_cred, td);
379 vput(vp);
380 vn_finished_write(mp);
381 return (error);
382 }
383
384 /*
385 * Heuristic to detect sequential operation.
386 */
387 static int
388 sequential_heuristic(struct uio *uio, struct file *fp)
389 {
390
391 if (atomic_load_acq_int(&(fp->f_flag)) & FRDAHEAD)
392 return (fp->f_seqcount << IO_SEQSHIFT);
393
394 /*
395 * Offset 0 is handled specially. open() sets f_seqcount to 1 so
396 * that the first I/O is normally considered to be slightly
397 * sequential. Seeking to offset 0 doesn't change sequentiality
398 * unless previous seeks have reduced f_seqcount to 0, in which
399 * case offset 0 is not special.
400 */
401 if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
402 uio->uio_offset == fp->f_nextoff) {
403 /*
404 * f_seqcount is in units of fixed-size blocks so that it
405 * depends mainly on the amount of sequential I/O and not
406 * much on the number of sequential I/O's. The fixed size
407 * of 16384 is hard-coded here since it is (not quite) just
408 * a magic size that works well here. This size is more
409 * closely related to the best I/O size for real disks than
410 * to any block size used by software.
411 */
412 fp->f_seqcount += howmany(uio->uio_resid, 16384);
413 if (fp->f_seqcount > IO_SEQMAX)
414 fp->f_seqcount = IO_SEQMAX;
415 return (fp->f_seqcount << IO_SEQSHIFT);
416 }
417
418 /* Not sequential. Quickly draw-down sequentiality. */
419 if (fp->f_seqcount > 1)
420 fp->f_seqcount = 1;
421 else
422 fp->f_seqcount = 0;
423 return (0);
424 }
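
As a worked example of the arithmetic above (illustrative numbers): open() starts f_seqcount at 1, so a first 64 KiB read at offset 0 adds howmany(65536, 16384) = 4 and returns a hint of 5 << IO_SEQSHIFT; each following read that begins exactly at f_nextoff adds another 4, so the hint grows to 9, 13, ... << IO_SEQSHIFT until f_seqcount is clamped at IO_SEQMAX, while a single non-contiguous read drops f_seqcount back to 1 (then 0 on the next miss) and returns 0.
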
425
426 /*
427 * Package up an I/O request on a vnode into a uio and do it.
428 */
429 int
430 vn_rdwr(enum uio_rw rw, struct vnode *vp, void *base, int len, off_t offset,
431 enum uio_seg segflg, int ioflg, struct ucred *active_cred,
432 struct ucred *file_cred, ssize_t *aresid, struct thread *td)
433 {
434 struct uio auio;
435 struct iovec aiov;
436 struct mount *mp;
437 struct ucred *cred;
438 void *rl_cookie;
439 int error, lock_flags;
440
441 auio.uio_iov = &aiov;
442 auio.uio_iovcnt = 1;
443 aiov.iov_base = base;
444 aiov.iov_len = len;
445 auio.uio_resid = len;
446 auio.uio_offset = offset;
447 auio.uio_segflg = segflg;
448 auio.uio_rw = rw;
449 auio.uio_td = td;
450 error = 0;
451
452 if ((ioflg & IO_NODELOCKED) == 0) {
453 if (rw == UIO_READ) {
454 rl_cookie = vn_rangelock_rlock(vp, offset,
455 offset + len);
456 } else {
457 rl_cookie = vn_rangelock_wlock(vp, offset,
458 offset + len);
459 }
460 mp = NULL;
461 if (rw == UIO_WRITE) {
462 if (vp->v_type != VCHR &&
463 (error = vn_start_write(vp, &mp, V_WAIT | PCATCH))
464 != 0)
465 goto out;
466 if (MNT_SHARED_WRITES(mp) ||
467 ((mp == NULL) && MNT_SHARED_WRITES(vp->v_mount)))
468 lock_flags = LK_SHARED;
469 else
470 lock_flags = LK_EXCLUSIVE;
471 } else
472 lock_flags = LK_SHARED;
473 vn_lock(vp, lock_flags | LK_RETRY);
474 } else
475 rl_cookie = NULL;
476
477 ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
478 #ifdef MAC
479 if ((ioflg & IO_NOMACCHECK) == 0) {
480 if (rw == UIO_READ)
481 error = mac_vnode_check_read(active_cred, file_cred,
482 vp);
483 else
484 error = mac_vnode_check_write(active_cred, file_cred,
485 vp);
486 }
487 #endif
488 if (error == 0) {
489 if (file_cred != NULL)
490 cred = file_cred;
491 else
492 cred = active_cred;
493 if (rw == UIO_READ)
494 error = VOP_READ(vp, &auio, ioflg, cred);
495 else
496 error = VOP_WRITE(vp, &auio, ioflg, cred);
497 }
498 if (aresid)
499 *aresid = auio.uio_resid;
500 else
501 if (auio.uio_resid && error == 0)
502 error = EIO;
503 if ((ioflg & IO_NODELOCKED) == 0) {
504 VOP_UNLOCK(vp, 0);
505 if (mp != NULL)
506 vn_finished_write(mp);
507 }
508 out:
509 if (rl_cookie != NULL)
510 vn_rangelock_unlock(vp, rl_cookie);
511 return (error);
512 }
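
A hypothetical example of the common kernel pattern built on vn_rdwr(): reading the first bytes of a vnode into a stack buffer with kernel-space addressing, letting vn_rdwr() take the range lock and vnode lock itself (ioflg 0). The buffer size and credential choices are illustrative.

	char buf[128];
	ssize_t resid;
	int error;

	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), 0, UIO_SYSSPACE,
	    0, td->td_ucred, NOCRED, &resid, td);
	if (error == 0) {
		/* sizeof(buf) - resid bytes were actually read. */
	}
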
513
514 /*
515 * Package up an I/O request on a vnode into a uio and do it. The I/O
516 * request is split up into smaller chunks and we try to avoid saturating
517 * the buffer cache while potentially holding a vnode locked, so we
518 * check bwillwrite() before calling vn_rdwr(). We also call kern_yield()
519 * to give other processes a chance to lock the vnode (either other processes
520 * core'ing the same binary, or unrelated processes scanning the directory).
521 */
522 int
523 vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, active_cred,
524 file_cred, aresid, td)
525 enum uio_rw rw;
526 struct vnode *vp;
527 void *base;
528 size_t len;
529 off_t offset;
530 enum uio_seg segflg;
531 int ioflg;
532 struct ucred *active_cred;
533 struct ucred *file_cred;
534 size_t *aresid;
535 struct thread *td;
536 {
537 int error = 0;
538 ssize_t iaresid;
539
540 do {
541 int chunk;
542
543 /*
544 * Force `offset' to a multiple of MAXBSIZE except possibly
545 * for the first chunk, so that filesystems only need to
546 * write full blocks except possibly for the first and last
547 * chunks.
548 */
549 chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;
550
551 if (chunk > len)
552 chunk = len;
553 if (rw != UIO_READ && vp->v_type == VREG)
554 bwillwrite();
555 iaresid = 0;
556 error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
557 ioflg, active_cred, file_cred, &iaresid, td);
558 len -= chunk; /* aresid calc already includes length */
559 if (error)
560 break;
561 offset += chunk;
562 base = (char *)base + chunk;
563 kern_yield(PRI_USER);
564 } while (len);
565 if (aresid)
566 *aresid = len + iaresid;
567 return (error);
568 }
569
570 off_t
571 foffset_lock(struct file *fp, int flags)
572 {
573 struct mtx *mtxp;
574 off_t res;
575
576 KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed"));
577
578 #if OFF_MAX <= LONG_MAX
579 /*
580 * Caller only wants the current f_offset value. Assume that
581 * reads of the long and shorter integer types are atomic.
582 */
583 if ((flags & FOF_NOLOCK) != 0)
584 return (fp->f_offset);
585 #endif
586
587 /*
588 * According to McKusick the vn lock was protecting f_offset here.
589 * It is now protected by the FOFFSET_LOCKED flag.
590 */
591 mtxp = mtx_pool_find(mtxpool_sleep, fp);
592 mtx_lock(mtxp);
593 if ((flags & FOF_NOLOCK) == 0) {
594 while (fp->f_vnread_flags & FOFFSET_LOCKED) {
595 fp->f_vnread_flags |= FOFFSET_LOCK_WAITING;
596 msleep(&fp->f_vnread_flags, mtxp, PUSER -1,
597 "vofflock", 0);
598 }
599 fp->f_vnread_flags |= FOFFSET_LOCKED;
600 }
601 res = fp->f_offset;
602 mtx_unlock(mtxp);
603 return (res);
604 }
605
606 void
607 foffset_unlock(struct file *fp, off_t val, int flags)
608 {
609 struct mtx *mtxp;
610
611 KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed"));
612
613 #if OFF_MAX <= LONG_MAX
614 if ((flags & FOF_NOLOCK) != 0) {
615 if ((flags & FOF_NOUPDATE) == 0)
616 fp->f_offset = val;
617 if ((flags & FOF_NEXTOFF) != 0)
618 fp->f_nextoff = val;
619 return;
620 }
621 #endif
622
623 mtxp = mtx_pool_find(mtxpool_sleep, fp);
624 mtx_lock(mtxp);
625 if ((flags & FOF_NOUPDATE) == 0)
626 fp->f_offset = val;
627 if ((flags & FOF_NEXTOFF) != 0)
628 fp->f_nextoff = val;
629 if ((flags & FOF_NOLOCK) == 0) {
630 KASSERT((fp->f_vnread_flags & FOFFSET_LOCKED) != 0,
631 ("Lost FOFFSET_LOCKED"));
632 if (fp->f_vnread_flags & FOFFSET_LOCK_WAITING)
633 wakeup(&fp->f_vnread_flags);
634 fp->f_vnread_flags = 0;
635 }
636 mtx_unlock(mtxp);
637 }
638
639 void
640 foffset_lock_uio(struct file *fp, struct uio *uio, int flags)
641 {
642
643 if ((flags & FOF_OFFSET) == 0)
644 uio->uio_offset = foffset_lock(fp, flags);
645 }
646
647 void
648 foffset_unlock_uio(struct file *fp, struct uio *uio, int flags)
649 {
650
651 if ((flags & FOF_OFFSET) == 0)
652 foffset_unlock(fp, uio->uio_offset, flags);
653 }
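
The pair above is used as a bracket around the actual I/O whenever the caller did not pass FOF_OFFSET: the file offset is loaded into the uio under the FOFFSET_LOCKED flag and written back afterwards, which is what serializes concurrent read(2)/write(2) on one open file. vn_io_fault() below does exactly this:

	foffset_lock_uio(fp, uio, flags);	/* uio_offset <- f_offset */
	error = doio(fp, uio, active_cred, flags | FOF_OFFSET, td);
	foffset_unlock_uio(fp, uio, flags);	/* f_offset <- uio_offset */
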
654
655 static int
656 get_advice(struct file *fp, struct uio *uio)
657 {
658 struct mtx *mtxp;
659 int ret;
660
661 ret = POSIX_FADV_NORMAL;
662 if (fp->f_advice == NULL)
663 return (ret);
664
665 mtxp = mtx_pool_find(mtxpool_sleep, fp);
666 mtx_lock(mtxp);
667 if (uio->uio_offset >= fp->f_advice->fa_start &&
668 uio->uio_offset + uio->uio_resid <= fp->f_advice->fa_end)
669 ret = fp->f_advice->fa_advice;
670 mtx_unlock(mtxp);
671 return (ret);
672 }
673
674 /*
675 * File table vnode read routine.
676 */
677 static int
678 vn_read(fp, uio, active_cred, flags, td)
679 struct file *fp;
680 struct uio *uio;
681 struct ucred *active_cred;
682 int flags;
683 struct thread *td;
684 {
685 struct vnode *vp;
686 struct mtx *mtxp;
687 int error, ioflag;
688 int advice;
689 off_t offset, start, end;
690
691 KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
692 uio->uio_td, td));
693 KASSERT(flags & FOF_OFFSET, ("No FOF_OFFSET"));
694 vp = fp->f_vnode;
695 ioflag = 0;
696 if (fp->f_flag & FNONBLOCK)
697 ioflag |= IO_NDELAY;
698 if (fp->f_flag & O_DIRECT)
699 ioflag |= IO_DIRECT;
700 advice = get_advice(fp, uio);
701 vn_lock(vp, LK_SHARED | LK_RETRY);
702
703 switch (advice) {
704 case POSIX_FADV_NORMAL:
705 case POSIX_FADV_SEQUENTIAL:
706 case POSIX_FADV_NOREUSE:
707 ioflag |= sequential_heuristic(uio, fp);
708 break;
709 case POSIX_FADV_RANDOM:
710 /* Disable read-ahead for random I/O. */
711 break;
712 }
713 offset = uio->uio_offset;
714
715 #ifdef MAC
716 error = mac_vnode_check_read(active_cred, fp->f_cred, vp);
717 if (error == 0)
718 #endif
719 error = VOP_READ(vp, uio, ioflag, fp->f_cred);
720 fp->f_nextoff = uio->uio_offset;
721 VOP_UNLOCK(vp, 0);
722 if (error == 0 && advice == POSIX_FADV_NOREUSE &&
723 offset != uio->uio_offset) {
724 /*
725 * Use POSIX_FADV_DONTNEED to flush clean pages and
726 * buffers for the backing file after a
727 * POSIX_FADV_NOREUSE read(2). To optimize the common
728 * case of using POSIX_FADV_NOREUSE with sequential
729 * access, track the previous implicit DONTNEED
730 * request and grow this request to include the
731 * current read(2) in addition to the previous
732 * DONTNEED. With purely sequential access this will
733 * cause the DONTNEED requests to continuously grow to
734 * cover all of the previously read regions of the
735 * file. This allows filesystem blocks that are
736 * accessed by multiple calls to read(2) to be flushed
737 * once the last read(2) finishes.
738 */
739 start = offset;
740 end = uio->uio_offset - 1;
741 mtxp = mtx_pool_find(mtxpool_sleep, fp);
742 mtx_lock(mtxp);
743 if (fp->f_advice != NULL &&
744 fp->f_advice->fa_advice == POSIX_FADV_NOREUSE) {
745 if (start != 0 && fp->f_advice->fa_prevend + 1 == start)
746 start = fp->f_advice->fa_prevstart;
747 else if (fp->f_advice->fa_prevstart != 0 &&
748 fp->f_advice->fa_prevstart == end + 1)
749 end = fp->f_advice->fa_prevend;
750 fp->f_advice->fa_prevstart = start;
751 fp->f_advice->fa_prevend = end;
752 }
753 mtx_unlock(mtxp);
754 error = VOP_ADVISE(vp, start, end, POSIX_FADV_DONTNEED);
755 }
756 return (error);
757 }
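
For context, a hypothetical userland sequence that would exercise the POSIX_FADV_NOREUSE handling described above; the file name and buffer size are made up:

	#include <fcntl.h>
	#include <unistd.h>

	int fd = open("/data/bigfile", O_RDONLY);
	char buf[65536];

	posix_fadvise(fd, 0, 0, POSIX_FADV_NOREUSE);
	while (read(fd, buf, sizeof(buf)) > 0)
		;	/* each read() extends the implicit DONTNEED region */
	close(fd);
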
758
759 /*
760 * File table vnode write routine.
761 */
762 static int
763 vn_write(fp, uio, active_cred, flags, td)
764 struct file *fp;
765 struct uio *uio;
766 struct ucred *active_cred;
767 int flags;
768 struct thread *td;
769 {
770 struct vnode *vp;
771 struct mount *mp;
772 struct mtx *mtxp;
773 int error, ioflag, lock_flags;
774 int advice;
775 off_t offset, start, end;
776
777 KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
778 uio->uio_td, td));
779 KASSERT(flags & FOF_OFFSET, ("No FOF_OFFSET"));
780 vp = fp->f_vnode;
781 if (vp->v_type == VREG)
782 bwillwrite();
783 ioflag = IO_UNIT;
784 if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
785 ioflag |= IO_APPEND;
786 if (fp->f_flag & FNONBLOCK)
787 ioflag |= IO_NDELAY;
788 if (fp->f_flag & O_DIRECT)
789 ioflag |= IO_DIRECT;
790 if ((fp->f_flag & O_FSYNC) ||
791 (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
792 ioflag |= IO_SYNC;
793 mp = NULL;
794 if (vp->v_type != VCHR &&
795 (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
796 goto unlock;
797
798 advice = get_advice(fp, uio);
799
800 if (MNT_SHARED_WRITES(mp) ||
801 (mp == NULL && MNT_SHARED_WRITES(vp->v_mount))) {
802 lock_flags = LK_SHARED;
803 } else {
804 lock_flags = LK_EXCLUSIVE;
805 }
806
807 vn_lock(vp, lock_flags | LK_RETRY);
808 switch (advice) {
809 case POSIX_FADV_NORMAL:
810 case POSIX_FADV_SEQUENTIAL:
811 case POSIX_FADV_NOREUSE:
812 ioflag |= sequential_heuristic(uio, fp);
813 break;
814 case POSIX_FADV_RANDOM:
815 /* XXX: Is this correct? */
816 break;
817 }
818 offset = uio->uio_offset;
819
820 #ifdef MAC
821 error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
822 if (error == 0)
823 #endif
824 error = VOP_WRITE(vp, uio, ioflag, fp->f_cred);
825 fp->f_nextoff = uio->uio_offset;
826 VOP_UNLOCK(vp, 0);
827 if (vp->v_type != VCHR)
828 vn_finished_write(mp);
829 if (error == 0 && advice == POSIX_FADV_NOREUSE &&
830 offset != uio->uio_offset) {
831 /*
832 * Use POSIX_FADV_DONTNEED to flush clean pages and
833 * buffers for the backing file after a
834 * POSIX_FADV_NOREUSE write(2). To optimize the
835 * common case of using POSIX_FADV_NOREUSE with
836 * sequential access, track the previous implicit
837 * DONTNEED request and grow this request to include
838 * the current write(2) in addition to the previous
839 * DONTNEED. With purely sequential access this will
840 * cause the DONTNEED requests to continuously grow to
841 * cover all of the previously written regions of the
842 * file.
843 *
844 * Note that the blocks just written are almost
845 * certainly still dirty, so this only works when
846 * VOP_ADVISE() calls from subsequent writes push out
847 * the data written by this write(2) once the backing
848 * buffers are clean. However, as compared to forcing
849 * IO_DIRECT, this gives much saner behavior. Write
850 * clustering is still allowed, and clean pages are
851 * merely moved to the cache page queue rather than
852 * outright thrown away. This means a subsequent
853 * read(2) can still avoid hitting the disk if the
854 * pages have not been reclaimed.
855 *
856 * This does make POSIX_FADV_NOREUSE largely useless
857 * with non-sequential access. However, sequential
858 * access is the more common use case and the flag is
859 * merely advisory.
860 */
861 start = offset;
862 end = uio->uio_offset - 1;
863 mtxp = mtx_pool_find(mtxpool_sleep, fp);
864 mtx_lock(mtxp);
865 if (fp->f_advice != NULL &&
866 fp->f_advice->fa_advice == POSIX_FADV_NOREUSE) {
867 if (start != 0 && fp->f_advice->fa_prevend + 1 == start)
868 start = fp->f_advice->fa_prevstart;
869 else if (fp->f_advice->fa_prevstart != 0 &&
870 fp->f_advice->fa_prevstart == end + 1)
871 end = fp->f_advice->fa_prevend;
872 fp->f_advice->fa_prevstart = start;
873 fp->f_advice->fa_prevend = end;
874 }
875 mtx_unlock(mtxp);
876 error = VOP_ADVISE(vp, start, end, POSIX_FADV_DONTNEED);
877 }
878
879 unlock:
880 return (error);
881 }
882
883 static const int io_hold_cnt = 16;
884 static int vn_io_fault_enable = 1;
885 SYSCTL_INT(_debug, OID_AUTO, vn_io_fault_enable, CTLFLAG_RW,
886 &vn_io_fault_enable, 0, "Enable vn_io_fault lock avoidance");
887 static u_long vn_io_faults_cnt;
888 SYSCTL_ULONG(_debug, OID_AUTO, vn_io_faults, CTLFLAG_RD,
889 &vn_io_faults_cnt, 0, "Count of vn_io_fault lock avoidance triggers");
890
891 /*
892 * The vn_io_fault() is a wrapper around vn_read() and vn_write() to
893 * prevent the following deadlock:
894 *
895 * Assume that the thread A reads from the vnode vp1 into userspace
896 * buffer buf1 backed by the pages of vnode vp2. If a page in buf1 is
897 * currently not resident, then system ends up with the call chain
898 * vn_read() -> VOP_READ(vp1) -> uiomove() -> [Page Fault] ->
899 * vm_fault(buf1) -> vnode_pager_getpages(vp2) -> VOP_GETPAGES(vp2)
900 * which establishes lock order vp1->vn_lock, then vp2->vn_lock.
901 * If, at the same time, thread B reads from vnode vp2 into buffer buf2
902 * backed by the pages of vnode vp1, and some page in buf2 is not
903 * resident, we get a reversed order vp2->vn_lock, then vp1->vn_lock.
904 *
905 * To prevent the lock order reversal and deadlock, vn_io_fault() does
906 * not allow page faults to happen during VOP_READ() or VOP_WRITE().
907 * Instead, it first tries to do the whole range i/o with pagefaults
908 * disabled. If all pages in the i/o buffer are resident and mapped,
909 * VOP will succeed (ignoring the genuine filesystem errors).
910 * Otherwise, we get back EFAULT, and vn_io_fault() falls back to do
911 * i/o in chunks, with all pages in the chunk prefaulted and held
912 * using vm_fault_quick_hold_pages().
913 *
914 * Filesystems using this deadlock avoidance scheme should use the
915 * array of the held pages from uio, saved in the curthread->td_ma,
916 * instead of doing uiomove(). A helper function
917 * vn_io_fault_uiomove() converts uiomove request into
918 * uiomove_fromphys() over td_ma array.
919 *
920 * Since vnode locks do not cover the whole i/o anymore, rangelocks
921 * make the current i/o request atomic with respect to other i/os and
922 * truncations.
923 */
924 static int
925 vn_io_fault(struct file *fp, struct uio *uio, struct ucred *active_cred,
926 int flags, struct thread *td)
927 {
928 vm_page_t ma[io_hold_cnt + 2];
929 struct uio *uio_clone, short_uio;
930 struct iovec short_iovec[1];
931 fo_rdwr_t *doio;
932 struct vnode *vp;
933 void *rl_cookie;
934 struct mount *mp;
935 vm_page_t *prev_td_ma;
936 int cnt, error, save, saveheld, prev_td_ma_cnt;
937 vm_offset_t addr, end;
938 vm_prot_t prot;
939 size_t len, resid;
940 ssize_t adv;
941
942 if (uio->uio_rw == UIO_READ)
943 doio = vn_read;
944 else
945 doio = vn_write;
946 vp = fp->f_vnode;
947 foffset_lock_uio(fp, uio, flags);
948
949 if (uio->uio_segflg != UIO_USERSPACE || vp->v_type != VREG ||
950 ((mp = vp->v_mount) != NULL &&
951 (mp->mnt_kern_flag & MNTK_NO_IOPF) == 0) ||
952 !vn_io_fault_enable) {
953 error = doio(fp, uio, active_cred, flags | FOF_OFFSET, td);
954 goto out_last;
955 }
956
957 /*
958 * The UFS follows IO_UNIT directive and replays back both
959 * uio_offset and uio_resid if an error is encountered during the
960 * operation. But, since the iovec may be already advanced,
961 * uio is still in an inconsistent state.
962 *
963 * Cache a copy of the original uio, which is advanced to the redo
964 * point using UIO_NOCOPY below.
965 */
966 uio_clone = cloneuio(uio);
967 resid = uio->uio_resid;
968
969 short_uio.uio_segflg = UIO_USERSPACE;
970 short_uio.uio_rw = uio->uio_rw;
971 short_uio.uio_td = uio->uio_td;
972
973 if (uio->uio_rw == UIO_READ) {
974 prot = VM_PROT_WRITE;
975 rl_cookie = vn_rangelock_rlock(vp, uio->uio_offset,
976 uio->uio_offset + uio->uio_resid);
977 } else {
978 prot = VM_PROT_READ;
979 if ((fp->f_flag & O_APPEND) != 0 || (flags & FOF_OFFSET) == 0)
980 /* For appenders, punt and lock the whole range. */
981 rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
982 else
983 rl_cookie = vn_rangelock_wlock(vp, uio->uio_offset,
984 uio->uio_offset + uio->uio_resid);
985 }
986
987 save = vm_fault_disable_pagefaults();
988 error = doio(fp, uio, active_cred, flags | FOF_OFFSET, td);
989 if (error != EFAULT)
990 goto out;
991
992 atomic_add_long(&vn_io_faults_cnt, 1);
993 uio_clone->uio_segflg = UIO_NOCOPY;
994 uiomove(NULL, resid - uio->uio_resid, uio_clone);
995 uio_clone->uio_segflg = uio->uio_segflg;
996
997 saveheld = curthread_pflags_set(TDP_UIOHELD);
998 prev_td_ma = td->td_ma;
999 prev_td_ma_cnt = td->td_ma_cnt;
1000
1001 while (uio_clone->uio_resid != 0) {
1002 len = uio_clone->uio_iov->iov_len;
1003 if (len == 0) {
1004 KASSERT(uio_clone->uio_iovcnt >= 1,
1005 ("iovcnt underflow"));
1006 uio_clone->uio_iov++;
1007 uio_clone->uio_iovcnt--;
1008 continue;
1009 }
1010
1011 addr = (vm_offset_t)uio_clone->uio_iov->iov_base;
1012 end = round_page(addr + len);
1013 cnt = howmany(end - trunc_page(addr), PAGE_SIZE);
1014 /*
1015 * A perfectly misaligned address and length could cause
1016 * both the start and the end of the chunk to use partial
1017 * page. +2 accounts for such a situation.
1018 */
1019 if (cnt > io_hold_cnt + 2) {
1020 len = io_hold_cnt * PAGE_SIZE;
1021 KASSERT(howmany(round_page(addr + len) -
1022 trunc_page(addr), PAGE_SIZE) <= io_hold_cnt + 2,
1023 ("cnt overflow"));
1024 }
1025 cnt = vm_fault_quick_hold_pages(&td->td_proc->p_vmspace->vm_map,
1026 addr, len, prot, ma, io_hold_cnt + 2);
1027 if (cnt == -1) {
1028 error = EFAULT;
1029 break;
1030 }
1031 short_uio.uio_iov = &short_iovec[0];
1032 short_iovec[0].iov_base = (void *)addr;
1033 short_uio.uio_iovcnt = 1;
1034 short_uio.uio_resid = short_iovec[0].iov_len = len;
1035 short_uio.uio_offset = uio_clone->uio_offset;
1036 td->td_ma = ma;
1037 td->td_ma_cnt = cnt;
1038
1039 error = doio(fp, &short_uio, active_cred, flags | FOF_OFFSET,
1040 td);
1041 vm_page_unhold_pages(ma, cnt);
1042 adv = len - short_uio.uio_resid;
1043
1044 uio_clone->uio_iov->iov_base =
1045 (char *)uio_clone->uio_iov->iov_base + adv;
1046 uio_clone->uio_iov->iov_len -= adv;
1047 uio_clone->uio_resid -= adv;
1048 uio_clone->uio_offset += adv;
1049
1050 uio->uio_resid -= adv;
1051 uio->uio_offset += adv;
1052
1053 if (error != 0 || adv == 0)
1054 break;
1055 }
1056 td->td_ma = prev_td_ma;
1057 td->td_ma_cnt = prev_td_ma_cnt;
1058 curthread_pflags_restore(saveheld);
1059 out:
1060 vm_fault_enable_pagefaults(save);
1061 vn_rangelock_unlock(vp, rl_cookie);
1062 free(uio_clone, M_IOV);
1063 out_last:
1064 foffset_unlock_uio(fp, uio, flags);
1065 return (error);
1066 }
1067
1068 /*
1069 * Helper function to perform the requested uiomove operation using
1070 * the held pages for io->uio_iov[0].iov_base buffer instead of
1071 * copyin/copyout. Access to the pages with uiomove_fromphys()
1072 * instead of iov_base prevents page faults that could occur due to
1073 * pmap_collect() invalidating the mapping created by
1074 * vm_fault_quick_hold_pages(), or pageout daemon, page laundry or
1075 * object cleanup revoking the write access from page mappings.
1076 *
1077 * Filesystems specified MNTK_NO_IOPF shall use vn_io_fault_uiomove()
1078 * instead of plain uiomove().
1079 */
1080 int
1081 vn_io_fault_uiomove(char *data, int xfersize, struct uio *uio)
1082 {
1083 struct uio transp_uio;
1084 struct iovec transp_iov[1];
1085 struct thread *td;
1086 size_t adv;
1087 int error, pgadv;
1088
1089 td = curthread;
1090 if ((td->td_pflags & TDP_UIOHELD) == 0 ||
1091 uio->uio_segflg != UIO_USERSPACE)
1092 return (uiomove(data, xfersize, uio));
1093
1094 KASSERT(uio->uio_iovcnt == 1, ("uio_iovcnt %d", uio->uio_iovcnt));
1095 transp_iov[0].iov_base = data;
1096 transp_uio.uio_iov = &transp_iov[0];
1097 transp_uio.uio_iovcnt = 1;
1098 if (xfersize > uio->uio_resid)
1099 xfersize = uio->uio_resid;
1100 transp_uio.uio_resid = transp_iov[0].iov_len = xfersize;
1101 transp_uio.uio_offset = 0;
1102 transp_uio.uio_segflg = UIO_SYSSPACE;
1103 /*
1104 * Since transp_iov points to data, and td_ma page array
1105 * corresponds to original uio->uio_iov, we need to invert the
1106 * direction of the i/o operation as passed to
1107 * uiomove_fromphys().
1108 */
1109 switch (uio->uio_rw) {
1110 case UIO_WRITE:
1111 transp_uio.uio_rw = UIO_READ;
1112 break;
1113 case UIO_READ:
1114 transp_uio.uio_rw = UIO_WRITE;
1115 break;
1116 }
1117 transp_uio.uio_td = uio->uio_td;
1118 error = uiomove_fromphys(td->td_ma,
1119 ((vm_offset_t)uio->uio_iov->iov_base) & PAGE_MASK,
1120 xfersize, &transp_uio);
1121 adv = xfersize - transp_uio.uio_resid;
1122 pgadv =
1123 (((vm_offset_t)uio->uio_iov->iov_base + adv) >> PAGE_SHIFT) -
1124 (((vm_offset_t)uio->uio_iov->iov_base) >> PAGE_SHIFT);
1125 td->td_ma += pgadv;
1126 KASSERT(td->td_ma_cnt >= pgadv, ("consumed pages %d %d", td->td_ma_cnt,
1127 pgadv));
1128 td->td_ma_cnt -= pgadv;
1129 uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + adv;
1130 uio->uio_iov->iov_len -= adv;
1131 uio->uio_resid -= adv;
1132 uio->uio_offset += adv;
1133 return (error);
1134 }
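
A hypothetical fragment of how a filesystem that sets MNTK_NO_IOPF would use this helper inside its VOP_READ loop: wherever it would otherwise uiomove() data out of a locked buffer, it calls vn_io_fault_uiomove(), so the copy is redirected through the td_ma pages when vn_io_fault() is driving the request. The names myfs_read, bp, blkoff and cnt are assumptions, not part of this file.

	/* Inside a hypothetical myfs_read(), bp is a locked buffer. */
	cnt = MIN(uio->uio_resid, bp->b_bcount - blkoff);
	error = vn_io_fault_uiomove((char *)bp->b_data + blkoff, cnt, uio);
	if (error != 0)
		break;
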
1135
1136 int
1137 vn_io_fault_pgmove(vm_page_t ma[], vm_offset_t offset, int xfersize,
1138 struct uio *uio)
1139 {
1140 struct thread *td;
1141 vm_offset_t iov_base;
1142 int cnt, pgadv;
1143
1144 td = curthread;
1145 if ((td->td_pflags & TDP_UIOHELD) == 0 ||
1146 uio->uio_segflg != UIO_USERSPACE)
1147 return (uiomove_fromphys(ma, offset, xfersize, uio));
1148
1149 KASSERT(uio->uio_iovcnt == 1, ("uio_iovcnt %d", uio->uio_iovcnt));
1150 cnt = xfersize > uio->uio_resid ? uio->uio_resid : xfersize;
1151 iov_base = (vm_offset_t)uio->uio_iov->iov_base;
1152 switch (uio->uio_rw) {
1153 case UIO_WRITE:
1154 pmap_copy_pages(td->td_ma, iov_base & PAGE_MASK, ma,
1155 offset, cnt);
1156 break;
1157 case UIO_READ:
1158 pmap_copy_pages(ma, offset, td->td_ma, iov_base & PAGE_MASK,
1159 cnt);
1160 break;
1161 }
1162 pgadv = ((iov_base + cnt) >> PAGE_SHIFT) - (iov_base >> PAGE_SHIFT);
1163 td->td_ma += pgadv;
1164 KASSERT(td->td_ma_cnt >= pgadv, ("consumed pages %d %d", td->td_ma_cnt,
1165 pgadv));
1166 td->td_ma_cnt -= pgadv;
1167 uio->uio_iov->iov_base = (char *)(iov_base + cnt);
1168 uio->uio_iov->iov_len -= cnt;
1169 uio->uio_resid -= cnt;
1170 uio->uio_offset += cnt;
1171 return (0);
1172 }
1173
1174
1175 /*
1176 * File table truncate routine.
1177 */
1178 static int
1179 vn_truncate(struct file *fp, off_t length, struct ucred *active_cred,
1180 struct thread *td)
1181 {
1182 struct vattr vattr;
1183 struct mount *mp;
1184 struct vnode *vp;
1185 void *rl_cookie;
1186 int error;
1187
1188 vp = fp->f_vnode;
1189
1190 /*
1191 * Lock the whole range for truncation. Otherwise split i/o
1192 * might happen partly before and partly after the truncation.
1193 */
1194 rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
1195 error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
1196 if (error)
1197 goto out1;
1198 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1199 if (vp->v_type == VDIR) {
1200 error = EISDIR;
1201 goto out;
1202 }
1203 #ifdef MAC
1204 error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
1205 if (error)
1206 goto out;
1207 #endif
1208 error = vn_writechk(vp);
1209 if (error == 0) {
1210 VATTR_NULL(&vattr);
1211 vattr.va_size = length;
1212 error = VOP_SETATTR(vp, &vattr, fp->f_cred);
1213 }
1214 out:
1215 VOP_UNLOCK(vp, 0);
1216 vn_finished_write(mp);
1217 out1:
1218 vn_rangelock_unlock(vp, rl_cookie);
1219 return (error);
1220 }
1221
1222 /*
1223 * File table vnode stat routine.
1224 */
1225 static int
1226 vn_statfile(fp, sb, active_cred, td)
1227 struct file *fp;
1228 struct stat *sb;
1229 struct ucred *active_cred;
1230 struct thread *td;
1231 {
1232 struct vnode *vp = fp->f_vnode;
1233 int error;
1234
1235 vn_lock(vp, LK_SHARED | LK_RETRY);
1236 error = vn_stat(vp, sb, active_cred, fp->f_cred, td);
1237 VOP_UNLOCK(vp, 0);
1238
1239 return (error);
1240 }
1241
1242 /*
1243 * Stat a vnode; implementation for the stat syscall
1244 */
1245 int
1246 vn_stat(vp, sb, active_cred, file_cred, td)
1247 struct vnode *vp;
1248 register struct stat *sb;
1249 struct ucred *active_cred;
1250 struct ucred *file_cred;
1251 struct thread *td;
1252 {
1253 struct vattr vattr;
1254 register struct vattr *vap;
1255 int error;
1256 u_short mode;
1257
1258 #ifdef MAC
1259 error = mac_vnode_check_stat(active_cred, file_cred, vp);
1260 if (error)
1261 return (error);
1262 #endif
1263
1264 vap = &vattr;
1265
1266 /*
1267 * Initialize defaults for new and unusual fields, so that file
1268 * systems which don't support these fields don't need to know
1269 * about them.
1270 */
1271 vap->va_birthtime.tv_sec = -1;
1272 vap->va_birthtime.tv_nsec = 0;
1273 vap->va_fsid = VNOVAL;
1274 vap->va_rdev = NODEV;
1275
1276 error = VOP_GETATTR(vp, vap, active_cred);
1277 if (error)
1278 return (error);
1279
1280 /*
1281 * Zero the spare stat fields
1282 */
1283 bzero(sb, sizeof *sb);
1284
1285 /*
1286 * Copy from vattr table
1287 */
1288 if (vap->va_fsid != VNOVAL)
1289 sb->st_dev = vap->va_fsid;
1290 else
1291 sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
1292 sb->st_ino = vap->va_fileid;
1293 mode = vap->va_mode;
1294 switch (vap->va_type) {
1295 case VREG:
1296 mode |= S_IFREG;
1297 break;
1298 case VDIR:
1299 mode |= S_IFDIR;
1300 break;
1301 case VBLK:
1302 mode |= S_IFBLK;
1303 break;
1304 case VCHR:
1305 mode |= S_IFCHR;
1306 break;
1307 case VLNK:
1308 mode |= S_IFLNK;
1309 break;
1310 case VSOCK:
1311 mode |= S_IFSOCK;
1312 break;
1313 case VFIFO:
1314 mode |= S_IFIFO;
1315 break;
1316 default:
1317 return (EBADF);
1318 }
1319 sb->st_mode = mode;
1320 sb->st_nlink = vap->va_nlink;
1321 sb->st_uid = vap->va_uid;
1322 sb->st_gid = vap->va_gid;
1323 sb->st_rdev = vap->va_rdev;
1324 if (vap->va_size > OFF_MAX)
1325 return (EOVERFLOW);
1326 sb->st_size = vap->va_size;
1327 sb->st_atim = vap->va_atime;
1328 sb->st_mtim = vap->va_mtime;
1329 sb->st_ctim = vap->va_ctime;
1330 sb->st_birthtim = vap->va_birthtime;
1331
1332 /*
1333 * According to www.opengroup.org, the meaning of st_blksize is
1334 * "a filesystem-specific preferred I/O block size for this
1335 * object. In some filesystem types, this may vary from file
1336 * to file"
1337 * Use minimum/default of PAGE_SIZE (e.g. for VCHR).
1338 */
1339
1340 sb->st_blksize = max(PAGE_SIZE, vap->va_blocksize);
1341
1342 sb->st_flags = vap->va_flags;
1343 if (priv_check(td, PRIV_VFS_GENERATION))
1344 sb->st_gen = 0;
1345 else
1346 sb->st_gen = vap->va_gen;
1347
1348 sb->st_blocks = vap->va_bytes / S_BLKSIZE;
1349 return (0);
1350 }
1351
1352 /*
1353 * File table vnode ioctl routine.
1354 */
1355 static int
1356 vn_ioctl(fp, com, data, active_cred, td)
1357 struct file *fp;
1358 u_long com;
1359 void *data;
1360 struct ucred *active_cred;
1361 struct thread *td;
1362 {
1363 struct vattr vattr;
1364 struct vnode *vp;
1365 int error;
1366
1367 vp = fp->f_vnode;
1368 switch (vp->v_type) {
1369 case VDIR:
1370 case VREG:
1371 switch (com) {
1372 case FIONREAD:
1373 vn_lock(vp, LK_SHARED | LK_RETRY);
1374 error = VOP_GETATTR(vp, &vattr, active_cred);
1375 VOP_UNLOCK(vp, 0);
1376 if (error == 0)
1377 *(int *)data = vattr.va_size - fp->f_offset;
1378 return (error);
1379 case FIONBIO:
1380 case FIOASYNC:
1381 return (0);
1382 default:
1383 return (VOP_IOCTL(vp, com, data, fp->f_flag,
1384 active_cred, td));
1385 }
1386 default:
1387 return (ENOTTY);
1388 }
1389 }
1390
1391 /*
1392 * File table vnode poll routine.
1393 */
1394 static int
1395 vn_poll(fp, events, active_cred, td)
1396 struct file *fp;
1397 int events;
1398 struct ucred *active_cred;
1399 struct thread *td;
1400 {
1401 struct vnode *vp;
1402 int error;
1403
1404 vp = fp->f_vnode;
1405 #ifdef MAC
1406 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1407 error = mac_vnode_check_poll(active_cred, fp->f_cred, vp);
1408 VOP_UNLOCK(vp, 0);
1409 if (!error)
1410 #endif
1411
1412 error = VOP_POLL(vp, events, fp->f_cred, td);
1413 return (error);
1414 }
1415
1416 /*
1417 * Acquire the requested lock and then check for validity. LK_RETRY
1418 * permits vn_lock to return doomed vnodes.
1419 */
1420 int
1421 _vn_lock(struct vnode *vp, int flags, char *file, int line)
1422 {
1423 int error;
1424
1425 VNASSERT((flags & LK_TYPE_MASK) != 0, vp,
1426 ("vn_lock called with no locktype."));
1427 do {
1428 #ifdef DEBUG_VFS_LOCKS
1429 KASSERT(vp->v_holdcnt != 0,
1430 ("vn_lock %p: zero hold count", vp));
1431 #endif
1432 error = VOP_LOCK1(vp, flags, file, line);
1433 flags &= ~LK_INTERLOCK; /* Interlock is always dropped. */
1434 KASSERT((flags & LK_RETRY) == 0 || error == 0,
1435 ("LK_RETRY set with incompatible flags (0x%x) or an error occured (%d)",
1436 flags, error));
1437 /*
1438 * Callers specify LK_RETRY if they wish to get dead vnodes.
1439 * If RETRY is not set, we return ENOENT instead.
1440 */
1441 if (error == 0 && vp->v_iflag & VI_DOOMED &&
1442 (flags & LK_RETRY) == 0) {
1443 VOP_UNLOCK(vp, 0);
1444 error = ENOENT;
1445 break;
1446 }
1447 } while (flags & LK_RETRY && error != 0);
1448 return (error);
1449 }
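
Two illustrative call patterns for the behavior documented in this function (sketches, not taken from this file): with LK_RETRY the lock is always obtained, even on a doomed vnode, so the caller must check for that itself if it cares; without LK_RETRY the caller must be prepared for ENOENT on a reclaimed vnode.

	/* Always get the lock; a doomed vnode is returned locked. */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	/* Refuse doomed vnodes instead. */
	error = vn_lock(vp, LK_EXCLUSIVE);
	if (error != 0)
		return (error);		/* ENOENT: vnode was reclaimed */
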
1450
1451 /*
1452 * File table vnode close routine.
1453 */
1454 static int
1455 vn_closefile(fp, td)
1456 struct file *fp;
1457 struct thread *td;
1458 {
1459 struct vnode *vp;
1460 struct flock lf;
1461 int error;
1462
1463 vp = fp->f_vnode;
1464 fp->f_ops = &badfileops;
1465
1466 if (fp->f_type == DTYPE_VNODE && fp->f_flag & FHASLOCK)
1467 vref(vp);
1468
1469 error = vn_close(vp, fp->f_flag, fp->f_cred, td);
1470
1471 if (fp->f_type == DTYPE_VNODE && fp->f_flag & FHASLOCK) {
1472 lf.l_whence = SEEK_SET;
1473 lf.l_start = 0;
1474 lf.l_len = 0;
1475 lf.l_type = F_UNLCK;
1476 (void) VOP_ADVLOCK(vp, fp, F_UNLCK, &lf, F_FLOCK);
1477 vrele(vp);
1478 }
1479 return (error);
1480 }
1481
1482 /*
1483 * Preparing to start a filesystem write operation. If the operation is
1484 * permitted, then we bump the count of operations in progress and
1485 * proceed. If a suspend request is in progress, we wait until the
1486 * suspension is over, and then proceed.
1487 */
1488 static int
1489 vn_start_write_locked(struct mount *mp, int flags)
1490 {
1491 int error;
1492
1493 mtx_assert(MNT_MTX(mp), MA_OWNED);
1494 error = 0;
1495
1496 /*
1497 * Check on status of suspension.
1498 */
1499 if ((curthread->td_pflags & TDP_IGNSUSP) == 0 ||
1500 mp->mnt_susp_owner != curthread) {
1501 while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
1502 if (flags & V_NOWAIT) {
1503 error = EWOULDBLOCK;
1504 goto unlock;
1505 }
1506 error = msleep(&mp->mnt_flag, MNT_MTX(mp),
1507 (PUSER - 1) | (flags & PCATCH), "suspfs", 0);
1508 if (error)
1509 goto unlock;
1510 }
1511 }
1512 if (flags & V_XSLEEP)
1513 goto unlock;
1514 mp->mnt_writeopcount++;
1515 unlock:
1516 if (error != 0 || (flags & V_XSLEEP) != 0)
1517 MNT_REL(mp);
1518 MNT_IUNLOCK(mp);
1519 return (error);
1520 }
1521
1522 int
1523 vn_start_write(vp, mpp, flags)
1524 struct vnode *vp;
1525 struct mount **mpp;
1526 int flags;
1527 {
1528 struct mount *mp;
1529 int error;
1530
1531 error = 0;
1532 /*
1533 * If a vnode is provided, get and return the mount point
1534 * to which it will write.
1535 */
1536 if (vp != NULL) {
1537 if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
1538 *mpp = NULL;
1539 if (error != EOPNOTSUPP)
1540 return (error);
1541 return (0);
1542 }
1543 }
1544 if ((mp = *mpp) == NULL)
1545 return (0);
1546
1547 /*
1548 * VOP_GETWRITEMOUNT() returns with the mp refcount held through
1549 * a vfs_ref().
1550 * As long as a vnode is not provided we need to acquire a
1551 * refcount for the provided mountpoint too, in order to
1552 * emulate a vfs_ref().
1553 */
1554 MNT_ILOCK(mp);
1555 if (vp == NULL)
1556 MNT_REF(mp);
1557
1558 return (vn_start_write_locked(mp, flags));
1559 }
1560
1561 /*
1562 * Secondary suspension. Used by operations such as vop_inactive
1563 * routines that are needed by the higher level functions. These
1564 * are allowed to proceed until all the higher level functions have
1565 * completed (indicated by mnt_writeopcount dropping to zero). At that
1566 * time, these operations are halted until the suspension is over.
1567 */
1568 int
1569 vn_start_secondary_write(vp, mpp, flags)
1570 struct vnode *vp;
1571 struct mount **mpp;
1572 int flags;
1573 {
1574 struct mount *mp;
1575 int error;
1576
1577 retry:
1578 if (vp != NULL) {
1579 if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
1580 *mpp = NULL;
1581 if (error != EOPNOTSUPP)
1582 return (error);
1583 return (0);
1584 }
1585 }
1586 /*
1587 * If we are not suspended or have not yet reached suspended
1588 * mode, then let the operation proceed.
1589 */
1590 if ((mp = *mpp) == NULL)
1591 return (0);
1592
1593 /*
1594 * VOP_GETWRITEMOUNT() returns with the mp refcount held through
1595 * a vfs_ref().
1596 * As long as a vnode is not provided we need to acquire a
1597 * refcount for the provided mountpoint too, in order to
1598 * emulate a vfs_ref().
1599 */
1600 MNT_ILOCK(mp);
1601 if (vp == NULL)
1602 MNT_REF(mp);
1603 if ((mp->mnt_kern_flag & (MNTK_SUSPENDED | MNTK_SUSPEND2)) == 0) {
1604 mp->mnt_secondary_writes++;
1605 mp->mnt_secondary_accwrites++;
1606 MNT_IUNLOCK(mp);
1607 return (0);
1608 }
1609 if (flags & V_NOWAIT) {
1610 MNT_REL(mp);
1611 MNT_IUNLOCK(mp);
1612 return (EWOULDBLOCK);
1613 }
1614 /*
1615 * Wait for the suspension to finish.
1616 */
1617 error = msleep(&mp->mnt_flag, MNT_MTX(mp),
1618 (PUSER - 1) | (flags & PCATCH) | PDROP, "suspfs", 0);
1619 vfs_rel(mp);
1620 if (error == 0)
1621 goto retry;
1622 return (error);
1623 }
1624
1625 /*
1626 * Filesystem write operation has completed. If we are suspending and this
1627 * operation is the last one, notify the suspender that the suspension is
1628 * now in effect.
1629 */
1630 void
1631 vn_finished_write(mp)
1632 struct mount *mp;
1633 {
1634 if (mp == NULL)
1635 return;
1636 MNT_ILOCK(mp);
1637 MNT_REL(mp);
1638 mp->mnt_writeopcount--;
1639 if (mp->mnt_writeopcount < 0)
1640 panic("vn_finished_write: neg cnt");
1641 if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
1642 mp->mnt_writeopcount <= 0)
1643 wakeup(&mp->mnt_writeopcount);
1644 MNT_IUNLOCK(mp);
1645 }
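
vn_start_write() and vn_finished_write() form the canonical bracket around any filesystem-modifying operation, as used throughout this file (vn_truncate() above, vn_extattr_set() below); a condensed sketch:

	error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
	if (error != 0)
		return (error);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	/* ... modify the vnode ... */
	VOP_UNLOCK(vp, 0);
	vn_finished_write(mp);
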
1646
1647
1648 /*
1649 * Filesystem secondary write operation has completed. If we are
1650 * suspending and this operation is the last one, notify the suspender
1651 * that the suspension is now in effect.
1652 */
1653 void
1654 vn_finished_secondary_write(mp)
1655 struct mount *mp;
1656 {
1657 if (mp == NULL)
1658 return;
1659 MNT_ILOCK(mp);
1660 MNT_REL(mp);
1661 mp->mnt_secondary_writes--;
1662 if (mp->mnt_secondary_writes < 0)
1663 panic("vn_finished_secondary_write: neg cnt");
1664 if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
1665 mp->mnt_secondary_writes <= 0)
1666 wakeup(&mp->mnt_secondary_writes);
1667 MNT_IUNLOCK(mp);
1668 }
1669
1670
1671
1672 /*
1673 * Request a filesystem to suspend write operations.
1674 */
1675 int
1676 vfs_write_suspend(struct mount *mp, int flags)
1677 {
1678 int error;
1679
1680 MNT_ILOCK(mp);
1681 if (mp->mnt_susp_owner == curthread) {
1682 MNT_IUNLOCK(mp);
1683 return (EALREADY);
1684 }
1685 while (mp->mnt_kern_flag & MNTK_SUSPEND)
1686 msleep(&mp->mnt_flag, MNT_MTX(mp), PUSER - 1, "wsuspfs", 0);
1687
1688 /*
1689 * Unmount holds a write reference on the mount point. If we
1690 * own busy reference and drain for writers, we deadlock with
1691 * the reference draining in the unmount path. Callers of
1692 * vfs_write_suspend() must specify VS_SKIP_UNMOUNT if
1693 * vfs_busy() reference is owned and caller is not in the
1694 * unmount context.
1695 */
1696 if ((flags & VS_SKIP_UNMOUNT) != 0 &&
1697 (mp->mnt_kern_flag & MNTK_UNMOUNT) != 0) {
1698 MNT_IUNLOCK(mp);
1699 return (EBUSY);
1700 }
1701
1702 mp->mnt_kern_flag |= MNTK_SUSPEND;
1703 mp->mnt_susp_owner = curthread;
1704 if (mp->mnt_writeopcount > 0)
1705 (void) msleep(&mp->mnt_writeopcount,
1706 MNT_MTX(mp), (PUSER - 1)|PDROP, "suspwt", 0);
1707 else
1708 MNT_IUNLOCK(mp);
1709 if ((error = VFS_SYNC(mp, MNT_SUSPEND)) != 0)
1710 vfs_write_resume(mp, 0);
1711 return (error);
1712 }
1713
1714 /*
1715 * Request a filesystem to resume write operations.
1716 */
1717 void
1718 vfs_write_resume(struct mount *mp, int flags)
1719 {
1720
1721 MNT_ILOCK(mp);
1722 if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
1723 KASSERT(mp->mnt_susp_owner == curthread, ("mnt_susp_owner"));
1724 mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPEND2 |
1725 MNTK_SUSPENDED);
1726 mp->mnt_susp_owner = NULL;
1727 wakeup(&mp->mnt_writeopcount);
1728 wakeup(&mp->mnt_flag);
1729 curthread->td_pflags &= ~TDP_IGNSUSP;
1730 if ((flags & VR_START_WRITE) != 0) {
1731 MNT_REF(mp);
1732 mp->mnt_writeopcount++;
1733 }
1734 MNT_IUNLOCK(mp);
1735 if ((flags & VR_NO_SUSPCLR) == 0)
1736 VFS_SUSP_CLEAN(mp);
1737 } else if ((flags & VR_START_WRITE) != 0) {
1738 MNT_REF(mp);
1739 vn_start_write_locked(mp, 0);
1740 } else {
1741 MNT_IUNLOCK(mp);
1742 }
1743 }
1744
1745 /*
1746 * Implement kqueues for files by translating it to vnode operation.
1747 */
1748 static int
1749 vn_kqfilter(struct file *fp, struct knote *kn)
1750 {
1751
1752 return (VOP_KQFILTER(fp->f_vnode, kn));
1753 }
1754
1755 /*
1756 * Simplified in-kernel wrapper calls for extended attribute access.
1757 * Both calls pass in a NULL credential, authorizing as "kernel" access.
1758 * Set IO_NODELOCKED in ioflg if the vnode is already locked.
1759 */
1760 int
1761 vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
1762 const char *attrname, int *buflen, char *buf, struct thread *td)
1763 {
1764 struct uio auio;
1765 struct iovec iov;
1766 int error;
1767
1768 iov.iov_len = *buflen;
1769 iov.iov_base = buf;
1770
1771 auio.uio_iov = &iov;
1772 auio.uio_iovcnt = 1;
1773 auio.uio_rw = UIO_READ;
1774 auio.uio_segflg = UIO_SYSSPACE;
1775 auio.uio_td = td;
1776 auio.uio_offset = 0;
1777 auio.uio_resid = *buflen;
1778
1779 if ((ioflg & IO_NODELOCKED) == 0)
1780 vn_lock(vp, LK_SHARED | LK_RETRY);
1781
1782 ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
1783
1784 /* authorize attribute retrieval as kernel */
1785 error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
1786 td);
1787
1788 if ((ioflg & IO_NODELOCKED) == 0)
1789 VOP_UNLOCK(vp, 0);
1790
1791 if (error == 0) {
1792 *buflen = *buflen - auio.uio_resid;
1793 }
1794
1795 return (error);
1796 }
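
A small hypothetical use of the wrapper above: fetching a system-namespace attribute into a stack buffer and letting the wrapper take the shared vnode lock itself (ioflg 0). The attribute name is made up.

	char buf[64];
	int buflen = sizeof(buf);
	int error;

	error = vn_extattr_get(vp, 0, EXTATTR_NAMESPACE_SYSTEM,
	    "myfs.label", &buflen, buf, td);
	/* On success, buflen now holds the number of bytes returned. */
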
1797
1798 /*
1799 * XXX failure mode if partially written?
1800 */
1801 int
1802 vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
1803 const char *attrname, int buflen, char *buf, struct thread *td)
1804 {
1805 struct uio auio;
1806 struct iovec iov;
1807 struct mount *mp;
1808 int error;
1809
1810 iov.iov_len = buflen;
1811 iov.iov_base = buf;
1812
1813 auio.uio_iov = &iov;
1814 auio.uio_iovcnt = 1;
1815 auio.uio_rw = UIO_WRITE;
1816 auio.uio_segflg = UIO_SYSSPACE;
1817 auio.uio_td = td;
1818 auio.uio_offset = 0;
1819 auio.uio_resid = buflen;
1820
1821 if ((ioflg & IO_NODELOCKED) == 0) {
1822 if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
1823 return (error);
1824 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1825 }
1826
1827 ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
1828
1829 /* authorize attribute setting as kernel */
1830 error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);
1831
1832 if ((ioflg & IO_NODELOCKED) == 0) {
1833 vn_finished_write(mp);
1834 VOP_UNLOCK(vp, 0);
1835 }
1836
1837 return (error);
1838 }
1839
1840 int
1841 vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
1842 const char *attrname, struct thread *td)
1843 {
1844 struct mount *mp;
1845 int error;
1846
1847 if ((ioflg & IO_NODELOCKED) == 0) {
1848 if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
1849 return (error);
1850 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1851 }
1852
1853 ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
1854
1855 /* authorize attribute removal as kernel */
1856 error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, td);
1857 if (error == EOPNOTSUPP)
1858 error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
1859 NULL, td);
1860
1861 if ((ioflg & IO_NODELOCKED) == 0) {
1862 vn_finished_write(mp);
1863 VOP_UNLOCK(vp, 0);
1864 }
1865
1866 return (error);
1867 }
1868
1869 int
1870 vn_vget_ino(struct vnode *vp, ino_t ino, int lkflags, struct vnode **rvp)
1871 {
1872 struct mount *mp;
1873 int ltype, error;
1874
1875 mp = vp->v_mount;
1876 ltype = VOP_ISLOCKED(vp);
1877 KASSERT(ltype == LK_EXCLUSIVE || ltype == LK_SHARED,
1878 ("vn_vget_ino: vp not locked"));
1879 error = vfs_busy(mp, MBF_NOWAIT);
1880 if (error != 0) {
1881 vfs_ref(mp);
1882 VOP_UNLOCK(vp, 0);
1883 error = vfs_busy(mp, 0);
1884 vn_lock(vp, ltype | LK_RETRY);
1885 vfs_rel(mp);
1886 if (error != 0)
1887 return (ENOENT);
1888 if (vp->v_iflag & VI_DOOMED) {
1889 vfs_unbusy(mp);
1890 return (ENOENT);
1891 }
1892 }
1893 VOP_UNLOCK(vp, 0);
1894 error = VFS_VGET(mp, ino, lkflags, rvp);
1895 vfs_unbusy(mp);
1896 vn_lock(vp, ltype | LK_RETRY);
1897 if (vp->v_iflag & VI_DOOMED) {
1898 if (error == 0)
1899 vput(*rvp);
1900 error = ENOENT;
1901 }
1902 return (error);
1903 }
1904
1905 int
1906 vn_rlimit_fsize(const struct vnode *vp, const struct uio *uio,
1907 const struct thread *td)
1908 {
1909
1910 if (vp->v_type != VREG || td == NULL)
1911 return (0);
1912 PROC_LOCK(td->td_proc);
1913 if ((uoff_t)uio->uio_offset + uio->uio_resid >
1914 lim_cur(td->td_proc, RLIMIT_FSIZE)) {
1915 kern_psignal(td->td_proc, SIGXFSZ);
1916 PROC_UNLOCK(td->td_proc);
1917 return (EFBIG);
1918 }
1919 PROC_UNLOCK(td->td_proc);
1920 return (0);
1921 }
1922
1923 int
1924 vn_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
1925 struct thread *td)
1926 {
1927 struct vnode *vp;
1928
1929 vp = fp->f_vnode;
1930 #ifdef AUDIT
1931 vn_lock(vp, LK_SHARED | LK_RETRY);
1932 AUDIT_ARG_VNODE1(vp);
1933 VOP_UNLOCK(vp, 0);
1934 #endif
1935 return (setfmode(td, active_cred, vp, mode));
1936 }
1937
1938 int
1939 vn_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
1940 struct thread *td)
1941 {
1942 struct vnode *vp;
1943
1944 vp = fp->f_vnode;
1945 #ifdef AUDIT
1946 vn_lock(vp, LK_SHARED | LK_RETRY);
1947 AUDIT_ARG_VNODE1(vp);
1948 VOP_UNLOCK(vp, 0);
1949 #endif
1950 return (setfown(td, active_cred, vp, uid, gid));
1951 }
1952
1953 void
1954 vn_pages_remove(struct vnode *vp, vm_pindex_t start, vm_pindex_t end)
1955 {
1956 vm_object_t object;
1957
1958 if ((object = vp->v_object) == NULL)
1959 return;
1960 VM_OBJECT_WLOCK(object);
1961 vm_object_page_remove(object, start, end, 0);
1962 VM_OBJECT_WUNLOCK(object);
1963 }
1964
1965 int
1966 vn_bmap_seekhole(struct vnode *vp, u_long cmd, off_t *off, struct ucred *cred)
1967 {
1968 struct vattr va;
1969 daddr_t bn, bnp;
1970 uint64_t bsize;
1971 off_t noff;
1972 int error;
1973
1974 KASSERT(cmd == FIOSEEKHOLE || cmd == FIOSEEKDATA,
1975 ("Wrong command %lu", cmd));
1976
1977 if (vn_lock(vp, LK_SHARED) != 0)
1978 return (EBADF);
1979 if (vp->v_type != VREG) {
1980 error = ENOTTY;
1981 goto unlock;
1982 }
1983 error = VOP_GETATTR(vp, &va, cred);
1984 if (error != 0)
1985 goto unlock;
1986 noff = *off;
1987 if (noff >= va.va_size) {
1988 error = ENXIO;
1989 goto unlock;
1990 }
1991 bsize = vp->v_mount->mnt_stat.f_iosize;
1992 for (bn = noff / bsize; noff < va.va_size; bn++, noff += bsize) {
1993 error = VOP_BMAP(vp, bn, NULL, &bnp, NULL, NULL);
1994 if (error == EOPNOTSUPP) {
1995 error = ENOTTY;
1996 goto unlock;
1997 }
1998 if ((bnp == -1 && cmd == FIOSEEKHOLE) ||
1999 (bnp != -1 && cmd == FIOSEEKDATA)) {
2000 noff = bn * bsize;
2001 if (noff < *off)
2002 noff = *off;
2003 goto unlock;
2004 }
2005 }
2006 if (noff > va.va_size)
2007 noff = va.va_size;
2008 /* noff == va.va_size. There is an implicit hole at the end of file. */
2009 if (cmd == FIOSEEKDATA)
2010 error = ENXIO;
2011 unlock:
2012 VOP_UNLOCK(vp, 0);
2013 if (error == 0)
2014 *off = noff;
2015 return (error);
2016 }
2017
2018 int
2019 vn_seek(struct file *fp, off_t offset, int whence, struct thread *td)
2020 {
2021 struct ucred *cred;
2022 struct vnode *vp;
2023 struct vattr vattr;
2024 off_t foffset, size;
2025 int error, noneg;
2026
2027 cred = td->td_ucred;
2028 vp = fp->f_vnode;
2029 foffset = foffset_lock(fp, 0);
2030 noneg = (vp->v_type != VCHR);
2031 error = 0;
2032 switch (whence) {
2033 case L_INCR:
2034 if (noneg &&
2035 (foffset < 0 ||
2036 (offset > 0 && foffset > OFF_MAX - offset))) {
2037 error = EOVERFLOW;
2038 break;
2039 }
2040 offset += foffset;
2041 break;
2042 case L_XTND:
2043 vn_lock(vp, LK_SHARED | LK_RETRY);
2044 error = VOP_GETATTR(vp, &vattr, cred);
2045 VOP_UNLOCK(vp, 0);
2046 if (error)
2047 break;
2048
2049 /*
2050 * If the file references a disk device, then fetch
2051 * the media size and use that to determine the ending
2052 * offset.
2053 */
2054 if (vattr.va_size == 0 && vp->v_type == VCHR &&
2055 fo_ioctl(fp, DIOCGMEDIASIZE, &size, cred, td) == 0)
2056 vattr.va_size = size;
2057 if (noneg &&
2058 (vattr.va_size > OFF_MAX ||
2059 (offset > 0 && vattr.va_size > OFF_MAX - offset))) {
2060 error = EOVERFLOW;
2061 break;
2062 }
2063 offset += vattr.va_size;
2064 break;
2065 case L_SET:
2066 break;
2067 case SEEK_DATA:
2068 error = fo_ioctl(fp, FIOSEEKDATA, &offset, cred, td);
2069 break;
2070 case SEEK_HOLE:
2071 error = fo_ioctl(fp, FIOSEEKHOLE, &offset, cred, td);
2072 break;
2073 default:
2074 error = EINVAL;
2075 }
2076 if (error == 0 && noneg && offset < 0)
2077 error = EINVAL;
2078 if (error != 0)
2079 goto drop;
2080 VFS_KNOTE_UNLOCKED(vp, 0);
2081 *(off_t *)(td->td_retval) = offset;
2082 drop:
2083 foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
2084 return (error);
2085 }
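
For context, a hypothetical userland use of the SEEK_HOLE/SEEK_DATA support that vn_seek() forwards to the FIOSEEKHOLE/FIOSEEKDATA ioctls handled by vn_bmap_seekhole() above; fd is assumed to be an open regular file:

	#include <unistd.h>

	off_t hole = lseek(fd, 0, SEEK_HOLE);    /* first hole at or after offset 0 */
	off_t data = lseek(fd, hole, SEEK_DATA); /* next data after the hole, or ENXIO */
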