sys/kern/vfs_vnops.c
1 /*-
2 * Copyright (c) 1982, 1986, 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)vfs_vnops.c 8.2 (Berkeley) 1/21/94
35 */
36
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
39
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/fcntl.h>
43 #include <sys/file.h>
44 #include <sys/kdb.h>
45 #include <sys/stat.h>
46 #include <sys/priv.h>
47 #include <sys/proc.h>
48 #include <sys/limits.h>
49 #include <sys/lock.h>
50 #include <sys/mount.h>
51 #include <sys/mutex.h>
52 #include <sys/namei.h>
53 #include <sys/vnode.h>
54 #include <sys/bio.h>
55 #include <sys/buf.h>
56 #include <sys/filio.h>
57 #include <sys/resourcevar.h>
58 #include <sys/sx.h>
59 #include <sys/ttycom.h>
60 #include <sys/conf.h>
61 #include <sys/syslog.h>
62 #include <sys/unistd.h>
63
64 #include <security/mac/mac_framework.h>
65
66 #include <vm/vm.h>
67 #include <vm/vm_object.h>
68
69 static fo_rdwr_t vn_read;
70 static fo_rdwr_t vn_write;
71 static fo_truncate_t vn_truncate;
72 static fo_ioctl_t vn_ioctl;
73 static fo_poll_t vn_poll;
74 static fo_kqfilter_t vn_kqfilter;
75 static fo_stat_t vn_statfile;
76 static fo_close_t vn_closefile;
77
78 struct fileops vnops = {
79 .fo_read = vn_read,
80 .fo_write = vn_write,
81 .fo_truncate = vn_truncate,
82 .fo_ioctl = vn_ioctl,
83 .fo_poll = vn_poll,
84 .fo_kqfilter = vn_kqfilter,
85 .fo_stat = vn_statfile,
86 .fo_close = vn_closefile,
87 .fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
88 };
89
90 int
91 vn_open(ndp, flagp, cmode, fp)
92 struct nameidata *ndp;
93 int *flagp, cmode;
94 struct file *fp;
95 {
96 struct thread *td = ndp->ni_cnd.cn_thread;
97
98 return (vn_open_cred(ndp, flagp, cmode, 0, td->td_ucred, fp));
99 }
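/*
 * A minimal usage sketch (editorial, not part of the original file) of
 * how an in-kernel caller drives vn_open(): the caller prepares the
 * nameidata with NDINIT() and, on success, owns a locked, referenced
 * vnode plus the pathname buffer. The identifiers "nd", "path" and
 * "flags" below are illustrative:
 *
 *	struct nameidata nd;
 *	int flags, error;
 *
 *	NDINIT(&nd, LOOKUP, FOLLOW | MPSAFE, UIO_SYSSPACE, path, td);
 *	flags = FREAD;
 *	error = vn_open(&nd, &flags, 0, NULL);
 *	if (error == 0) {
 *		NDFREE(&nd, NDF_ONLY_PNBUF);	(free the path buffer)
 *		... use nd.ni_vp, then VOP_UNLOCK() and vn_close() ...
 *	}
 */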
100
101 /*
102 * Common code for vnode open operations.
103 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
104 *
105 * Note that this does NOT free nameidata for the successful case,
106 * due to the NDINIT being done elsewhere.
107 */
108 int
109 vn_open_cred(struct nameidata *ndp, int *flagp, int cmode, u_int vn_open_flags,
110 struct ucred *cred, struct file *fp)
111 {
112 struct vnode *vp;
113 struct mount *mp;
114 struct thread *td = ndp->ni_cnd.cn_thread;
115 struct vattr vat;
116 struct vattr *vap = &vat;
117 int fmode, error;
118 accmode_t accmode;
119 int vfslocked, mpsafe;
120
121 mpsafe = ndp->ni_cnd.cn_flags & MPSAFE;
122 restart:
123 vfslocked = 0;
124 fmode = *flagp;
125 if (fmode & O_CREAT) {
126 ndp->ni_cnd.cn_nameiop = CREATE;
127 ndp->ni_cnd.cn_flags = ISOPEN | LOCKPARENT | LOCKLEAF |
128 MPSAFE;
129 if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
130 ndp->ni_cnd.cn_flags |= FOLLOW;
131 if (!(vn_open_flags & VN_OPEN_NOAUDIT))
132 ndp->ni_cnd.cn_flags |= AUDITVNODE1;
133 bwillwrite();
134 if ((error = namei(ndp)) != 0)
135 return (error);
136 vfslocked = NDHASGIANT(ndp);
137 if (!mpsafe)
138 ndp->ni_cnd.cn_flags &= ~MPSAFE;
139 if (ndp->ni_vp == NULL) {
140 VATTR_NULL(vap);
141 vap->va_type = VREG;
142 vap->va_mode = cmode;
143 if (fmode & O_EXCL)
144 vap->va_vaflags |= VA_EXCLUSIVE;
145 if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
146 NDFREE(ndp, NDF_ONLY_PNBUF);
147 vput(ndp->ni_dvp);
148 VFS_UNLOCK_GIANT(vfslocked);
149 if ((error = vn_start_write(NULL, &mp,
150 V_XSLEEP | PCATCH)) != 0)
151 return (error);
152 goto restart;
153 }
154 #ifdef MAC
155 error = mac_vnode_check_create(cred, ndp->ni_dvp,
156 &ndp->ni_cnd, vap);
157 if (error == 0)
158 #endif
159 error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
160 &ndp->ni_cnd, vap);
161 vput(ndp->ni_dvp);
162 vn_finished_write(mp);
163 if (error) {
164 VFS_UNLOCK_GIANT(vfslocked);
165 NDFREE(ndp, NDF_ONLY_PNBUF);
166 return (error);
167 }
168 fmode &= ~O_TRUNC;
169 vp = ndp->ni_vp;
170 } else {
171 if (ndp->ni_dvp == ndp->ni_vp)
172 vrele(ndp->ni_dvp);
173 else
174 vput(ndp->ni_dvp);
175 ndp->ni_dvp = NULL;
176 vp = ndp->ni_vp;
177 if (fmode & O_EXCL) {
178 error = EEXIST;
179 goto bad;
180 }
181 fmode &= ~O_CREAT;
182 }
183 } else {
184 ndp->ni_cnd.cn_nameiop = LOOKUP;
185 ndp->ni_cnd.cn_flags = ISOPEN |
186 ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) |
187 LOCKLEAF | MPSAFE;
188 if (!(fmode & FWRITE))
189 ndp->ni_cnd.cn_flags |= LOCKSHARED;
190 if (!(vn_open_flags & VN_OPEN_NOAUDIT))
191 ndp->ni_cnd.cn_flags |= AUDITVNODE1;
192 if ((error = namei(ndp)) != 0)
193 return (error);
194 if (!mpsafe)
195 ndp->ni_cnd.cn_flags &= ~MPSAFE;
196 vfslocked = NDHASGIANT(ndp);
197 vp = ndp->ni_vp;
198 }
199 if (vp->v_type == VLNK) {
200 error = EMLINK;
201 goto bad;
202 }
203 if (vp->v_type == VSOCK) {
204 error = EOPNOTSUPP;
205 goto bad;
206 }
207 accmode = 0;
208 if (fmode & (FWRITE | O_TRUNC)) {
209 if (vp->v_type == VDIR) {
210 error = EISDIR;
211 goto bad;
212 }
213 accmode |= VWRITE;
214 }
215 if (fmode & FREAD)
216 accmode |= VREAD;
217 if (fmode & FEXEC)
218 accmode |= VEXEC;
219 if ((fmode & O_APPEND) && (fmode & FWRITE))
220 accmode |= VAPPEND;
221 #ifdef MAC
222 error = mac_vnode_check_open(cred, vp, accmode);
223 if (error)
224 goto bad;
225 #endif
226 if ((fmode & O_CREAT) == 0) {
227 if (accmode & VWRITE) {
228 error = vn_writechk(vp);
229 if (error)
230 goto bad;
231 }
232 if (accmode) {
233 error = VOP_ACCESS(vp, accmode, cred, td);
234 if (error)
235 goto bad;
236 }
237 }
238 if ((error = VOP_OPEN(vp, fmode, cred, td, fp)) != 0)
239 goto bad;
240
241 if (fmode & FWRITE)
242 vp->v_writecount++;
243 *flagp = fmode;
244 ASSERT_VOP_LOCKED(vp, "vn_open_cred");
245 if (!mpsafe)
246 VFS_UNLOCK_GIANT(vfslocked);
247 return (0);
248 bad:
249 NDFREE(ndp, NDF_ONLY_PNBUF);
250 vput(vp);
251 VFS_UNLOCK_GIANT(vfslocked);
252 *flagp = fmode;
253 ndp->ni_vp = NULL;
254 return (error);
255 }
256
257 /*
258 * Check for write permissions on the specified vnode.
259 * Prototype text segments cannot be written.
260 */
261 int
262 vn_writechk(vp)
263 register struct vnode *vp;
264 {
265
266 ASSERT_VOP_LOCKED(vp, "vn_writechk");
267 /*
268 * If there's shared text associated with
269 * the vnode, try to free it up once. If
270 * we fail, we can't allow writing.
271 */
272 if (vp->v_vflag & VV_TEXT)
273 return (ETXTBSY);
274
275 return (0);
276 }
277
278 /*
279 * Vnode close call
280 */
281 int
282 vn_close(vp, flags, file_cred, td)
283 register struct vnode *vp;
284 int flags;
285 struct ucred *file_cred;
286 struct thread *td;
287 {
288 struct mount *mp;
289 int error, lock_flags;
290
291 if (!(flags & FWRITE) && vp->v_mount != NULL &&
292 vp->v_mount->mnt_kern_flag & MNTK_EXTENDED_SHARED)
293 lock_flags = LK_SHARED;
294 else
295 lock_flags = LK_EXCLUSIVE;
296
297 VFS_ASSERT_GIANT(vp->v_mount);
298
299 vn_start_write(vp, &mp, V_WAIT);
300 vn_lock(vp, lock_flags | LK_RETRY);
301 if (flags & FWRITE) {
302 VNASSERT(vp->v_writecount > 0, vp,
303 ("vn_close: negative writecount"));
304 vp->v_writecount--;
305 }
306 error = VOP_CLOSE(vp, flags, file_cred, td);
307 vput(vp);
308 vn_finished_write(mp);
309 return (error);
310 }
311
312 /*
313 * Heuristic to detect sequential operation.
314 */
315 static int
316 sequential_heuristic(struct uio *uio, struct file *fp)
317 {
318
319 if (atomic_load_acq_int(&(fp->f_flag)) & FRDAHEAD)
320 return (fp->f_seqcount << IO_SEQSHIFT);
321
322 /*
323 * Offset 0 is handled specially. open() sets f_seqcount to 1 so
324 * that the first I/O is normally considered to be slightly
325 * sequential. Seeking to offset 0 doesn't change sequentiality
326 * unless previous seeks have reduced f_seqcount to 0, in which
327 * case offset 0 is not special.
328 */
329 if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
330 uio->uio_offset == fp->f_nextoff) {
331 /*
332 * f_seqcount is in units of fixed-size blocks so that it
333 * depends mainly on the amount of sequential I/O and not
334 * much on the number of sequential I/O's. The fixed size
335 * of 16384 is hard-coded here since it is (not quite) just
336 * a magic size that works well here. This size is more
337 * closely related to the best I/O size for real disks than
338 * to any block size used by software.
339 */
340 fp->f_seqcount += howmany(uio->uio_resid, 16384);
341 if (fp->f_seqcount > IO_SEQMAX)
342 fp->f_seqcount = IO_SEQMAX;
343 return (fp->f_seqcount << IO_SEQSHIFT);
344 }
345
346 /* Not sequential. Quickly draw down sequentiality. */
347 if (fp->f_seqcount > 1)
348 fp->f_seqcount = 1;
349 else
350 fp->f_seqcount = 0;
351 return (0);
352 }
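/*
 * Worked example (editorial): a sequential reader issuing 64KB
 * requests advances f_seqcount by howmany(65536, 16384) = 4 per
 * call, so after a few calls it saturates at IO_SEQMAX and the
 * returned hint becomes IO_SEQMAX << IO_SEQSHIFT, the maximum
 * read-ahead window.
 */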
353
354 /*
355 * Package up an I/O request on a vnode into a uio and do it.
356 */
357 int
358 vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, active_cred, file_cred,
359 aresid, td)
360 enum uio_rw rw;
361 struct vnode *vp;
362 void *base;
363 int len;
364 off_t offset;
365 enum uio_seg segflg;
366 int ioflg;
367 struct ucred *active_cred;
368 struct ucred *file_cred;
369 int *aresid;
370 struct thread *td;
371 {
372 struct uio auio;
373 struct iovec aiov;
374 struct mount *mp;
375 struct ucred *cred;
376 int error, lock_flags;
377
378 VFS_ASSERT_GIANT(vp->v_mount);
379
380 if ((ioflg & IO_NODELOCKED) == 0) {
381 mp = NULL;
382 if (rw == UIO_WRITE) {
383 if (vp->v_type != VCHR &&
384 (error = vn_start_write(vp, &mp, V_WAIT | PCATCH))
385 != 0)
386 return (error);
387 if (MNT_SHARED_WRITES(mp) ||
388 ((mp == NULL) && MNT_SHARED_WRITES(vp->v_mount))) {
389 lock_flags = LK_SHARED;
390 } else {
391 lock_flags = LK_EXCLUSIVE;
392 }
393 vn_lock(vp, lock_flags | LK_RETRY);
394 } else
395 vn_lock(vp, LK_SHARED | LK_RETRY);
396
397 }
398 ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
399 auio.uio_iov = &aiov;
400 auio.uio_iovcnt = 1;
401 aiov.iov_base = base;
402 aiov.iov_len = len;
403 auio.uio_resid = len;
404 auio.uio_offset = offset;
405 auio.uio_segflg = segflg;
406 auio.uio_rw = rw;
407 auio.uio_td = td;
408 error = 0;
409 #ifdef MAC
410 if ((ioflg & IO_NOMACCHECK) == 0) {
411 if (rw == UIO_READ)
412 error = mac_vnode_check_read(active_cred, file_cred,
413 vp);
414 else
415 error = mac_vnode_check_write(active_cred, file_cred,
416 vp);
417 }
418 #endif
419 if (error == 0) {
420 if (file_cred)
421 cred = file_cred;
422 else
423 cred = active_cred;
424 if (rw == UIO_READ)
425 error = VOP_READ(vp, &auio, ioflg, cred);
426 else
427 error = VOP_WRITE(vp, &auio, ioflg, cred);
428 }
429 if (aresid)
430 *aresid = auio.uio_resid;
431 else
432 if (auio.uio_resid && error == 0)
433 error = EIO;
434 if ((ioflg & IO_NODELOCKED) == 0) {
435 if (rw == UIO_WRITE && vp->v_type != VCHR)
436 vn_finished_write(mp);
437 VOP_UNLOCK(vp, 0);
438 }
439 return (error);
440 }
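/*
 * A hedged usage sketch for vn_rdwr() (the buffer and size below are
 * made up for illustration): read the first 512 bytes of an already
 * referenced vnode into a kernel buffer, letting vn_rdwr() handle the
 * locking since IO_NODELOCKED is not passed:
 *
 *	char buf[512];
 *	int resid, error;
 *
 *	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), (off_t)0,
 *	    UIO_SYSSPACE, 0, td->td_ucred, NOCRED, &resid, td);
 *
 * Because a non-NULL aresid is passed, a short transfer is reported
 * in resid rather than being turned into EIO.
 */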
441
442 /*
443 * Package up an I/O request on a vnode into a uio and do it. The I/O
444 * request is split up into smaller chunks and we try to avoid saturating
445 * the buffer cache while potentially holding a vnode locked, so we
446 * check bwillwrite() before calling vn_rdwr(). We also call uio_yield()
447 * to give other processes a chance to lock the vnode (either other processes
448 * core'ing the same binary, or unrelated processes scanning the directory).
449 */
450 int
451 vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, active_cred,
452 file_cred, aresid, td)
453 enum uio_rw rw;
454 struct vnode *vp;
455 void *base;
456 size_t len;
457 off_t offset;
458 enum uio_seg segflg;
459 int ioflg;
460 struct ucred *active_cred;
461 struct ucred *file_cred;
462 size_t *aresid;
463 struct thread *td;
464 {
465 int error = 0;
466 int iaresid;
467
468 VFS_ASSERT_GIANT(vp->v_mount);
469
470 do {
471 int chunk;
472
473 /*
474 * Force `offset' to a multiple of MAXBSIZE except possibly
475 * for the first chunk, so that filesystems only need to
476 * write full blocks except possibly for the first and last
477 * chunks.
478 */
479 chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;
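/*
 * Editorial example: with MAXBSIZE of 65536 and offset 5000, chunk
 * becomes 60536, so the next iteration starts at offset 65536 and
 * all subsequent chunks are MAXBSIZE-aligned.
 */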
480
481 if (chunk > len)
482 chunk = len;
483 if (rw != UIO_READ && vp->v_type == VREG)
484 bwillwrite();
485 iaresid = 0;
486 error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
487 ioflg, active_cred, file_cred, &iaresid, td);
488 len -= chunk; /* aresid calc already includes length */
489 if (error)
490 break;
491 offset += chunk;
492 base = (char *)base + chunk;
493 uio_yield();
494 } while (len);
495 if (aresid)
496 *aresid = len + iaresid;
497 return (error);
498 }
499
500 /*
501 * File table vnode read routine.
502 */
503 static int
504 vn_read(fp, uio, active_cred, flags, td)
505 struct file *fp;
506 struct uio *uio;
507 struct ucred *active_cred;
508 struct thread *td;
509 int flags;
510 {
511 struct vnode *vp;
512 int error, ioflag;
513 struct mtx *mtxp;
514 int advice, vfslocked;
515 off_t offset, start, end;
516
517 KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
518 uio->uio_td, td));
519 mtxp = NULL;
520 vp = fp->f_vnode;
521 ioflag = 0;
522 if (fp->f_flag & FNONBLOCK)
523 ioflag |= IO_NDELAY;
524 if (fp->f_flag & O_DIRECT)
525 ioflag |= IO_DIRECT;
526 advice = POSIX_FADV_NORMAL;
527 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
528 /*
529 * According to McKusick the vn lock was protecting f_offset here.
530 * It is now protected by the FOFFSET_LOCKED flag.
531 */
532 if ((flags & FOF_OFFSET) == 0 || fp->f_advice != NULL) {
533 mtxp = mtx_pool_find(mtxpool_sleep, fp);
534 mtx_lock(mtxp);
535 if ((flags & FOF_OFFSET) == 0) {
536 while (fp->f_vnread_flags & FOFFSET_LOCKED) {
537 fp->f_vnread_flags |= FOFFSET_LOCK_WAITING;
538 msleep(&fp->f_vnread_flags, mtxp, PUSER - 1,
539 "vnread offlock", 0);
540 }
541 fp->f_vnread_flags |= FOFFSET_LOCKED;
542 uio->uio_offset = fp->f_offset;
543 }
544 if (fp->f_advice != NULL &&
545 uio->uio_offset >= fp->f_advice->fa_start &&
546 uio->uio_offset + uio->uio_resid <= fp->f_advice->fa_end)
547 advice = fp->f_advice->fa_advice;
548 mtx_unlock(mtxp);
549 }
550 vn_lock(vp, LK_SHARED | LK_RETRY);
551
552 switch (advice) {
553 case POSIX_FADV_NORMAL:
554 case POSIX_FADV_SEQUENTIAL:
555 case POSIX_FADV_NOREUSE:
556 ioflag |= sequential_heuristic(uio, fp);
557 break;
558 case POSIX_FADV_RANDOM:
559 /* Disable read-ahead for random I/O. */
560 break;
561 }
562 offset = uio->uio_offset;
563
564 #ifdef MAC
565 error = mac_vnode_check_read(active_cred, fp->f_cred, vp);
566 if (error == 0)
567 #endif
568 error = VOP_READ(vp, uio, ioflag, fp->f_cred);
569 if ((flags & FOF_OFFSET) == 0) {
570 fp->f_offset = uio->uio_offset;
571 mtx_lock(mtxp);
572 if (fp->f_vnread_flags & FOFFSET_LOCK_WAITING)
573 wakeup(&fp->f_vnread_flags);
574 fp->f_vnread_flags = 0;
575 mtx_unlock(mtxp);
576 }
577 fp->f_nextoff = uio->uio_offset;
578 VOP_UNLOCK(vp, 0);
579 if (error == 0 && advice == POSIX_FADV_NOREUSE &&
580 offset != uio->uio_offset) {
581 /*
582 * Use POSIX_FADV_DONTNEED to flush clean pages and
583 * buffers for the backing file after a
584 * POSIX_FADV_NOREUSE read(2). To optimize the common
585 * case of using POSIX_FADV_NOREUSE with sequential
586 * access, track the previous implicit DONTNEED
587 * request and grow this request to include the
588 * current read(2) in addition to the previous
589 * DONTNEED. With purely sequential access this will
590 * cause the DONTNEED requests to continuously grow to
591 * cover all of the previously read regions of the
592 * file. This allows filesystem blocks that are
593 * accessed by multiple calls to read(2) to be flushed
594 * once the last read(2) finishes.
595 */
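	/*
	 * Editorial worked example of the window growth described
	 * above: a first read covering [0, 16383] records that range
	 * as the previous DONTNEED; a following read of
	 * [16384, 32767] satisfies fa_prevend + 1 == start, so the
	 * advisory range grows to [0, 32767] and the whole region is
	 * handed to VOP_ADVISE().
	 */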
596 start = offset;
597 end = uio->uio_offset - 1;
598 mtx_lock(mtxp);
599 if (fp->f_advice != NULL &&
600 fp->f_advice->fa_advice == POSIX_FADV_NOREUSE) {
601 if (start != 0 && fp->f_advice->fa_prevend + 1 == start)
602 start = fp->f_advice->fa_prevstart;
603 else if (fp->f_advice->fa_prevstart != 0 &&
604 fp->f_advice->fa_prevstart == end + 1)
605 end = fp->f_advice->fa_prevend;
606 fp->f_advice->fa_prevstart = start;
607 fp->f_advice->fa_prevend = end;
608 }
609 mtx_unlock(mtxp);
610 error = VOP_ADVISE(vp, start, end, POSIX_FADV_DONTNEED);
611 }
612 VFS_UNLOCK_GIANT(vfslocked);
613 return (error);
614 }
615
616 /*
617 * File table vnode write routine.
618 */
619 static int
620 vn_write(fp, uio, active_cred, flags, td)
621 struct file *fp;
622 struct uio *uio;
623 struct ucred *active_cred;
624 struct thread *td;
625 int flags;
626 {
627 struct vnode *vp;
628 struct mount *mp;
629 int error, ioflag, lock_flags;
630 struct mtx *mtxp;
631 int advice, vfslocked;
632 off_t offset, start, end;
633
634 KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
635 uio->uio_td, td));
636 vp = fp->f_vnode;
637 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
638 if (vp->v_type == VREG)
639 bwillwrite();
640 ioflag = IO_UNIT;
641 if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
642 ioflag |= IO_APPEND;
643 if (fp->f_flag & FNONBLOCK)
644 ioflag |= IO_NDELAY;
645 if (fp->f_flag & O_DIRECT)
646 ioflag |= IO_DIRECT;
647 if ((fp->f_flag & O_FSYNC) ||
648 (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
649 ioflag |= IO_SYNC;
650 mp = NULL;
651 if (vp->v_type != VCHR &&
652 (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
653 goto unlock;
654
655 if ((MNT_SHARED_WRITES(mp) ||
656 ((mp == NULL) && MNT_SHARED_WRITES(vp->v_mount))) &&
657 (flags & FOF_OFFSET) != 0) {
658 lock_flags = LK_SHARED;
659 } else {
660 lock_flags = LK_EXCLUSIVE;
661 }
662
663 vn_lock(vp, lock_flags | LK_RETRY);
664 if ((flags & FOF_OFFSET) == 0)
665 uio->uio_offset = fp->f_offset;
666 advice = POSIX_FADV_NORMAL;
667 mtxp = NULL;
668 if (fp->f_advice != NULL) {
669 mtxp = mtx_pool_find(mtxpool_sleep, fp);
670 mtx_lock(mtxp);
671 if (fp->f_advice != NULL &&
672 uio->uio_offset >= fp->f_advice->fa_start &&
673 uio->uio_offset + uio->uio_resid <= fp->f_advice->fa_end)
674 advice = fp->f_advice->fa_advice;
675 mtx_unlock(mtxp);
676 }
677 switch (advice) {
678 case POSIX_FADV_NORMAL:
679 case POSIX_FADV_SEQUENTIAL:
680 case POSIX_FADV_NOREUSE:
681 ioflag |= sequential_heuristic(uio, fp);
682 break;
683 case POSIX_FADV_RANDOM:
684 /* XXX: Is this correct? */
685 break;
686 }
687 offset = uio->uio_offset;
688
689 #ifdef MAC
690 error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
691 if (error == 0)
692 #endif
693 error = VOP_WRITE(vp, uio, ioflag, fp->f_cred);
694 if ((flags & FOF_OFFSET) == 0)
695 fp->f_offset = uio->uio_offset;
696 fp->f_nextoff = uio->uio_offset;
697 VOP_UNLOCK(vp, 0);
698 if (vp->v_type != VCHR)
699 vn_finished_write(mp);
700 if (error == 0 && advice == POSIX_FADV_NOREUSE &&
701 offset != uio->uio_offset) {
702 /*
703 * Use POSIX_FADV_DONTNEED to flush clean pages and
704 * buffers for the backing file after a
705 * POSIX_FADV_NOREUSE write(2). To optimize the
706 * common case of using POSIX_FADV_NOREUSE with
707 * sequential access, track the previous implicit
708 * DONTNEED request and grow this request to include
709 * the current write(2) in addition to the previous
710 * DONTNEED. With purely sequential access this will
711 * cause the DONTNEED requests to continuously grow to
712 * cover all of the previously written regions of the
713 * file.
714 *
715 * Note that the blocks just written are almost
716 * certainly still dirty, so this only works when
717 * VOP_ADVISE() calls from subsequent writes push out
718 * the data written by this write(2) once the backing
719 * buffers are clean. However, as compared to forcing
720 * IO_DIRECT, this gives much saner behavior. Write
721 * clustering is still allowed, and clean pages are
722 * merely moved to the cache page queue rather than
723 * outright thrown away. This means a subsequent
724 * read(2) can still avoid hitting the disk if the
725 * pages have not been reclaimed.
726 *
727 * This does make POSIX_FADV_NOREUSE largely useless
728 * with non-sequential access. However, sequential
729 * access is the more common use case and the flag is
730 * merely advisory.
731 */
732 start = offset;
733 end = uio->uio_offset - 1;
734 mtx_lock(mtxp);
735 if (fp->f_advice != NULL &&
736 fp->f_advice->fa_advice == POSIX_FADV_NOREUSE) {
737 if (start != 0 && fp->f_advice->fa_prevend + 1 == start)
738 start = fp->f_advice->fa_prevstart;
739 else if (fp->f_advice->fa_prevstart != 0 &&
740 fp->f_advice->fa_prevstart == end + 1)
741 end = fp->f_advice->fa_prevend;
742 fp->f_advice->fa_prevstart = start;
743 fp->f_advice->fa_prevend = end;
744 }
745 mtx_unlock(mtxp);
746 error = VOP_ADVISE(vp, start, end, POSIX_FADV_DONTNEED);
747 }
748
749 unlock:
750 VFS_UNLOCK_GIANT(vfslocked);
751 return (error);
752 }
753
754 /*
755 * File table truncate routine.
756 */
757 static int
758 vn_truncate(fp, length, active_cred, td)
759 struct file *fp;
760 off_t length;
761 struct ucred *active_cred;
762 struct thread *td;
763 {
764 struct vattr vattr;
765 struct mount *mp;
766 struct vnode *vp;
767 int vfslocked;
768 int error;
769
770 vp = fp->f_vnode;
771 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
772 error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
773 if (error) {
774 VFS_UNLOCK_GIANT(vfslocked);
775 return (error);
776 }
777 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
778 if (vp->v_type == VDIR) {
779 error = EISDIR;
780 goto out;
781 }
782 #ifdef MAC
783 error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
784 if (error)
785 goto out;
786 #endif
787 error = vn_writechk(vp);
788 if (error == 0) {
789 VATTR_NULL(&vattr);
790 vattr.va_size = length;
791 error = VOP_SETATTR(vp, &vattr, fp->f_cred);
792 }
793 out:
794 VOP_UNLOCK(vp, 0);
795 vn_finished_write(mp);
796 VFS_UNLOCK_GIANT(vfslocked);
797 return (error);
798 }
799
800 /*
801 * File table vnode stat routine.
802 */
803 static int
804 vn_statfile(fp, sb, active_cred, td)
805 struct file *fp;
806 struct stat *sb;
807 struct ucred *active_cred;
808 struct thread *td;
809 {
810 struct vnode *vp = fp->f_vnode;
811 int vfslocked;
812 int error;
813
814 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
815 vn_lock(vp, LK_SHARED | LK_RETRY);
816 error = vn_stat(vp, sb, active_cred, fp->f_cred, td);
817 VOP_UNLOCK(vp, 0);
818 VFS_UNLOCK_GIANT(vfslocked);
819
820 return (error);
821 }
822
823 /*
824 * Stat a vnode; implementation for the stat syscall
825 */
826 int
827 vn_stat(vp, sb, active_cred, file_cred, td)
828 struct vnode *vp;
829 register struct stat *sb;
830 struct ucred *active_cred;
831 struct ucred *file_cred;
832 struct thread *td;
833 {
834 struct vattr vattr;
835 register struct vattr *vap;
836 int error;
837 u_short mode;
838
839 #ifdef MAC
840 error = mac_vnode_check_stat(active_cred, file_cred, vp);
841 if (error)
842 return (error);
843 #endif
844
845 vap = &vattr;
846
847 /*
848 * Initialize defaults for new and unusual fields, so that file
849 * systems which don't support these fields don't need to know
850 * about them.
851 */
852 vap->va_birthtime.tv_sec = -1;
853 vap->va_birthtime.tv_nsec = 0;
854 vap->va_fsid = VNOVAL;
855 vap->va_rdev = NODEV;
856
857 error = VOP_GETATTR(vp, vap, active_cred);
858 if (error)
859 return (error);
860
861 /*
862 * Zero the spare stat fields
863 */
864 bzero(sb, sizeof *sb);
865
866 /*
867 * Copy from vattr table
868 */
869 if (vap->va_fsid != VNOVAL)
870 sb->st_dev = vap->va_fsid;
871 else
872 sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
873 sb->st_ino = vap->va_fileid;
874 mode = vap->va_mode;
875 switch (vap->va_type) {
876 case VREG:
877 mode |= S_IFREG;
878 break;
879 case VDIR:
880 mode |= S_IFDIR;
881 break;
882 case VBLK:
883 mode |= S_IFBLK;
884 break;
885 case VCHR:
886 mode |= S_IFCHR;
887 break;
888 case VLNK:
889 mode |= S_IFLNK;
890 break;
891 case VSOCK:
892 mode |= S_IFSOCK;
893 break;
894 case VFIFO:
895 mode |= S_IFIFO;
896 break;
897 default:
898 return (EBADF);
899 }
900 sb->st_mode = mode;
901 sb->st_nlink = vap->va_nlink;
902 sb->st_uid = vap->va_uid;
903 sb->st_gid = vap->va_gid;
904 sb->st_rdev = vap->va_rdev;
905 if (vap->va_size > OFF_MAX)
906 return (EOVERFLOW);
907 sb->st_size = vap->va_size;
908 sb->st_atimespec = vap->va_atime;
909 sb->st_mtimespec = vap->va_mtime;
910 sb->st_ctimespec = vap->va_ctime;
911 sb->st_birthtimespec = vap->va_birthtime;
912
913 /*
914 * According to www.opengroup.org, the meaning of st_blksize is
915 * "a filesystem-specific preferred I/O block size for this
916 * object. In some filesystem types, this may vary from file
917 * to file"
918 * Use minimum/default of PAGE_SIZE (e.g. for VCHR).
919 */
920
921 sb->st_blksize = max(PAGE_SIZE, vap->va_blocksize);
922
923 sb->st_flags = vap->va_flags;
924 if (priv_check(td, PRIV_VFS_GENERATION))
925 sb->st_gen = 0;
926 else
927 sb->st_gen = vap->va_gen;
928
929 sb->st_blocks = vap->va_bytes / S_BLKSIZE;
930 return (0);
931 }
932
933 /*
934 * File table vnode ioctl routine.
935 */
936 static int
937 vn_ioctl(fp, com, data, active_cred, td)
938 struct file *fp;
939 u_long com;
940 void *data;
941 struct ucred *active_cred;
942 struct thread *td;
943 {
944 struct vnode *vp = fp->f_vnode;
945 struct vattr vattr;
946 int vfslocked;
947 int error;
948
949 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
950 error = ENOTTY;
951 switch (vp->v_type) {
952 case VREG:
953 case VDIR:
954 if (com == FIONREAD) {
955 vn_lock(vp, LK_SHARED | LK_RETRY);
956 error = VOP_GETATTR(vp, &vattr, active_cred);
957 VOP_UNLOCK(vp, 0);
958 if (!error)
959 *(int *)data = vattr.va_size - fp->f_offset;
960 } else if (com == FIONBIO || com == FIOASYNC) /* XXX */
961 error = 0;
962 else
963 error = VOP_IOCTL(vp, com, data, fp->f_flag,
964 active_cred, td);
965 break;
966
967 default:
968 break;
969 }
970 VFS_UNLOCK_GIANT(vfslocked);
971 return (error);
972 }
973
974 /*
975 * File table vnode poll routine.
976 */
977 static int
978 vn_poll(fp, events, active_cred, td)
979 struct file *fp;
980 int events;
981 struct ucred *active_cred;
982 struct thread *td;
983 {
984 struct vnode *vp;
985 int vfslocked;
986 int error;
987
988 vp = fp->f_vnode;
989 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
990 #ifdef MAC
991 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
992 error = mac_vnode_check_poll(active_cred, fp->f_cred, vp);
993 VOP_UNLOCK(vp, 0);
994 if (!error)
995 #endif
996
997 error = VOP_POLL(vp, events, fp->f_cred, td);
998 VFS_UNLOCK_GIANT(vfslocked);
999 return (error);
1000 }
1001
1002 /*
1003 * Acquire the requested lock and then check for validity. LK_RETRY
1004 * permits vn_lock to return doomed vnodes.
1005 */
1006 int
1007 _vn_lock(struct vnode *vp, int flags, char *file, int line)
1008 {
1009 int error;
1010
1011 VNASSERT((flags & LK_TYPE_MASK) != 0, vp,
1012 ("vn_lock called with no locktype."));
1013 do {
1014 #ifdef DEBUG_VFS_LOCKS
1015 KASSERT(vp->v_holdcnt != 0,
1016 ("vn_lock %p: zero hold count", vp));
1017 #endif
1018 error = VOP_LOCK1(vp, flags, file, line);
1019 flags &= ~LK_INTERLOCK; /* Interlock is always dropped. */
1020 KASSERT((flags & LK_RETRY) == 0 || error == 0,
1021 ("LK_RETRY set with incompatible flags (0x%x) or an error occurred (%d)",
1022 flags, error));
1023 /*
1024 * Callers specify LK_RETRY if they wish to get dead vnodes.
1025 * If RETRY is not set, we return ENOENT instead.
1026 */
1027 if (error == 0 && vp->v_iflag & VI_DOOMED &&
1028 (flags & LK_RETRY) == 0) {
1029 VOP_UNLOCK(vp, 0);
1030 error = ENOENT;
1031 break;
1032 }
1033 } while (flags & LK_RETRY && error != 0);
1034 return (error);
1035 }
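/*
 * Usage sketch (editorial): callers normally go through the vn_lock()
 * macro rather than _vn_lock() directly. A caller that can tolerate a
 * doomed vnode asks for LK_RETRY and checks VI_DOOMED itself:
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *
 * while a caller without LK_RETRY must be prepared for ENOENT:
 *
 *	error = vn_lock(vp, LK_SHARED);
 */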
1036
1037 /*
1038 * File table vnode close routine.
1039 */
1040 static int
1041 vn_closefile(fp, td)
1042 struct file *fp;
1043 struct thread *td;
1044 {
1045 struct vnode *vp;
1046 struct flock lf;
1047 int vfslocked;
1048 int error;
1049
1050 vp = fp->f_vnode;
1051
1052 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
1053 if (fp->f_type == DTYPE_VNODE && fp->f_flag & FHASLOCK) {
1054 lf.l_whence = SEEK_SET;
1055 lf.l_start = 0;
1056 lf.l_len = 0;
1057 lf.l_type = F_UNLCK;
1058 (void) VOP_ADVLOCK(vp, fp, F_UNLCK, &lf, F_FLOCK);
1059 }
1060
1061 fp->f_ops = &badfileops;
1062
1063 error = vn_close(vp, fp->f_flag, fp->f_cred, td);
1064 VFS_UNLOCK_GIANT(vfslocked);
1065 return (error);
1066 }
1067
1068 /*
1069 * Prepare to start a filesystem write operation. If the operation is
1070 * permitted, then we bump the count of operations in progress and
1071 * proceed. If a suspend request is in progress, we wait until the
1072 * suspension is over, and then proceed.
1073 */
1074 int
1075 vn_start_write(vp, mpp, flags)
1076 struct vnode *vp;
1077 struct mount **mpp;
1078 int flags;
1079 {
1080 struct mount *mp;
1081 int error;
1082
1083 error = 0;
1084 /*
1085 * If a vnode is provided, get and return the mount point
1086 * to which it will write.
1087 */
1088 if (vp != NULL) {
1089 if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
1090 *mpp = NULL;
1091 if (error != EOPNOTSUPP)
1092 return (error);
1093 return (0);
1094 }
1095 }
1096 if ((mp = *mpp) == NULL)
1097 return (0);
1098
1099 /*
1100 * VOP_GETWRITEMOUNT() returns with the mp refcount held through
1101 * a vfs_ref().
1102 * As long as a vnode is not provided we need to acquire a
1103 * refcount for the provided mountpoint too, in order to
1104 * emulate a vfs_ref().
1105 */
1106 MNT_ILOCK(mp);
1107 if (vp == NULL)
1108 MNT_REF(mp);
1109
1110 /*
1111 * Check on status of suspension.
1112 */
1113 if ((curthread->td_pflags & TDP_IGNSUSP) == 0 ||
1114 mp->mnt_susp_owner != curthread) {
1115 while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
1116 if (flags & V_NOWAIT) {
1117 error = EWOULDBLOCK;
1118 goto unlock;
1119 }
1120 error = msleep(&mp->mnt_flag, MNT_MTX(mp),
1121 (PUSER - 1) | (flags & PCATCH), "suspfs", 0);
1122 if (error)
1123 goto unlock;
1124 }
1125 }
1126 if (flags & V_XSLEEP)
1127 goto unlock;
1128 mp->mnt_writeopcount++;
1129 unlock:
1130 if (error != 0 || (flags & V_XSLEEP) != 0)
1131 MNT_REL(mp);
1132 MNT_IUNLOCK(mp);
1133 return (error);
1134 }
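/*
 * Editorial sketch of the canonical bracketing, as used throughout
 * this file: every vn_start_write() that succeeds without V_XSLEEP
 * must be paired with vn_finished_write() on the returned mount:
 *
 *	struct mount *mp;
 *
 *	if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
 *		return (error);
 *	... perform the write-side VOPs ...
 *	vn_finished_write(mp);
 */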
1135
1136 /*
1137 * Secondary suspension. Used by operations such as vop_inactive
1138 * routines that are needed by the higher level functions. These
1139 * are allowed to proceed until all the higher level functions have
1140 * completed (indicated by mnt_writeopcount dropping to zero). At that
1141 * time, these operations are halted until the suspension is over.
1142 */
1143 int
1144 vn_start_secondary_write(vp, mpp, flags)
1145 struct vnode *vp;
1146 struct mount **mpp;
1147 int flags;
1148 {
1149 struct mount *mp;
1150 int error;
1151
1152 retry:
1153 if (vp != NULL) {
1154 if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
1155 *mpp = NULL;
1156 if (error != EOPNOTSUPP)
1157 return (error);
1158 return (0);
1159 }
1160 }
1161 /*
1162 * If we are not suspended or have not yet reached suspended
1163 * mode, then let the operation proceed.
1164 */
1165 if ((mp = *mpp) == NULL)
1166 return (0);
1167
1168 /*
1169 * VOP_GETWRITEMOUNT() returns with the mp refcount held through
1170 * a vfs_ref().
1171 * As long as a vnode is not provided we need to acquire a
1172 * refcount for the provided mountpoint too, in order to
1173 * emulate a vfs_ref().
1174 */
1175 MNT_ILOCK(mp);
1176 if (vp == NULL)
1177 MNT_REF(mp);
1178 if ((mp->mnt_kern_flag & (MNTK_SUSPENDED | MNTK_SUSPEND2)) == 0) {
1179 mp->mnt_secondary_writes++;
1180 mp->mnt_secondary_accwrites++;
1181 MNT_IUNLOCK(mp);
1182 return (0);
1183 }
1184 if (flags & V_NOWAIT) {
1185 MNT_REL(mp);
1186 MNT_IUNLOCK(mp);
1187 return (EWOULDBLOCK);
1188 }
1189 /*
1190 * Wait for the suspension to finish.
1191 */
1192 error = msleep(&mp->mnt_flag, MNT_MTX(mp),
1193 (PUSER - 1) | (flags & PCATCH) | PDROP, "suspfs", 0);
1194 vfs_rel(mp);
1195 if (error == 0)
1196 goto retry;
1197 return (error);
1198 }
1199
1200 /*
1201 * Filesystem write operation has completed. If we are suspending and this
1202 * operation is the last one, notify the suspender that the suspension is
1203 * now in effect.
1204 */
1205 void
1206 vn_finished_write(mp)
1207 struct mount *mp;
1208 {
1209 if (mp == NULL)
1210 return;
1211 MNT_ILOCK(mp);
1212 MNT_REL(mp);
1213 mp->mnt_writeopcount--;
1214 if (mp->mnt_writeopcount < 0)
1215 panic("vn_finished_write: neg cnt");
1216 if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
1217 mp->mnt_writeopcount <= 0)
1218 wakeup(&mp->mnt_writeopcount);
1219 MNT_IUNLOCK(mp);
1220 }
1221
1222
1223 /*
1224 * Filesystem secondary write operation has completed. If we are
1225 * suspending and this operation is the last one, notify the suspender
1226 * that the suspension is now in effect.
1227 */
1228 void
1229 vn_finished_secondary_write(mp)
1230 struct mount *mp;
1231 {
1232 if (mp == NULL)
1233 return;
1234 MNT_ILOCK(mp);
1235 MNT_REL(mp);
1236 mp->mnt_secondary_writes--;
1237 if (mp->mnt_secondary_writes < 0)
1238 panic("vn_finished_secondary_write: neg cnt");
1239 if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
1240 mp->mnt_secondary_writes <= 0)
1241 wakeup(&mp->mnt_secondary_writes);
1242 MNT_IUNLOCK(mp);
1243 }
1244
1245
1246
1247 /*
1248 * Request a filesystem to suspend write operations.
1249 */
1250 int
1251 vfs_write_suspend(mp)
1252 struct mount *mp;
1253 {
1254 int error;
1255
1256 MNT_ILOCK(mp);
1257 if (mp->mnt_susp_owner == curthread) {
1258 MNT_IUNLOCK(mp);
1259 return (EALREADY);
1260 }
1261 while (mp->mnt_kern_flag & MNTK_SUSPEND)
1262 msleep(&mp->mnt_flag, MNT_MTX(mp), PUSER - 1, "wsuspfs", 0);
1263 mp->mnt_kern_flag |= MNTK_SUSPEND;
1264 mp->mnt_susp_owner = curthread;
1265 if (mp->mnt_writeopcount > 0)
1266 (void) msleep(&mp->mnt_writeopcount,
1267 MNT_MTX(mp), (PUSER - 1)|PDROP, "suspwt", 0);
1268 else
1269 MNT_IUNLOCK(mp);
1270 if ((error = VFS_SYNC(mp, MNT_SUSPEND)) != 0)
1271 vfs_write_resume(mp);
1272 return (error);
1273 }
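/*
 * Editorial sketch: a successful suspension leaves the caller as
 * mnt_susp_owner and must be undone with vfs_write_resume(), roughly
 * the pattern a snapshot-style consumer would follow:
 *
 *	error = vfs_write_suspend(mp);
 *	if (error == 0) {
 *		... filesystem is quiescent for writes ...
 *		vfs_write_resume(mp);
 *	}
 */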
1274
1275 /*
1276 * Request a filesystem to resume write operations.
1277 */
1278 void
1279 vfs_write_resume(mp)
1280 struct mount *mp;
1281 {
1282
1283 MNT_ILOCK(mp);
1284 if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
1285 KASSERT(mp->mnt_susp_owner == curthread, ("mnt_susp_owner"));
1286 mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPEND2 |
1287 MNTK_SUSPENDED);
1288 mp->mnt_susp_owner = NULL;
1289 wakeup(&mp->mnt_writeopcount);
1290 wakeup(&mp->mnt_flag);
1291 curthread->td_pflags &= ~TDP_IGNSUSP;
1292 MNT_IUNLOCK(mp);
1293 VFS_SUSP_CLEAN(mp);
1294 } else
1295 MNT_IUNLOCK(mp);
1296 }
1297
1298 /*
1299 * Implement kqueues for files by translating it to vnode operation.
1300 */
1301 static int
1302 vn_kqfilter(struct file *fp, struct knote *kn)
1303 {
1304 int vfslocked;
1305 int error;
1306
1307 vfslocked = VFS_LOCK_GIANT(fp->f_vnode->v_mount);
1308 error = VOP_KQFILTER(fp->f_vnode, kn);
1309 VFS_UNLOCK_GIANT(vfslocked);
1310
1311 return (error);
1312 }
1313
1314 /*
1315 * Simplified in-kernel wrapper calls for extended attribute access.
1316 * Both calls pass in a NULL credential, authorizing as "kernel" access.
1317 * Set IO_NODELOCKED in ioflg if the vnode is already locked.
1318 */
1319 int
1320 vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
1321 const char *attrname, int *buflen, char *buf, struct thread *td)
1322 {
1323 struct uio auio;
1324 struct iovec iov;
1325 int error;
1326
1327 iov.iov_len = *buflen;
1328 iov.iov_base = buf;
1329
1330 auio.uio_iov = &iov;
1331 auio.uio_iovcnt = 1;
1332 auio.uio_rw = UIO_READ;
1333 auio.uio_segflg = UIO_SYSSPACE;
1334 auio.uio_td = td;
1335 auio.uio_offset = 0;
1336 auio.uio_resid = *buflen;
1337
1338 if ((ioflg & IO_NODELOCKED) == 0)
1339 vn_lock(vp, LK_SHARED | LK_RETRY);
1340
1341 ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
1342
1343 /* authorize attribute retrieval as kernel */
1344 error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
1345 td);
1346
1347 if ((ioflg & IO_NODELOCKED) == 0)
1348 VOP_UNLOCK(vp, 0);
1349
1350 if (error == 0) {
1351 *buflen = *buflen - auio.uio_resid;
1352 }
1353
1354 return (error);
1355 }
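/*
 * Usage sketch (editorial; the attribute name is hypothetical): fetch
 * a system-namespace attribute into a stack buffer. On return, buflen
 * is clipped to the number of bytes actually read:
 *
 *	char buf[64];
 *	int buflen = sizeof(buf);
 *
 *	error = vn_extattr_get(vp, IO_NODELOCKED,
 *	    EXTATTR_NAMESPACE_SYSTEM, "myattr", &buflen, buf, td);
 *
 * IO_NODELOCKED is only correct here if the caller already holds the
 * vnode lock, as the assertion in the function enforces.
 */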
1356
1357 /*
1358 * XXX failure mode if partially written?
1359 */
1360 int
1361 vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
1362 const char *attrname, int buflen, char *buf, struct thread *td)
1363 {
1364 struct uio auio;
1365 struct iovec iov;
1366 struct mount *mp;
1367 int error;
1368
1369 iov.iov_len = buflen;
1370 iov.iov_base = buf;
1371
1372 auio.uio_iov = &iov;
1373 auio.uio_iovcnt = 1;
1374 auio.uio_rw = UIO_WRITE;
1375 auio.uio_segflg = UIO_SYSSPACE;
1376 auio.uio_td = td;
1377 auio.uio_offset = 0;
1378 auio.uio_resid = buflen;
1379
1380 if ((ioflg & IO_NODELOCKED) == 0) {
1381 if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
1382 return (error);
1383 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1384 }
1385
1386 ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
1387
1388 /* authorize attribute setting as kernel */
1389 error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);
1390
1391 if ((ioflg & IO_NODELOCKED) == 0) {
1392 vn_finished_write(mp);
1393 VOP_UNLOCK(vp, 0);
1394 }
1395
1396 return (error);
1397 }
1398
1399 int
1400 vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
1401 const char *attrname, struct thread *td)
1402 {
1403 struct mount *mp;
1404 int error;
1405
1406 if ((ioflg & IO_NODELOCKED) == 0) {
1407 if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
1408 return (error);
1409 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1410 }
1411
1412 ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
1413
1414 /* authorize attribute removal as kernel */
1415 error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, td);
1416 if (error == EOPNOTSUPP)
1417 error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
1418 NULL, td);
1419
1420 if ((ioflg & IO_NODELOCKED) == 0) {
1421 vn_finished_write(mp);
1422 VOP_UNLOCK(vp, 0);
1423 }
1424
1425 return (error);
1426 }
1427
1428 int
1429 vn_vget_ino(struct vnode *vp, ino_t ino, int lkflags, struct vnode **rvp)
1430 {
1431 struct mount *mp;
1432 int ltype, error;
1433
1434 mp = vp->v_mount;
1435 ltype = VOP_ISLOCKED(vp);
1436 KASSERT(ltype == LK_EXCLUSIVE || ltype == LK_SHARED,
1437 ("vn_vget_ino: vp not locked"));
1438 error = vfs_busy(mp, MBF_NOWAIT);
1439 if (error != 0) {
1440 vfs_ref(mp);
1441 VOP_UNLOCK(vp, 0);
1442 error = vfs_busy(mp, 0);
1443 vn_lock(vp, ltype | LK_RETRY);
1444 vfs_rel(mp);
1445 if (error != 0)
1446 return (ENOENT);
1447 if (vp->v_iflag & VI_DOOMED) {
1448 vfs_unbusy(mp);
1449 return (ENOENT);
1450 }
1451 }
1452 VOP_UNLOCK(vp, 0);
1453 error = VFS_VGET(mp, ino, lkflags, rvp);
1454 vfs_unbusy(mp);
1455 vn_lock(vp, ltype | LK_RETRY);
1456 if (vp->v_iflag & VI_DOOMED) {
1457 if (error == 0)
1458 vput(*rvp);
1459 error = ENOENT;
1460 }
1461 return (error);
1462 }
1463
1464 int
1465 vn_rlimit_fsize(const struct vnode *vp, const struct uio *uio, const struct thread *td)
1466 {
1467 if (vp->v_type != VREG || td == NULL)
1468 return (0);
1469
1470 PROC_LOCK(td->td_proc);
1471 if (uio->uio_offset + uio->uio_resid >
1472 lim_cur(td->td_proc, RLIMIT_FSIZE)) {
1473 psignal(td->td_proc, SIGXFSZ);
1474 PROC_UNLOCK(td->td_proc);
1475 return (EFBIG);
1476 }
1477 PROC_UNLOCK(td->td_proc);
1478
1479 return (0);
1480 }
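/*
 * Editorial sketch of a typical call site: a filesystem write path
 * checks the file size rlimit before extending a regular file and
 * fails the write with EFBIG (after SIGXFSZ has been posted):
 *
 *	if (vn_rlimit_fsize(vp, uio, uio->uio_td))
 *		return (EFBIG);
 */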
1481
1482 void
1483 vn_pages_remove(struct vnode *vp, vm_pindex_t start, vm_pindex_t end)
1484 {
1485 vm_object_t object;
1486
1487 if ((object = vp->v_object) == NULL)
1488 return;
1489 VM_OBJECT_LOCK(object);
1490 vm_object_page_remove(object, start, end, 0);
1491 VM_OBJECT_UNLOCK(object);
1492 }
1493
1494 int
1495 vn_bmap_seekhole(struct vnode *vp, u_long cmd, off_t *off, struct ucred *cred)
1496 {
1497 struct vattr va;
1498 daddr_t bn, bnp;
1499 uint64_t bsize;
1500 off_t noff;
1501 int error;
1502
1503 KASSERT(cmd == FIOSEEKHOLE || cmd == FIOSEEKDATA,
1504 ("Wrong command %lu", cmd));
1505
1506 if (vn_lock(vp, LK_SHARED) != 0)
1507 return (EBADF);
1508 if (vp->v_type != VREG) {
1509 error = ENOTTY;
1510 goto unlock;
1511 }
1512 error = VOP_GETATTR(vp, &va, cred);
1513 if (error != 0)
1514 goto unlock;
1515 noff = *off;
1516 if (noff >= va.va_size) {
1517 error = ENXIO;
1518 goto unlock;
1519 }
1520 bsize = vp->v_mount->mnt_stat.f_iosize;
1521 for (bn = noff / bsize; noff < va.va_size; bn++, noff += bsize) {
1522 error = VOP_BMAP(vp, bn, NULL, &bnp, NULL, NULL);
1523 if (error == EOPNOTSUPP) {
1524 error = ENOTTY;
1525 goto unlock;
1526 }
1527 if ((bnp == -1 && cmd == FIOSEEKHOLE) ||
1528 (bnp != -1 && cmd == FIOSEEKDATA)) {
1529 noff = bn * bsize;
1530 if (noff < *off)
1531 noff = *off;
1532 goto unlock;
1533 }
1534 }
1535 if (noff > va.va_size)
1536 noff = va.va_size;
1537 /* noff == va.va_size. There is an implicit hole at the end of file. */
1538 if (cmd == FIOSEEKDATA)
1539 error = ENXIO;
1540 unlock:
1541 VOP_UNLOCK(vp, 0);
1542 if (error == 0)
1543 *off = noff;
1544 return (error);
1545 }
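/*
 * Editorial sketch: a filesystem that maps files through VOP_BMAP()
 * can service the FIOSEEKHOLE/FIOSEEKDATA ioctls by forwarding them
 * here from its VOP_IOCTL() implementation, roughly:
 *
 *	case FIOSEEKDATA:
 *	case FIOSEEKHOLE:
 *		return (vn_bmap_seekhole(vp, com, (off_t *)data, cred));
 */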