FreeBSD/Linux Kernel Cross Reference
sys/kern/vfs_vnops.c
/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California. All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/5.4/sys/kern/vfs_vnops.c 145335 2005-04-20 19:11:07Z cvs2svn $");

#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/kdb.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/sx.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/syslog.h>
#include <sys/unistd.h>

static fo_rdwr_t	vn_read;
static fo_rdwr_t	vn_write;
static fo_ioctl_t	vn_ioctl;
static fo_poll_t	vn_poll;
static fo_kqfilter_t	vn_kqfilter;
static fo_stat_t	vn_statfile;
static fo_close_t	vn_closefile;

struct fileops vnops = {
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
};
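
/*
 * Background note (a sketch of the surrounding machinery, not code in
 * this file): the open(2) path in vfs_syscalls.c points a freshly
 * allocated struct file at this table, roughly
 *
 *	fp->f_vnode = vp;
 *	fp->f_data = vp;
 *	fp->f_ops = &vnops;
 *
 * after which read(2), write(2), ioctl(2), poll(2), etc. on the
 * descriptor dispatch through the fo_* members above.  DFLAG_PASSABLE
 * marks the descriptor as passable over a unix domain socket, and
 * DFLAG_SEEKABLE marks it as supporting lseek(2).
 */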

int
vn_open(ndp, flagp, cmode, fdidx)
	struct nameidata *ndp;
	int *flagp, cmode, fdidx;
{
	struct thread *td = ndp->ni_cnd.cn_thread;

	return (vn_open_cred(ndp, flagp, cmode, td->td_ucred, fdidx));
}
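
/*
 * Example (an illustrative sketch, not code from this file; the path
 * name is made up): a typical in-kernel open runs NDINIT() to set up
 * the lookup, calls vn_open(), and on success is left holding a
 * locked, referenced vnode.  An fdidx of -1 means no file descriptor
 * table slot is associated with the open.
 *
 *	struct nameidata nd;
 *	int flags, error;
 *
 *	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, "/etc/example.conf", td);
 *	flags = FREAD;
 *	error = vn_open(&nd, &flags, 0, -1);
 *	if (error == 0) {
 *		NDFREE(&nd, NDF_ONLY_PNBUF);	(caller frees the path buffer)
 *		VOP_UNLOCK(nd.ni_vp, 0, td);	(vn_open returns vp locked)
 *		... vn_rdwr() etc. ...
 *		vn_close(nd.ni_vp, FREAD, td->td_ucred, td);
 *	}
 */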

/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 *
 * Note that this does NOT free nameidata for the successful case,
 * due to the NDINIT being done elsewhere.
 */
int
vn_open_cred(ndp, flagp, cmode, cred, fdidx)
	struct nameidata *ndp;
	int *flagp, cmode;
	struct ucred *cred;
	int fdidx;
{
	struct vnode *vp;
	struct mount *mp;
	struct thread *td = ndp->ni_cnd.cn_thread;
	struct vattr vat;
	struct vattr *vap = &vat;
	int mode, fmode, error;
#ifdef LOOKUP_SHARED
	int exclusive;		/* The current intended lock state */

	exclusive = 0;
#endif

	GIANT_REQUIRED;

restart:
	fmode = *flagp;
	if (fmode & O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
		bwillwrite();
		if ((error = namei(ndp)) != 0)
			return (error);
		if (ndp->ni_vp == NULL) {
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				vput(ndp->ni_dvp);
				if ((error = vn_start_write(NULL, &mp,
				    V_XSLEEP | PCATCH)) != 0)
					return (error);
				goto restart;
			}
#ifdef MAC
			error = mac_check_vnode_create(cred, ndp->ni_dvp,
			    &ndp->ni_cnd, vap);
			if (error == 0) {
#endif
				VOP_LEASE(ndp->ni_dvp, td, cred, LEASE_WRITE);
				error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
				    &ndp->ni_cnd, vap);
#ifdef MAC
			}
#endif
			vput(ndp->ni_dvp);
			vn_finished_write(mp);
			if (error) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				return (error);
			}
			ASSERT_VOP_UNLOCKED(ndp->ni_dvp, "create");
			ASSERT_VOP_LOCKED(ndp->ni_vp, "create");
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
#ifdef LOOKUP_SHARED
			exclusive = 1;
#endif
		} else {
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
#ifdef LOOKUP_SHARED
		ndp->ni_cnd.cn_flags =
		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) |
		    LOCKSHARED | LOCKLEAF;
#else
		ndp->ni_cnd.cn_flags =
		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) | LOCKLEAF;
#endif
		if ((error = namei(ndp)) != 0)
			return (error);
		vp = ndp->ni_vp;
	}
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	mode = 0;
	if (fmode & (FWRITE | O_TRUNC)) {
		if (vp->v_type == VDIR) {
			error = EISDIR;
			goto bad;
		}
		mode |= VWRITE;
	}
	if (fmode & FREAD)
		mode |= VREAD;
	if (fmode & O_APPEND)
		mode |= VAPPEND;
#ifdef MAC
	error = mac_check_vnode_open(cred, vp, mode);
	if (error)
		goto bad;
#endif
	if ((fmode & O_CREAT) == 0) {
		if (mode & VWRITE) {
			error = vn_writechk(vp);
			if (error)
				goto bad;
		}
		if (mode) {
			error = VOP_ACCESS(vp, mode, cred, td);
			if (error)
				goto bad;
		}
	}
	if ((error = VOP_GETATTR(vp, vap, cred, td)) == 0) {
		vp->v_cachedfs = vap->va_fsid;
		vp->v_cachedid = vap->va_fileid;
	}
	if ((error = VOP_OPEN(vp, fmode, cred, td, fdidx)) != 0)
		goto bad;
	/*
	 * Make sure that a VM object is created for VMIO support.
	 */
	if (vn_canvmio(vp) == TRUE) {
#ifdef LOOKUP_SHARED
		int flock;

		if (!exclusive && VOP_GETVOBJECT(vp, NULL) != 0)
			VOP_LOCK(vp, LK_UPGRADE, td);
		/*
		 * In cases where the object is marked as dead,
		 * vfs_object_create() will unlock and relock exclusively.
		 * It is safe to call in here with a shared lock because
		 * we only examine fields that the shared lock guarantees
		 * will be stable.  In the UPGRADE case it is not likely
		 * that anyone has used this vnode yet, so there will be
		 * no contention.  The logic after this call restores the
		 * requested locking state.
		 */
#endif
		if ((error = vfs_object_create(vp, td, cred)) != 0) {
			VOP_UNLOCK(vp, 0, td);
			VOP_CLOSE(vp, fmode, cred, td);
			NDFREE(ndp, NDF_ONLY_PNBUF);
			vrele(vp);
			*flagp = fmode;
			return (error);
		}
#ifdef LOOKUP_SHARED
		flock = VOP_ISLOCKED(vp, td);
		if (!exclusive && flock == LK_EXCLUSIVE)
			VOP_LOCK(vp, LK_DOWNGRADE, td);
#endif
	}

	if (fmode & FWRITE)
		vp->v_writecount++;
	*flagp = fmode;
	ASSERT_VOP_LOCKED(vp, "vn_open_cred");
	return (0);
bad:
	NDFREE(ndp, NDF_ONLY_PNBUF);
	vput(vp);
	*flagp = fmode;
	ndp->ni_vp = NULL;
	return (error);
}

/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(vp)
	register struct vnode *vp;
{

	ASSERT_VOP_LOCKED(vp, "vn_writechk");
	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if (vp->v_vflag & VV_TEXT)
		return (ETXTBSY);

	return (0);
}

/*
 * Vnode close call
 */
int
vn_close(vp, flags, file_cred, td)
	register struct vnode *vp;
	int flags;
	struct ucred *file_cred;
	struct thread *td;
{
	int error;

	GIANT_REQUIRED;

	if (flags & FWRITE)
		vp->v_writecount--;
	error = VOP_CLOSE(vp, flags, file_cred, td);
	/*
	 * XXX - In certain instances VOP_CLOSE has to do the vrele
	 * itself.  If the vrele has been done, it will return EAGAIN
	 * to indicate that the vrele should not be done again.  When
	 * this happens, we just return success.  The correct thing to
	 * do would be to have all VOP_CLOSE instances do the vrele.
	 */
	if (error == EAGAIN)
		return (0);
	vrele(vp);
	return (error);
}

/*
 * Sequential heuristic - detect sequential operation
 */
static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{

	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		fp->f_seqcount += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (fp->f_seqcount > IO_SEQMAX)
			fp->f_seqcount = IO_SEQMAX;
		return (fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return (0);
}
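
/*
 * Worked example of the heuristic above (a sketch; BKVASIZE is
 * typically 16384 bytes on this vintage of FreeBSD): a process
 * reading a file in back-to-back 64KB requests keeps uio_offset equal
 * to f_nextoff, so each call adds (65536 + 16383) / 16384 = 4 to
 * f_seqcount until it saturates at IO_SEQMAX.  The returned value,
 * f_seqcount << IO_SEQSHIFT, is merged into the ioflag so that the
 * filesystem's read path (e.g. cluster_read()) can scale up
 * read-ahead.  A single non-contiguous request draws the score down
 * sharply, so one seek largely resets the read-ahead window.
 */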

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, active_cred, file_cred,
    aresid, td)
	enum uio_rw rw;
	struct vnode *vp;
	caddr_t base;
	int len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *active_cred;
	struct ucred *file_cred;
	int *aresid;
	struct thread *td;
{
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	struct ucred *cred;
	int error;

	GIANT_REQUIRED;

	if ((ioflg & IO_NODELOCKED) == 0) {
		mp = NULL;
		if (rw == UIO_WRITE) {
			if (vp->v_type != VCHR &&
			    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH))
			    != 0)
				return (error);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
		} else {
			/*
			 * XXX This should be LK_SHARED but I don't trust VFS
			 * enough to leave it like that until it has been
			 * reviewed further.
			 */
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
		}
	}
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = td;
	error = 0;
#ifdef MAC
	if ((ioflg & IO_NOMACCHECK) == 0) {
		if (rw == UIO_READ)
			error = mac_check_vnode_read(active_cred, file_cred,
			    vp);
		else
			error = mac_check_vnode_write(active_cred, file_cred,
			    vp);
	}
#endif
	if (error == 0) {
		if (file_cred)
			cred = file_cred;
		else
			cred = active_cred;
		if (rw == UIO_READ)
			error = VOP_READ(vp, &auio, ioflg, cred);
		else
			error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else if (auio.uio_resid && error == 0)
		error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0) {
		if (rw == UIO_WRITE)
			vn_finished_write(mp);
		VOP_UNLOCK(vp, 0, td);
	}
	return (error);
}
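
/*
 * Example (an illustrative sketch, not code from this file; the
 * buffer and size are made up): reading the first 512 bytes of an
 * already locked vnode into a kernel buffer.  NOCRED supplies no file
 * credential, so active_cred alone is used for the access check.
 *
 *	char buf[512];
 *	int resid, error;
 *
 *	error = vn_rdwr(UIO_READ, vp, (caddr_t)buf, sizeof(buf), (off_t)0,
 *	    UIO_SYSSPACE, IO_NODELOCKED, td->td_ucred, NOCRED, &resid, td);
 *
 * On return, resid holds the number of bytes not transferred; with a
 * non-NULL aresid a short read against a small file is reported there
 * rather than as an error.
 */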

/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * call bwillwrite() before calling vn_rdwr().  We also call uio_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 */
int
vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, active_cred,
    file_cred, aresid, td)
	enum uio_rw rw;
	struct vnode *vp;
	caddr_t base;
	size_t len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *active_cred;
	struct ucred *file_cred;
	size_t *aresid;
	struct thread *td;
{
	int error = 0;
	int iaresid;

	GIANT_REQUIRED;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (rw != UIO_READ && vp->v_type == VREG)
			bwillwrite();
		iaresid = 0;
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
		    ioflg, active_cred, file_cred, &iaresid, td);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		uio_yield();
	} while (len);
	if (aresid)
		*aresid = len + iaresid;
	return (error);
}
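
/*
 * Chunking example (a sketch; MAXBSIZE is 65536): a 200000 byte write
 * starting at offset 1000 is issued as chunks of 64536, 65536, 65536
 * and 4392 bytes, so every chunk after the first begins on a MAXBSIZE
 * boundary.  This path is intended for potentially huge transfers
 * such as core dumps, where a single vn_rdwr() would pin the vnode
 * lock and flood the buffer cache for the whole transfer.
 */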

/*
 * File table vnode read routine.
 */
static int
vn_read(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	struct thread *td;
	int flags;
{
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	vp = fp->f_vnode;
	ioflag = 0;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	mtx_lock(&Giant);
	VOP_LEASE(vp, td, fp->f_cred, LEASE_READ);
	/*
	 * According to McKusick the vn lock is protecting f_offset here.
	 * Once this field has its own lock we can acquire it shared.
	 */
	if ((flags & FOF_OFFSET) == 0) {
		vn_lock(vp, LK_EXCLUSIVE | LK_NOPAUSE | LK_RETRY, td);
		uio->uio_offset = fp->f_offset;
	} else
		vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY, td);

	ioflag |= sequential_heuristic(uio, fp);

#ifdef MAC
	error = mac_check_vnode_read(active_cred, fp->f_cred, vp);
	if (error == 0)
#endif
		error = VOP_READ(vp, uio, ioflag, fp->f_cred);
	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0, td);
	mtx_unlock(&Giant);
	return (error);
}

/*
 * File table vnode write routine.
 */
static int
vn_write(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	struct thread *td;
	int flags;
{
	struct vnode *vp;
	struct mount *mp;
	int error, ioflag;

	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	vp = fp->f_vnode;
	mtx_lock(&Giant);
	if (vp->v_type == VREG)
		bwillwrite();
	ioflag = IO_UNIT;
	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	if ((fp->f_flag & O_FSYNC) ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	mp = NULL;
	if (vp->v_type != VCHR &&
	    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0) {
		mtx_unlock(&Giant);
		return (error);
	}
	VOP_LEASE(vp, td, fp->f_cred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);
#ifdef MAC
	error = mac_check_vnode_write(active_cred, fp->f_cred, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, uio, ioflag, fp->f_cred);
	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0, td);
	vn_finished_write(mp);
	mtx_unlock(&Giant);
	return (error);
}

/*
 * File table vnode stat routine.
 */
static int
vn_statfile(fp, sb, active_cred, td)
	struct file *fp;
	struct stat *sb;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vnode *vp = fp->f_vnode;
	int error;

	mtx_lock(&Giant);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	error = vn_stat(vp, sb, active_cred, fp->f_cred, td);
	VOP_UNLOCK(vp, 0, td);
	mtx_unlock(&Giant);

	return (error);
}

/*
 * Stat a vnode; implementation for the stat syscall
 */
int
vn_stat(vp, sb, active_cred, file_cred, td)
	struct vnode *vp;
	register struct stat *sb;
	struct ucred *active_cred;
	struct ucred *file_cred;
	struct thread *td;
{
	struct vattr vattr;
	register struct vattr *vap;
	int error;
	u_short mode;

	GIANT_REQUIRED;

#ifdef MAC
	error = mac_check_vnode_stat(active_cred, file_cred, vp);
	if (error)
		return (error);
#endif

	vap = &vattr;
	error = VOP_GETATTR(vp, vap, active_cred, td);
	if (error)
		return (error);

	vp->v_cachedfs = vap->va_fsid;
	vp->v_cachedid = vap->va_fileid;

	/*
	 * Zero the spare stat fields
	 */
	bzero(sb, sizeof *sb);

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/* This is a cosmetic change, symlinks do not have a mode. */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			sb->st_mode &= ~ACCESSPERMS;	/* 0000 */
		else
			sb->st_mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	if (vap->va_size > OFF_MAX)
		return (EOVERFLOW);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;
	sb->st_birthtimespec = vap->va_birthtime;

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 * "a filesystem-specific preferred I/O block size for this
	 * object.  In some filesystem types, this may vary from file
	 * to file"
	 * Default to PAGE_SIZE after much discussion.
	 */

	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		sb->st_blksize = vp->v_rdev->si_bsize_best;
		if (sb->st_blksize < vp->v_rdev->si_bsize_phys)
			sb->st_blksize = vp->v_rdev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}

	sb->st_flags = vap->va_flags;
	if (suser(td))
		sb->st_gen = 0;
	else
		sb->st_gen = vap->va_gen;

#if (S_BLKSIZE == 512)
	/* Optimize this case */
	sb->st_blocks = vap->va_bytes >> 9;
#else
	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
#endif
	return (0);
}
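
/*
 * Unit note (illustrative): st_size is in bytes while st_blocks is in
 * S_BLKSIZE (512 byte) units, so a file with 10240 bytes allocated
 * reports st_blocks of 20; the va_bytes >> 9 above is simply the
 * division by 512 specialized for the common case.
 */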

/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(fp, com, data, active_cred, td)
	struct file *fp;
	u_long com;
	void *data;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vnode *vp = fp->f_vnode;
	struct vattr vattr;
	int error;

	mtx_lock(&Giant);
	error = ENOTTY;
	switch (vp->v_type) {
	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
			error = VOP_GETATTR(vp, &vattr, active_cred, td);
			VOP_UNLOCK(vp, 0, td);
			if (!error)
				*(int *)data = vattr.va_size - fp->f_offset;
		}
		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
			error = 0;
		else
			error = VOP_IOCTL(vp, com, data, fp->f_flag,
			    active_cred, td);
		break;

	default:
#if 0
		break;
#endif
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK)
				break;
			*(int *)data = devsw(vp->v_rdev)->d_flags & D_TYPEMASK;
			error = 0;
			break;
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, active_cred, td);
		if (error == ENOIOCTL) {
#ifdef DIAGNOSTIC
			kdb_enter("ENOIOCTL leaked through");
#endif
			error = ENOTTY;
		}
		if (error == 0 && com == TIOCSCTTY) {
			struct vnode *vpold;

			/* Do nothing if reassigning same control tty */
			sx_slock(&proctree_lock);
			if (td->td_proc->p_session->s_ttyvp == vp) {
				sx_sunlock(&proctree_lock);
				error = 0;
				break;
			}

			vpold = td->td_proc->p_session->s_ttyvp;
			VREF(vp);
			SESS_LOCK(td->td_proc->p_session);
			td->td_proc->p_session->s_ttyvp = vp;
			SESS_UNLOCK(td->td_proc->p_session);

			sx_sunlock(&proctree_lock);

			/* Get rid of reference to old control tty */
			if (vpold)
				vrele(vpold);
		}
		break;
	}
	mtx_unlock(&Giant);
	return (error);
}

/*
 * File table vnode poll routine.
 */
static int
vn_poll(fp, events, active_cred, td)
	struct file *fp;
	int events;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vnode *vp;
	int error;

	mtx_lock(&Giant);

	vp = fp->f_vnode;
#ifdef MAC
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	error = mac_check_vnode_poll(active_cred, fp->f_cred, vp);
	VOP_UNLOCK(vp, 0, td);
	if (!error)
#endif

	error = VOP_POLL(vp, events, fp->f_cred, td);
	mtx_unlock(&Giant);
	return (error);
}

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
#ifndef DEBUG_LOCKS
vn_lock(vp, flags, td)
#else
debug_vn_lock(vp, flags, td, filename, line)
#endif
	struct vnode *vp;
	int flags;
	struct thread *td;
#ifdef DEBUG_LOCKS
	const char *filename;
	int line;
#endif
{
	int error;

	do {
		if ((flags & LK_INTERLOCK) == 0)
			VI_LOCK(vp);
		if ((vp->v_iflag & VI_XLOCK) && vp->v_vxthread != curthread) {
			if ((flags & LK_NOWAIT) != 0) {
				VI_UNLOCK(vp);
				return (ENOENT);
			}
			vp->v_iflag |= VI_XWANT;
			msleep(vp, VI_MTX(vp), PINOD, "vn_lock", 0);
			if ((flags & LK_RETRY) == 0) {
				VI_UNLOCK(vp);
				return (ENOENT);
			}
		}
#ifdef DEBUG_LOCKS
		vp->filename = filename;
		vp->line = line;
#endif
		/*
		 * lockmgr drops interlock before it will return for
		 * any reason.  So force the code above to relock it.
		 */
		error = VOP_LOCK(vp, flags | LK_NOPAUSE | LK_INTERLOCK, td);
		flags &= ~LK_INTERLOCK;
	} while (flags & LK_RETRY && error != 0);
	return (error);
}
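
/*
 * Usage note (illustrative): callers normally pair vn_lock() with
 * VOP_UNLOCK() around a sequence of vnode operations, e.g.
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
 *	error = VOP_GETATTR(vp, &vattr, cred, td);
 *	VOP_UNLOCK(vp, 0, td);
 *
 * With LK_RETRY the lock is acquired even while the vnode is being
 * reclaimed; without it, a vnode caught in VI_XLOCK produces ENOENT
 * so the caller can back out.
 */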

/*
 * File table vnode close routine.
 */
static int
vn_closefile(fp, td)
	struct file *fp;
	struct thread *td;
{
	struct vnode *vp;
	struct flock lf;
	int error;

	vp = fp->f_vnode;

	mtx_lock(&Giant);
	if (fp->f_type == DTYPE_VNODE && fp->f_flag & FHASLOCK) {
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		lf.l_type = F_UNLCK;
		(void) VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK);
	}

	fp->f_ops = &badfileops;

	error = vn_close(vp, fp->f_flag, fp->f_cred, td);
	mtx_unlock(&Giant);
	return (error);
}

/*
 * Prepare to start a filesystem write operation.  If the operation is
 * permitted, bump the count of operations in progress and proceed.
 * If a suspend request is in progress, wait until the suspension is
 * over, and then proceed.
 */
int
vn_start_write(vp, mpp, flags)
	struct vnode *vp;
	struct mount **mpp;
	int flags;
{
	struct mount *mp;
	int error;

	GIANT_REQUIRED;

	/*
	 * If a vnode is provided, get and return the mount point to
	 * which it will write.
	 */
	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
			*mpp = NULL;
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	if ((mp = *mpp) == NULL)
		return (0);
	/*
	 * Check on status of suspension.
	 */
	while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
		if (flags & V_NOWAIT)
			return (EWOULDBLOCK);
		error = tsleep(&mp->mnt_flag, (PUSER - 1) | (flags & PCATCH),
		    "suspfs", 0);
		if (error)
			return (error);
	}
	if (flags & V_XSLEEP)
		return (0);
	mp->mnt_writeopcount++;
	return (0);
}
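
/*
 * Protocol sketch (illustrative): every path that will dirty a
 * filesystem brackets the work with this pair, as vn_write() above
 * does:
 *
 *	if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
 *		return (error);
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
 *	error = VOP_WRITE(vp, &auio, ioflag, cred);
 *	VOP_UNLOCK(vp, 0, td);
 *	vn_finished_write(mp);
 *
 * The ordering matters: vn_start_write() is called before taking the
 * vnode lock so that a writer blocked by a suspension never sleeps
 * while holding it.
 */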

/*
 * Secondary suspension.  Used by operations such as vop_inactive
 * routines that are needed by the higher level functions.  These
 * are allowed to proceed until all the higher level functions have
 * completed (indicated by mnt_writeopcount dropping to zero).  At that
 * time, these operations are halted until the suspension is over.
 */
int
vn_write_suspend_wait(vp, mp, flags)
	struct vnode *vp;
	struct mount *mp;
	int flags;
{
	int error;

	GIANT_REQUIRED;

	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, &mp)) != 0) {
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	/*
	 * If we are not suspended or have not yet reached suspended
	 * mode, then let the operation proceed.
	 */
	if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPENDED) == 0)
		return (0);
	if (flags & V_NOWAIT)
		return (EWOULDBLOCK);
	/*
	 * Wait for the suspension to finish.
	 */
	return (tsleep(&mp->mnt_flag, (PUSER - 1) | (flags & PCATCH),
	    "suspfs", 0));
}

/*
 * Filesystem write operation has completed.  If we are suspending and this
 * operation is the last one, notify the suspender that the suspension is
 * now in effect.
 */
void
vn_finished_write(mp)
	struct mount *mp;
{

	GIANT_REQUIRED;

	if (mp == NULL)
		return;
	mp->mnt_writeopcount--;
	if (mp->mnt_writeopcount < 0)
		panic("vn_finished_write: neg cnt");
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
	    mp->mnt_writeopcount <= 0)
		wakeup(&mp->mnt_writeopcount);
}

/*
 * Request a filesystem to suspend write operations.
 */
int
vfs_write_suspend(mp)
	struct mount *mp;
{
	struct thread *td = curthread;
	int error;

	GIANT_REQUIRED;

	if (mp->mnt_kern_flag & MNTK_SUSPEND)
		return (0);
	mp->mnt_kern_flag |= MNTK_SUSPEND;
	if (mp->mnt_writeopcount > 0)
		(void) tsleep(&mp->mnt_writeopcount, PUSER - 1, "suspwt", 0);
	if ((error = VFS_SYNC(mp, MNT_WAIT, td->td_ucred, td)) != 0) {
		vfs_write_resume(mp);
		return (error);
	}
	mp->mnt_kern_flag |= MNTK_SUSPENDED;
	return (0);
}

/*
 * Request a filesystem to resume write operations.
 */
void
vfs_write_resume(mp)
	struct mount *mp;
{

	GIANT_REQUIRED;

	if ((mp->mnt_kern_flag & MNTK_SUSPEND) == 0)
		return;
	mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPENDED);
	wakeup(&mp->mnt_writeopcount);
	wakeup(&mp->mnt_flag);
}
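
/*
 * Usage sketch (illustrative): a consumer that needs a quiescent
 * filesystem, such as FFS snapshot creation, suspends writes, does
 * its work while MNTK_SUSPENDED holds new writers in "suspfs", and
 * then resumes:
 *
 *	if ((error = vfs_write_suspend(mp)) != 0)
 *		return (error);
 *	... capture the snapshot / record stable state ...
 *	vfs_write_resume(mp);
 *
 * vfs_write_suspend() returns success only after mnt_writeopcount has
 * drained and VFS_SYNC() has flushed the mount.
 */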

/*
 * Implement kqueues for files by translating them to a vnode operation.
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
	int error;

	mtx_lock(&Giant);
	error = VOP_KQFILTER(fp->f_vnode, kn);
	mtx_unlock(&Giant);

	return (error);
}

/*
 * Simplified in-kernel wrapper calls for extended attribute access.
 * These calls pass in a NULL credential, authorizing as "kernel" access.
 * Set IO_NODELOCKED in ioflg if the vnode is already locked.
 */
int
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int *buflen, char *buf, struct thread *td)
{
	struct uio auio;
	struct iovec iov;
	int error;

	iov.iov_len = *buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = *buflen;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");

	/* authorize attribute retrieval as kernel */
	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
	    td);

	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp, 0, td);

	if (error == 0) {
		*buflen = *buflen - auio.uio_resid;
	}

	return (error);
}
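
/*
 * Example (an illustrative sketch; the attribute name is made up):
 * fetching a system-namespace attribute into a stack buffer.
 *
 *	char buf[128];
 *	int buflen, error;
 *
 *	buflen = sizeof(buf);
 *	error = vn_extattr_get(vp, IO_NODELOCKED,
 *	    EXTATTR_NAMESPACE_SYSTEM, "example_attr", &buflen, buf, td);
 *
 * On success buflen is updated to the number of bytes actually read;
 * passing IO_NODELOCKED asserts that the caller already holds the
 * vnode lock.
 */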

/*
 * XXX failure mode if partially written?
 */
int
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int buflen, char *buf, struct thread *td)
{
	struct uio auio;
	struct iovec iov;
	struct mount *mp;
	int error;

	iov.iov_len = buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = buflen;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	}

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");

	/* authorize attribute setting as kernel */
	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0, td);
	}

	return (error);
}

int
vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, struct thread *td)
{
	struct mount *mp;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	}

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");

	/* authorize attribute removal as kernel */
	error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, td);
	if (error == EOPNOTSUPP)
		error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
		    NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0, td);
	}

	return (error);
}