/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.2/sys/kern/vfs_default.c 212137 2010-09-02 04:56:01Z brian $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/event.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/poll.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_norename(struct vop_rename_args *);
static int	vop_nostrategy(struct vop_strategy_args *);
static int	get_next_dirent(struct vnode *vp, struct dirent **dpp,
				char *dirbuf, int dirbuflen, off_t *off,
				char **cpos, int *len, int *eofflag,
				struct thread *td);
static int	dirent_exists(struct vnode *vp, const char *dirname,
			      struct thread *td);

#define DIRENT_MINSIZE (sizeof(struct dirent) - (MAXNAMLEN+1) + 4)

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 *
 * Note that every filesystem has to implement either vop_access
 * or vop_accessx; failing to do so will result in an immediate crash
 * due to stack overflow, as vop_stdaccess() calls vop_stdaccessx(),
 * which calls vop_stdaccess() again, and so on.
 */

struct vop_vector default_vnodeops = {
	.vop_default =		NULL,
	.vop_bypass =		VOP_EOPNOTSUPP,

	.vop_access =		vop_stdaccess,
	.vop_accessx =		vop_stdaccessx,
	.vop_advlock =		vop_stdadvlock,
	.vop_advlockasync =	vop_stdadvlockasync,
	.vop_bmap =		vop_stdbmap,
	.vop_close =		VOP_NULL,
	.vop_fsync =		VOP_NULL,
	.vop_getpages =		vop_stdgetpages,
	.vop_getwritemount =	vop_stdgetwritemount,
	.vop_inactive =		VOP_NULL,
	.vop_ioctl =		VOP_ENOTTY,
	.vop_kqfilter =		vop_stdkqfilter,
	.vop_islocked =		vop_stdislocked,
	.vop_lock1 =		vop_stdlock,
	.vop_lookup =		vop_nolookup,
	.vop_open =		VOP_NULL,
	.vop_pathconf =		VOP_EINVAL,
	.vop_poll =		vop_nopoll,
	.vop_putpages =		vop_stdputpages,
	.vop_readlink =		VOP_EINVAL,
	.vop_rename =		vop_norename,
	.vop_revoke =		VOP_PANIC,
	.vop_strategy =		vop_nostrategy,
	.vop_unlock =		vop_stdunlock,
	.vop_vptocnp =		vop_stdvptocnp,
	.vop_vptofh =		vop_stdvptofh,
};
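
/*
 * A typical filesystem chains to this table through vop_default and
 * supplies only the operations it actually implements; anything left
 * unset falls through to the defaults above.  A minimal sketch follows
 * (foo_access, foo_lookup, etc. are illustrative names, not existing
 * symbols).  Remember that one of vop_access/vop_accessx must be a real
 * implementation to avoid the mutual recursion described in the comment
 * preceding the table.
 *
 *	static struct vop_vector foo_vnodeops = {
 *		.vop_default =	&default_vnodeops,
 *		.vop_access =	foo_access,
 *		.vop_lookup =	foo_lookup,
 *		.vop_getattr =	foo_getattr,
 *		.vop_readdir =	foo_readdir,
 *	};
 */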

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_enoent(struct vop_generic_args *ap)
{

	return (ENOENT);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * Documentation for the operations they implement, where it exists, is in
 * the VOP_<SOMETHING>(9) manpage (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}
/*
 * vop_norename:
 *
 * Handle unlock and reference counting for arguments of vop_rename
 * for filesystems that do not implement the rename operation.
 */
static int
vop_norename(struct vop_rename_args *ap)
{

	vop_rename_fail(ap);
	return (EOPNOTSUPP);
}

/*
 * vop_nostrategy:
 *
 * Strategy routine for VFS devices that have none.
 *
 * BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 * routine.  Typically this is done for a BIO_READ strategy call.
 * Typically B_INVAL is assumed to already be clear prior to a write
 * and should not be cleared manually unless you just made the buffer
 * invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("vnode", ap->a_vp);
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}
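
/*
 * For illustration, a caller issuing a read through the strategy interface
 * typically prepares the buffer roughly as below before the call.  This is
 * a hedged sketch of the usual pattern, not code taken from any particular
 * filesystem:
 *
 *	bp->b_iocmd = BIO_READ;
 *	bp->b_flags &= ~B_INVAL;
 *	bp->b_ioflags &= ~BIO_ERROR;
 *	VOP_STRATEGY(vp, bp);
 *	error = bufwait(bp);
 */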

static int
get_next_dirent(struct vnode *vp, struct dirent **dpp, char *dirbuf,
		int dirbuflen, off_t *off, char **cpos, int *len,
		int *eofflag, struct thread *td)
{
	int error, reclen;
	struct uio uio;
	struct iovec iov;
	struct dirent *dp;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	if (*len == 0) {
		iov.iov_base = dirbuf;
		iov.iov_len = dirbuflen;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = *off;
		uio.uio_resid = dirbuflen;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_READ;
		uio.uio_td = td;

		*eofflag = 0;

#ifdef MAC
		error = mac_vnode_check_readdir(td->td_ucred, vp);
		if (error == 0)
#endif
			error = VOP_READDIR(vp, &uio, td->td_ucred, eofflag,
			    NULL, NULL);
		if (error)
			return (error);

		*off = uio.uio_offset;

		*cpos = dirbuf;
		*len = (dirbuflen - uio.uio_resid);

		if (*len == 0)
			return (ENOENT);
	}

	dp = (struct dirent *)(*cpos);
	reclen = dp->d_reclen;
	*dpp = dp;

	/* Check for a malformed directory entry. */
	if (reclen < DIRENT_MINSIZE)
		return (EINVAL);

	*cpos += reclen;
	*len -= reclen;

	return (0);
}

/*
 * Check if a named file exists in a given directory vnode.
 */
static int
dirent_exists(struct vnode *vp, const char *dirname, struct thread *td)
{
	char *dirbuf, *cpos;
	int error, eofflag, dirbuflen, len, found;
	off_t off;
	struct dirent *dp;
	struct vattr va;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	found = 0;

	error = VOP_GETATTR(vp, &va, td->td_ucred);
	if (error)
		return (found);

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	off = 0;
	len = 0;
	do {
		error = get_next_dirent(vp, &dp, dirbuf, dirbuflen, &off,
					&cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if ((dp->d_type != DT_WHT) &&
		    !strcmp(dp->d_name, dirname)) {
			found = 1;
			goto out;
		}
	} while (len > 0 || !eofflag);

out:
	free(dirbuf, M_TEMP);
	return (found);
}

int
vop_stdaccess(struct vop_access_args *ap)
{

	KASSERT((ap->a_accmode & ~(VEXEC | VWRITE | VREAD | VADMIN |
	    VAPPEND)) == 0, ("invalid bit in accmode"));

	return (VOP_ACCESSX(ap->a_vp, ap->a_accmode, ap->a_cred, ap->a_td));
}

int
vop_stdaccessx(struct vop_accessx_args *ap)
{
	int error;
	accmode_t accmode = ap->a_accmode;

	error = vfs_unixify_accmode(&accmode);
	if (error != 0)
		return (error);

	if (accmode == 0)
		return (0);

	return (VOP_ACCESS(ap->a_vp, accmode, ap->a_cred, ap->a_td));
}

/*
 * Advisory record locking support
 */
int
vop_stdadvlock(struct vop_advlock_args *ap)
{
	struct vnode *vp;
	struct ucred *cred;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	cred = curthread->td_ucred;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(vp, &vattr, cred);
	VOP_UNLOCK(vp, 0);
	if (error)
		return (error);

	return (lf_advlock(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockasync(struct vop_advlockasync_args *ap)
{
	struct vnode *vp;
	struct ucred *cred;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	cred = curthread->td_ucred;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(vp, &vattr, cred);
	VOP_UNLOCK(vp, 0);
	if (error)
		return (error);

	return (lf_advlockasync(ap, &(vp->v_lockf), vattr.va_size));
}

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override this per-filesystem when the filesystem has smaller limits.
 */
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
		case _PC_NAME_MAX:
			*ap->a_retval = NAME_MAX;
			return (0);
		case _PC_PATH_MAX:
			*ap->a_retval = PATH_MAX;
			return (0);
		case _PC_LINK_MAX:
			*ap->a_retval = LINK_MAX;
			return (0);
		case _PC_MAX_CANON:
			*ap->a_retval = MAX_CANON;
			return (0);
		case _PC_MAX_INPUT:
			*ap->a_retval = MAX_INPUT;
			return (0);
		case _PC_PIPE_BUF:
			*ap->a_retval = PIPE_BUF;
			return (0);
		case _PC_CHOWN_RESTRICTED:
			*ap->a_retval = 1;
			return (0);
		case _PC_VDISABLE:
			*ap->a_retval = _POSIX_VDISABLE;
			return (0);
		default:
			return (EINVAL);
	}
	/* NOTREACHED */
}
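
/*
 * A filesystem with its own limits typically overrides only the names it
 * cares about and punts the rest back to the routine above.  A minimal
 * sketch, assuming a hypothetical foo_pathconf() and FOO_NAME_MAX (neither
 * exists in the tree):
 *
 *	static int
 *	foo_pathconf(struct vop_pathconf_args *ap)
 *	{
 *
 *		switch (ap->a_name) {
 *		case _PC_NAME_MAX:
 *			*ap->a_retval = FOO_NAME_MAX;
 *			return (0);
 *		case _PC_NO_TRUNC:
 *			*ap->a_retval = 1;
 *			return (0);
 *		default:
 *			return (vop_stdpathconf(ap));
 *		}
 *	}
 */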

/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(ap)
	struct vop_lock1_args /* {
		struct vnode *a_vp;
		int a_flags;
		char *file;
		int line;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (_lockmgr_args(vp->v_vnlock, ap->a_flags, VI_MTX(vp),
	    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, ap->a_file,
	    ap->a_line));
}

/* See above. */
int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp)));
}

/* See above. */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	return (lockstatus(ap->a_vp->v_vnlock));
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{

	return (poll_no_poll(ap->a_events));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{
	struct mount *mp;

	/*
	 * XXX Since this is called unlocked we may be recycled while
	 * attempting to ref the mount.  If this is the case our mountpoint
	 * will be set to NULL.  We only have to prevent this call from
	 * returning with a ref to an incorrect mountpoint.  It is not
	 * harmful to return with a ref to our previous mountpoint.
	 */
	mp = ap->a_vp->v_mount;
	if (mp != NULL) {
		vfs_ref(mp);
		if (mp != ap->a_vp->v_mount) {
			vfs_rel(mp);
			mp = NULL;
		}
	}
	*(ap->a_mpp) = mp;
	return (0);
}

/*
 * XXX Needs good comment and VOP_BMAP(9) manpage.
 *
 * Provide a trivial 1:1 logical-to-physical block mapping: the physical
 * block number is the logical block number scaled by the filesystem I/O
 * size and expressed in DEV_BSIZE units, and no read-ahead or read-behind
 * run is reported.
 */
int
vop_stdbmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct bufobj **a_bop;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_bop != NULL)
		*ap->a_bop = &ap->a_vp->v_bufobj;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}

int
vop_stdfsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct buf *bp;
	struct bufobj *bo;
	struct buf *nbp;
	int error = 0;
	int maxretry = 1000;	/* large, arbitrarily chosen */

	bo = &vp->v_bufobj;
	BO_LOCK(bo);
loop1:
	/*
	 * MARK/SCAN initialization to avoid infinite loops.
	 */
	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
		bp->b_vflags &= ~BV_SCANNED;
		bp->b_error = 0;
	}

	/*
	 * Flush all dirty buffers associated with a vnode.
	 */
loop2:
	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
			continue;
		BO_UNLOCK(bo);
		KASSERT(bp->b_bufobj == bo,
		    ("bp %p wrong b_bufobj %p should be %p",
		    bp, bp->b_bufobj, bo));
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("fsync: not dirty");
		if ((vp->v_object != NULL) && (bp->b_flags & B_CLUSTEROK)) {
			vfs_bio_awrite(bp);
		} else {
			bremfree(bp);
			bawrite(bp);
		}
		BO_LOCK(bo);
		goto loop2;
	}

	/*
	 * If synchronous the caller expects us to completely resolve all
	 * dirty buffers in the system.  Wait for in-progress I/O to
	 * complete (which could include background bitmap writes), then
	 * retry if dirty blocks still exist.
	 */
	if (ap->a_waitfor == MNT_WAIT) {
		bufobj_wwait(bo, 0, 0);
		if (bo->bo_dirty.bv_cnt > 0) {
			/*
			 * If we are unable to write any of these buffers
			 * then we fail now rather than trying endlessly
			 * to write them out.
			 */
			TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
				if ((error = bp->b_error) == 0)
					continue;
			if (error == 0 && --maxretry >= 0)
				goto loop1;
			error = EAGAIN;
		}
	}
	BO_UNLOCK(bo);
	if (error == EAGAIN)
		vprint("fsync: giving up on dirty", vp);

	return (error);
}

/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
int
vop_stdgetpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_reqpage);
}

int
vop_stdkqfilter(struct vop_kqfilter_args *ap)
{
	return vfs_kqfilter(ap);
}

/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
int
vop_stdputpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	     ap->a_sync, ap->a_rtvals);
}

int
vop_stdvptofh(struct vop_vptofh_args *ap)
{
	return (EOPNOTSUPP);
}

int
vop_stdvptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct ucred *cred = ap->a_cred;
	char *buf = ap->a_buf;
	int *buflen = ap->a_buflen;
	char *dirbuf, *cpos;
	int i, error, eofflag, dirbuflen, flags, locked, len, covered;
	off_t off;
	ino_t fileno;
	struct vattr va;
	struct nameidata nd;
	struct thread *td;
	struct dirent *dp;
	struct vnode *mvp;

	i = *buflen;
	error = 0;
	covered = 0;
	td = curthread;

	if (vp->v_type != VDIR)
		return (ENOENT);

	error = VOP_GETATTR(vp, &va, cred);
	if (error)
		return (error);

	VREF(vp);
	locked = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp, 0);
	NDINIT_ATVP(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
	    "..", vp, td);
	flags = FREAD;
	error = vn_open_cred(&nd, &flags, 0, VN_OPEN_NOAUDIT, cred, NULL);
	if (error) {
		vn_lock(vp, locked | LK_RETRY);
		return (error);
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);

	mvp = *dvp = nd.ni_vp;

	if (vp->v_mount != (*dvp)->v_mount &&
	    ((*dvp)->v_vflag & VV_ROOT) &&
	    ((*dvp)->v_mount->mnt_flag & MNT_UNION)) {
		*dvp = (*dvp)->v_mount->mnt_vnodecovered;
		VREF(mvp);
		VOP_UNLOCK(mvp, 0);
		vn_close(mvp, FREAD, cred, td);
		VREF(*dvp);
		vn_lock(*dvp, LK_EXCLUSIVE | LK_RETRY);
		covered = 1;
	}

	fileno = va.va_fileid;

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	if ((*dvp)->v_type != VDIR) {
		error = ENOENT;
		goto out;
	}

	off = 0;
	len = 0;
	do {
		/* call VOP_READDIR of parent */
		error = get_next_dirent(*dvp, &dp, dirbuf, dirbuflen, &off,
					&cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if ((dp->d_type != DT_WHT) &&
		    (dp->d_fileno == fileno)) {
			if (covered) {
				VOP_UNLOCK(*dvp, 0);
				vn_lock(mvp, LK_EXCLUSIVE | LK_RETRY);
				if (dirent_exists(mvp, dp->d_name, td)) {
					error = ENOENT;
					VOP_UNLOCK(mvp, 0);
					vn_lock(*dvp, LK_EXCLUSIVE | LK_RETRY);
					goto out;
				}
				VOP_UNLOCK(mvp, 0);
				vn_lock(*dvp, LK_EXCLUSIVE | LK_RETRY);
			}
			i -= dp->d_namlen;

			if (i < 0) {
				error = ENOMEM;
				goto out;
			}
			bcopy(dp->d_name, buf + i, dp->d_namlen);
			error = 0;
			goto out;
		}
	} while (len > 0 || !eofflag);
	error = ENOENT;

out:
	free(dirbuf, M_TEMP);
	if (!error) {
		*buflen = i;
		vhold(*dvp);
	}
	if (covered) {
		vput(*dvp);
		vrele(mvp);
	} else {
		VOP_UNLOCK(mvp, 0);
		vn_close(mvp, FREAD, cred, td);
	}
	vn_lock(vp, locked | LK_RETRY);
	return (error);
}

/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
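
/*
 * A filesystem's struct vfsops can point entries it does not implement at
 * these routines, or simply leave them NULL; the registration code normally
 * substitutes the same defaults for missing entries.  A minimal sketch,
 * assuming hypothetical foo_mount/foo_unmount/foo_root/foo_statfs (none of
 * these exist in the tree):
 *
 *	static struct vfsops foo_vfsops = {
 *		.vfs_mount =	foo_mount,
 *		.vfs_unmount =	foo_unmount,
 *		.vfs_root =	foo_root,
 *		.vfs_statfs =	foo_statfs,
 *		.vfs_sync =	vfs_stdsync,
 *	};
 *	VFS_SET(foo_vfsops, foo, 0);
 */
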
int
vfs_stdroot (mp, flags, vpp)
	struct mount *mp;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp)
	struct mount *mp;
	struct statfs *sbp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdquotactl (mp, cmds, uid, arg)
	struct mount *mp;
	int cmds;
	uid_t uid;
	void *arg;
{

	return (EOPNOTSUPP);
}

int
vfs_stdsync(mp, waitfor)
	struct mount *mp;
	int waitfor;
{
	struct vnode *vp, *mvp;
	struct thread *td;
	int error, lockreq, allerror = 0;

	td = curthread;
	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
	MNT_ILOCK(mp);
loop:
	MNT_VNODE_FOREACH(vp, mp, mvp) {
		/* bv_cnt is an acceptable race here. */
		if (vp->v_bufobj.bo_dirty.bv_cnt == 0)
			continue;
		VI_LOCK(vp);
		MNT_IUNLOCK(mp);
		if ((error = vget(vp, lockreq, td)) != 0) {
			MNT_ILOCK(mp);
			if (error == ENOENT) {
				MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
				goto loop;
			}
			continue;
		}
		error = VOP_FSYNC(vp, waitfor, td);
		if (error)
			allerror = error;

		/* Do not turn this into vput.  td is not always curthread. */
		VOP_UNLOCK(vp, 0);
		vrele(vp);
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);
	return (allerror);
}

int
vfs_stdnosync (mp, waitfor)
	struct mount *mp;
	int waitfor;
{

	return (0);
}

int
vfs_stdvget (mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, vpp)
	struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname)
	struct mount *mp;
	int cmd;
	struct vnode *filename_vp;
	int attrnamespace;
	const char *attrname;
{

	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp, 0);
	return (EOPNOTSUPP);
}

int
vfs_stdsysctl(mp, op, req)
	struct mount *mp;
	fsctlop_t op;
	struct sysctl_req *req;
{

	return (EOPNOTSUPP);
}

/* end of vfs default ops */