/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/9.1/sys/kern/vfs_default.c 237351 2012-06-21 03:58:10Z mckusick $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/event.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/poll.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_norename(struct vop_rename_args *);
static int	vop_nostrategy(struct vop_strategy_args *);
static int	get_next_dirent(struct vnode *vp, struct dirent **dpp,
				char *dirbuf, int dirbuflen, off_t *off,
				char **cpos, int *len, int *eofflag,
				struct thread *td);
static int	dirent_exists(struct vnode *vp, const char *dirname,
			      struct thread *td);

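/*
 * Smallest directory entry a VOP_READDIR() backend may legitimately return:
 * the fixed dirent header plus a 4-byte name area.  Records shorter than
 * this are treated as a malformed directory by get_next_dirent() below.
 */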
#define DIRENT_MINSIZE (sizeof(struct dirent) - (MAXNAMLEN+1) + 4)

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 *
 * Note that every filesystem has to implement either vop_access
 * or vop_accessx; failing to do so will result in immediate crash
 * due to stack overflow, as vop_stdaccess() calls vop_stdaccessx(),
 * which calls vop_stdaccess() etc.
 */

struct vop_vector default_vnodeops = {
	.vop_default = NULL,
	.vop_bypass = VOP_EOPNOTSUPP,

	.vop_access = vop_stdaccess,
	.vop_accessx = vop_stdaccessx,
	.vop_advise = vop_stdadvise,
	.vop_advlock = vop_stdadvlock,
	.vop_advlockasync = vop_stdadvlockasync,
	.vop_advlockpurge = vop_stdadvlockpurge,
	.vop_allocate = vop_stdallocate,
	.vop_bmap = vop_stdbmap,
	.vop_close = VOP_NULL,
	.vop_fsync = VOP_NULL,
	.vop_getpages = vop_stdgetpages,
	.vop_getwritemount = vop_stdgetwritemount,
	.vop_inactive = VOP_NULL,
	.vop_ioctl = VOP_ENOTTY,
	.vop_kqfilter = vop_stdkqfilter,
	.vop_islocked = vop_stdislocked,
	.vop_lock1 = vop_stdlock,
	.vop_lookup = vop_nolookup,
	.vop_open = VOP_NULL,
	.vop_pathconf = VOP_EINVAL,
	.vop_poll = vop_nopoll,
	.vop_putpages = vop_stdputpages,
	.vop_readlink = VOP_EINVAL,
	.vop_rename = vop_norename,
	.vop_revoke = VOP_PANIC,
	.vop_strategy = vop_nostrategy,
	.vop_unlock = vop_stdunlock,
	.vop_vptocnp = vop_stdvptocnp,
	.vop_vptofh = vop_stdvptofh,
	.vop_unp_bind = vop_stdunp_bind,
	.vop_unp_connect = vop_stdunp_connect,
	.vop_unp_detach = vop_stdunp_detach,
};
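
/*
 * Illustrative sketch (not compiled here; the "myfs_*" names are
 * hypothetical): a filesystem normally reaches the table above through the
 * .vop_default field of its own vop_vector, so any VOP it leaves unset
 * falls back to these defaults:
 *
 *	static struct vop_vector myfs_vnodeops = {
 *		.vop_default = &default_vnodeops,
 *		.vop_lookup = myfs_lookup,
 *		.vop_readdir = myfs_readdir,
 *		.vop_reclaim = myfs_reclaim,
 *	};
 */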

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_enoent(struct vop_generic_args *ap)
{

	return (ENOENT);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * The documentation for these operations, where it exists, is found in the
 * corresponding VOP_<SOMETHING>(9) manual page (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * vop_norename:
 *
 * Handle unlock and reference counting for arguments of vop_rename
 * for filesystems that do not implement a rename operation.
 */
static int
vop_norename(struct vop_rename_args *ap)
{

	vop_rename_fail(ap);
	return (EOPNOTSUPP);
}

/*
 * vop_nostrategy:
 *
 * Strategy routine for VFS devices that have none.
 *
 * BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 * routine.  Typically this is done for a BIO_READ strategy call.
 * Typically B_INVAL is assumed to already be clear prior to a write
 * and should not be cleared manually unless you just made the buffer
 * invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("vnode", ap->a_vp);
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}

static int
get_next_dirent(struct vnode *vp, struct dirent **dpp, char *dirbuf,
		int dirbuflen, off_t *off, char **cpos, int *len,
		int *eofflag, struct thread *td)
{
	int error, reclen;
	struct uio uio;
	struct iovec iov;
	struct dirent *dp;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	if (*len == 0) {
		iov.iov_base = dirbuf;
		iov.iov_len = dirbuflen;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = *off;
		uio.uio_resid = dirbuflen;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_READ;
		uio.uio_td = td;

		*eofflag = 0;

#ifdef MAC
		error = mac_vnode_check_readdir(td->td_ucred, vp);
		if (error == 0)
#endif
			error = VOP_READDIR(vp, &uio, td->td_ucred, eofflag,
			    NULL, NULL);
		if (error)
			return (error);

		*off = uio.uio_offset;

		*cpos = dirbuf;
		*len = (dirbuflen - uio.uio_resid);

		if (*len == 0)
			return (ENOENT);
	}

	dp = (struct dirent *)(*cpos);
	reclen = dp->d_reclen;
	*dpp = dp;

	/* check for malformed directory.. */
	if (reclen < DIRENT_MINSIZE)
		return (EINVAL);

	*cpos += reclen;
	*len -= reclen;

	return (0);
}

/*
 * Check if a named file exists in a given directory vnode.
 */
static int
dirent_exists(struct vnode *vp, const char *dirname, struct thread *td)
{
	char *dirbuf, *cpos;
	int error, eofflag, dirbuflen, len, found;
	off_t off;
	struct dirent *dp;
	struct vattr va;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	found = 0;

	error = VOP_GETATTR(vp, &va, td->td_ucred);
	if (error)
		return (found);

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	off = 0;
	len = 0;
	do {
		error = get_next_dirent(vp, &dp, dirbuf, dirbuflen, &off,
					&cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if ((dp->d_type != DT_WHT) &&
		    !strcmp(dp->d_name, dirname)) {
			found = 1;
			goto out;
		}
	} while (len > 0 || !eofflag);

out:
	free(dirbuf, M_TEMP);
	return (found);
}

int
vop_stdaccess(struct vop_access_args *ap)
{

	KASSERT((ap->a_accmode & ~(VEXEC | VWRITE | VREAD | VADMIN |
	    VAPPEND)) == 0, ("invalid bit in accmode"));

	return (VOP_ACCESSX(ap->a_vp, ap->a_accmode, ap->a_cred, ap->a_td));
}

int
vop_stdaccessx(struct vop_accessx_args *ap)
{
	int error;
	accmode_t accmode = ap->a_accmode;

	error = vfs_unixify_accmode(&accmode);
	if (error != 0)
		return (error);

	if (accmode == 0)
		return (0);

	return (VOP_ACCESS(ap->a_vp, accmode, ap->a_cred, ap->a_td));
}

/*
 * Advisory record locking support
 */
int
vop_stdadvlock(struct vop_advlock_args *ap)
{
	struct vnode *vp;
	struct ucred *cred;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	cred = curthread->td_ucred;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(vp, &vattr, cred);
	VOP_UNLOCK(vp, 0);
	if (error)
		return (error);

	return (lf_advlock(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockasync(struct vop_advlockasync_args *ap)
{
	struct vnode *vp;
	struct ucred *cred;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	cred = curthread->td_ucred;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(vp, &vattr, cred);
	VOP_UNLOCK(vp, 0);
	if (error)
		return (error);

	return (lf_advlockasync(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockpurge(struct vop_advlockpurge_args *ap)
{
	struct vnode *vp;

	vp = ap->a_vp;
	lf_purgelocks(vp, &vp->v_lockf);
	return (0);
}

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override per filesystem for the case where the filesystem has smaller
 * limits.
 */
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
	case _PC_NAME_MAX:
		*ap->a_retval = NAME_MAX;
		return (0);
	case _PC_PATH_MAX:
		*ap->a_retval = PATH_MAX;
		return (0);
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		return (0);
	case _PC_MAX_CANON:
		*ap->a_retval = MAX_CANON;
		return (0);
	case _PC_MAX_INPUT:
		*ap->a_retval = MAX_INPUT;
		return (0);
	case _PC_PIPE_BUF:
		*ap->a_retval = PIPE_BUF;
		return (0);
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 1;
		return (0);
	case _PC_VDISABLE:
		*ap->a_retval = _POSIX_VDISABLE;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}
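
/*
 * Illustrative sketch (hypothetical "myfs_pathconf", not compiled here): a
 * filesystem with tighter limits typically answers only the names it cares
 * about and defers the rest to vop_stdpathconf():
 *
 *	static int
 *	myfs_pathconf(struct vop_pathconf_args *ap)
 *	{
 *
 *		switch (ap->a_name) {
 *		case _PC_NAME_MAX:
 *			*ap->a_retval = 12;
 *			return (0);
 *		default:
 *			return (vop_stdpathconf(ap));
 *		}
 *	}
 */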

/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(ap)
	struct vop_lock1_args /* {
		struct vnode *a_vp;
		int a_flags;
		char *file;
		int line;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (_lockmgr_args(vp->v_vnlock, ap->a_flags, VI_MTX(vp),
	    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, ap->a_file,
	    ap->a_line));
}

/* See above. */
int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp)));
}

/* See above. */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	return (lockstatus(ap->a_vp->v_vnlock));
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{

	return (poll_no_poll(ap->a_events));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{
	struct mount *mp;

	/*
	 * XXX Since this is called unlocked we may be recycled while
	 * attempting to ref the mount.  If this is the case, our mountpoint
	 * will be set to NULL.  We only have to prevent this call from
	 * returning with a ref to an incorrect mountpoint.  It is not
	 * harmful to return with a ref to our previous mountpoint.
	 */
	mp = ap->a_vp->v_mount;
	if (mp != NULL) {
		vfs_ref(mp);
		if (mp != ap->a_vp->v_mount) {
			vfs_rel(mp);
			mp = NULL;
		}
	}
	*(ap->a_mpp) = mp;
	return (0);
}

/*
 * Default VOP_BMAP(): map the logical block a_bn of a vnode onto its buffer
 * object and a block number, assuming a linear layout in which the logical
 * block number, expressed in units of the filesystem's f_iosize, is simply
 * rescaled to DEV_BSIZE units.  No read-ahead or read-behind clustering is
 * reported (*a_runp and *a_runb are set to 0).
 */
int
vop_stdbmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct bufobj **a_bop;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_bop != NULL)
		*ap->a_bop = &ap->a_vp->v_bufobj;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}

int
vop_stdfsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct buf *bp;
	struct bufobj *bo;
	struct buf *nbp;
	int error = 0;
	int maxretry = 1000;	/* large, arbitrarily chosen */

	bo = &vp->v_bufobj;
	BO_LOCK(bo);
loop1:
	/*
	 * MARK/SCAN initialization to avoid infinite loops.
	 */
	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
		bp->b_vflags &= ~BV_SCANNED;
		bp->b_error = 0;
	}

	/*
	 * Flush all dirty buffers associated with a vnode.
	 */
loop2:
	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
			if (ap->a_waitfor != MNT_WAIT)
				continue;
			if (BUF_LOCK(bp,
			    LK_EXCLUSIVE | LK_INTERLOCK | LK_SLEEPFAIL,
			    BO_MTX(bo)) != 0) {
				BO_LOCK(bo);
				goto loop1;
			}
			BO_LOCK(bo);
		}
		BO_UNLOCK(bo);
		KASSERT(bp->b_bufobj == bo,
		    ("bp %p wrong b_bufobj %p should be %p",
		    bp, bp->b_bufobj, bo));
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("fsync: not dirty");
		if ((vp->v_object != NULL) && (bp->b_flags & B_CLUSTEROK)) {
			vfs_bio_awrite(bp);
		} else {
			bremfree(bp);
			bawrite(bp);
		}
		BO_LOCK(bo);
		goto loop2;
	}

	/*
	 * If synchronous the caller expects us to completely resolve all
	 * dirty buffers in the system.  Wait for in-progress I/O to
	 * complete (which could include background bitmap writes), then
	 * retry if dirty blocks still exist.
	 */
	if (ap->a_waitfor == MNT_WAIT) {
		bufobj_wwait(bo, 0, 0);
		if (bo->bo_dirty.bv_cnt > 0) {
			/*
			 * If we are unable to write any of these buffers
			 * then we fail now rather than trying endlessly
			 * to write them out.
			 */
			TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
				if ((error = bp->b_error) == 0)
					continue;
			if (error == 0 && --maxretry >= 0)
				goto loop1;
			error = EAGAIN;
		}
	}
	BO_UNLOCK(bo);
	if (error == EAGAIN)
		vprint("fsync: giving up on dirty", vp);

	return (error);
}

/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
int
vop_stdgetpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_reqpage);
}

int
vop_stdkqfilter(struct vop_kqfilter_args *ap)
{
	return vfs_kqfilter(ap);
}

/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
int
vop_stdputpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals);
}

int
vop_stdvptofh(struct vop_vptofh_args *ap)
{
	return (EOPNOTSUPP);
}

int
vop_stdvptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct ucred *cred = ap->a_cred;
	char *buf = ap->a_buf;
	int *buflen = ap->a_buflen;
	char *dirbuf, *cpos;
	int i, error, eofflag, dirbuflen, flags, locked, len, covered;
	off_t off;
	ino_t fileno;
	struct vattr va;
	struct nameidata nd;
	struct thread *td;
	struct dirent *dp;
	struct vnode *mvp;

	i = *buflen;
	error = 0;
	covered = 0;
	td = curthread;

	if (vp->v_type != VDIR)
		return (ENOENT);

	error = VOP_GETATTR(vp, &va, cred);
	if (error)
		return (error);

	VREF(vp);
	locked = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp, 0);
	NDINIT_ATVP(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
	    "..", vp, td);
	flags = FREAD;
	error = vn_open_cred(&nd, &flags, 0, VN_OPEN_NOAUDIT, cred, NULL);
	if (error) {
		vn_lock(vp, locked | LK_RETRY);
		return (error);
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);

	mvp = *dvp = nd.ni_vp;

	if (vp->v_mount != (*dvp)->v_mount &&
	    ((*dvp)->v_vflag & VV_ROOT) &&
	    ((*dvp)->v_mount->mnt_flag & MNT_UNION)) {
		*dvp = (*dvp)->v_mount->mnt_vnodecovered;
		VREF(mvp);
		VOP_UNLOCK(mvp, 0);
		vn_close(mvp, FREAD, cred, td);
		VREF(*dvp);
		vn_lock(*dvp, LK_EXCLUSIVE | LK_RETRY);
		covered = 1;
	}

	fileno = va.va_fileid;

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	if ((*dvp)->v_type != VDIR) {
		error = ENOENT;
		goto out;
	}

	off = 0;
	len = 0;
	do {
		/* call VOP_READDIR of parent */
		error = get_next_dirent(*dvp, &dp, dirbuf, dirbuflen, &off,
					&cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if ((dp->d_type != DT_WHT) &&
		    (dp->d_fileno == fileno)) {
			if (covered) {
				VOP_UNLOCK(*dvp, 0);
				vn_lock(mvp, LK_EXCLUSIVE | LK_RETRY);
				if (dirent_exists(mvp, dp->d_name, td)) {
					error = ENOENT;
					VOP_UNLOCK(mvp, 0);
					vn_lock(*dvp, LK_EXCLUSIVE | LK_RETRY);
					goto out;
				}
				VOP_UNLOCK(mvp, 0);
				vn_lock(*dvp, LK_EXCLUSIVE | LK_RETRY);
			}
			i -= dp->d_namlen;

			if (i < 0) {
				error = ENOMEM;
				goto out;
			}
			bcopy(dp->d_name, buf + i, dp->d_namlen);
			error = 0;
			goto out;
		}
	} while (len > 0 || !eofflag);
	error = ENOENT;

out:
	free(dirbuf, M_TEMP);
	if (!error) {
		*buflen = i;
		vref(*dvp);
	}
	if (covered) {
		vput(*dvp);
		vrele(mvp);
	} else {
		VOP_UNLOCK(mvp, 0);
		vn_close(mvp, FREAD, cred, td);
	}
	vn_lock(vp, locked | LK_RETRY);
	return (error);
}

int
vop_stdallocate(struct vop_allocate_args *ap)
{
#ifdef __notyet__
	struct statfs sfs;
#endif
	struct iovec aiov;
	struct vattr vattr, *vap;
	struct uio auio;
	off_t fsize, len, cur, offset;
	uint8_t *buf;
	struct thread *td;
	struct vnode *vp;
	size_t iosize;
	int error;

	buf = NULL;
	error = 0;
	td = curthread;
	vap = &vattr;
	vp = ap->a_vp;
	len = *ap->a_len;
	offset = *ap->a_offset;

	error = VOP_GETATTR(vp, vap, td->td_ucred);
	if (error != 0)
		goto out;
	fsize = vap->va_size;
	iosize = vap->va_blocksize;
	if (iosize == 0)
		iosize = BLKDEV_IOSIZE;
	if (iosize > MAXPHYS)
		iosize = MAXPHYS;
	buf = malloc(iosize, M_TEMP, M_WAITOK);

#ifdef __notyet__
	/*
	 * Check if the filesystem sets f_maxfilesize; if not use
	 * VOP_SETATTR to perform the check.
	 */
	error = VFS_STATFS(vp->v_mount, &sfs, td);
	if (error != 0)
		goto out;
	if (sfs.f_maxfilesize) {
		if (offset > sfs.f_maxfilesize || len > sfs.f_maxfilesize ||
		    offset + len > sfs.f_maxfilesize) {
			error = EFBIG;
			goto out;
		}
	} else
#endif
	if (offset + len > vap->va_size) {
		/*
		 * Test offset + len against the filesystem's maxfilesize.
		 */
		VATTR_NULL(vap);
		vap->va_size = offset + len;
		error = VOP_SETATTR(vp, vap, td->td_ucred);
		if (error != 0)
			goto out;
		VATTR_NULL(vap);
		vap->va_size = fsize;
		error = VOP_SETATTR(vp, vap, td->td_ucred);
		if (error != 0)
			goto out;
	}

	for (;;) {
		/*
		 * Read and write back anything below the nominal file
		 * size.  There's currently no way outside the filesystem
		 * to know whether this area is sparse or not.
		 */
		cur = iosize;
		if ((offset % iosize) != 0)
			cur -= (offset % iosize);
		if (cur > len)
			cur = len;
		if (offset < fsize) {
			aiov.iov_base = buf;
			aiov.iov_len = cur;
			auio.uio_iov = &aiov;
			auio.uio_iovcnt = 1;
			auio.uio_offset = offset;
			auio.uio_resid = cur;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_td = td;
			error = VOP_READ(vp, &auio, 0, td->td_ucred);
			if (error != 0)
				break;
			if (auio.uio_resid > 0) {
				bzero(buf + cur - auio.uio_resid,
				    auio.uio_resid);
			}
		} else {
			bzero(buf, cur);
		}

		aiov.iov_base = buf;
		aiov.iov_len = cur;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = offset;
		auio.uio_resid = cur;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_td = td;

		error = VOP_WRITE(vp, &auio, 0, td->td_ucred);
		if (error != 0)
			break;

		len -= cur;
		offset += cur;
		if (len == 0)
			break;
		if (should_yield())
			break;
	}

out:
	*ap->a_len = len;
	*ap->a_offset = offset;
	free(buf, M_TEMP);
	return (error);
}

int
vop_stdadvise(struct vop_advise_args *ap)
{
	struct vnode *vp;
	off_t start, end;
	int error, vfslocked;

	vp = ap->a_vp;
	switch (ap->a_advice) {
	case POSIX_FADV_WILLNEED:
		/*
		 * Do nothing for now.  Filesystems should provide a
		 * custom method which starts an asynchronous read of
		 * the requested region.
		 */
		error = 0;
		break;
	case POSIX_FADV_DONTNEED:
		/*
		 * Flush any open FS buffers and then remove pages
		 * from the backing VM object.  Using vinvalbuf() here
		 * is a bit heavy-handed as it flushes all buffers for
		 * the given vnode, not just the buffers covering the
		 * requested range.
		 */
		error = 0;
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		if (vp->v_iflag & VI_DOOMED) {
			VOP_UNLOCK(vp, 0);
			VFS_UNLOCK_GIANT(vfslocked);
			break;
		}
		vinvalbuf(vp, V_CLEANONLY, 0, 0);
		if (vp->v_object != NULL) {
			start = trunc_page(ap->a_start);
			end = round_page(ap->a_end);
			VM_OBJECT_LOCK(vp->v_object);
			vm_object_page_cache(vp->v_object, OFF_TO_IDX(start),
			    OFF_TO_IDX(end));
			VM_OBJECT_UNLOCK(vp->v_object);
		}
		VOP_UNLOCK(vp, 0);
		VFS_UNLOCK_GIANT(vfslocked);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

int
vop_stdunp_bind(struct vop_unp_bind_args *ap)
{

	ap->a_vp->v_socket = ap->a_socket;
	return (0);
}

int
vop_stdunp_connect(struct vop_unp_connect_args *ap)
{

	*ap->a_socket = ap->a_vp->v_socket;
	return (0);
}

int
vop_stdunp_detach(struct vop_unp_detach_args *ap)
{

	ap->a_vp->v_socket = NULL;
	return (0);
}

/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
int
vfs_stdroot (mp, flags, vpp)
	struct mount *mp;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp)
	struct mount *mp;
	struct statfs *sbp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdquotactl (mp, cmds, uid, arg)
	struct mount *mp;
	int cmds;
	uid_t uid;
	void *arg;
{

	return (EOPNOTSUPP);
}

int
vfs_stdsync(mp, waitfor)
	struct mount *mp;
	int waitfor;
{
	struct vnode *vp, *mvp;
	struct thread *td;
	int error, lockreq, allerror = 0;

	td = curthread;
	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
loop:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vp->v_bufobj.bo_dirty.bv_cnt == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp, lockreq, td)) != 0) {
			if (error == ENOENT) {
				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
				goto loop;
			}
			continue;
		}
		error = VOP_FSYNC(vp, waitfor, td);
		if (error)
			allerror = error;
		vput(vp);
	}
	return (allerror);
}

int
vfs_stdnosync (mp, waitfor)
	struct mount *mp;
	int waitfor;
{

	return (0);
}

int
vfs_stdvget (mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, flags, vpp)
	struct mount *mp;
	struct fid *fhp;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname)
	struct mount *mp;
	int cmd;
	struct vnode *filename_vp;
	int attrnamespace;
	const char *attrname;
{

	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp, 0);
	return (EOPNOTSUPP);
}

int
vfs_stdsysctl(mp, op, req)
	struct mount *mp;
	fsctlop_t op;
	struct sysctl_req *req;
{

	return (EOPNOTSUPP);
}

/* end of vfs default ops */
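
/*
 * Illustrative sketch (hypothetical "myfs_*" names, not compiled here): a
 * filesystem can plug the defaults above directly into the slots of its
 * vfsops table that it does not implement itself, for example:
 *
 *	static struct vfsops myfs_vfsops = {
 *		.vfs_mount = myfs_mount,
 *		.vfs_unmount = myfs_unmount,
 *		.vfs_root = myfs_root,
 *		.vfs_statfs = myfs_statfs,
 *		.vfs_sync = vfs_stdnosync,
 *		.vfs_vget = vfs_stdvget,
 *		.vfs_init = vfs_stdinit,
 *		.vfs_uninit = vfs_stduninit,
 *		.vfs_extattrctl = vfs_stdextattrctl,
 *	};
 */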