/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * $FreeBSD: releng/5.0/sys/kern/vfs_default.c 105884 2002-10-24 17:55:49Z phk $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/poll.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_nostrategy(struct vop_strategy_args *);

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 */

vop_t **default_vnodeop_p;
static struct vnodeopv_entry_desc default_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_advlock_desc,		(vop_t *) vop_einval },
	{ &vop_bmap_desc,		(vop_t *) vop_stdbmap },
	{ &vop_close_desc,		(vop_t *) vop_null },
	{ &vop_createvobject_desc,	(vop_t *) vop_stdcreatevobject },
	{ &vop_destroyvobject_desc,	(vop_t *) vop_stddestroyvobject },
	{ &vop_fsync_desc,		(vop_t *) vop_null },
	{ &vop_getpages_desc,		(vop_t *) vop_stdgetpages },
	{ &vop_getvobject_desc,		(vop_t *) vop_stdgetvobject },
	{ &vop_inactive_desc,		(vop_t *) vop_stdinactive },
	{ &vop_ioctl_desc,		(vop_t *) vop_enotty },
	{ &vop_islocked_desc,		(vop_t *) vop_stdislocked },
	{ &vop_lease_desc,		(vop_t *) vop_null },
	{ &vop_lock_desc,		(vop_t *) vop_stdlock },
	{ &vop_lookup_desc,		(vop_t *) vop_nolookup },
	{ &vop_open_desc,		(vop_t *) vop_null },
	{ &vop_pathconf_desc,		(vop_t *) vop_einval },
	{ &vop_putpages_desc,		(vop_t *) vop_stdputpages },
	{ &vop_poll_desc,		(vop_t *) vop_nopoll },
	{ &vop_readlink_desc,		(vop_t *) vop_einval },
	{ &vop_revoke_desc,		(vop_t *) vop_revoke },
	{ &vop_strategy_desc,		(vop_t *) vop_nostrategy },
	{ &vop_unlock_desc,		(vop_t *) vop_stdunlock },
	{ NULL, NULL }
};

static struct vnodeopv_desc default_vnodeop_opv_desc =
	{ &default_vnodeop_p, default_vnodeop_entries };

VNODEOP_SET(default_vnodeop_opv_desc);
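
/*
 * A filesystem hooks into this mechanism the same way: it declares a table
 * of the operations it implements and registers it with VNODEOP_SET().
 * An illustrative sketch only -- the myfs_* names are hypothetical:
 *
 *	vop_t **myfs_vnodeop_p;
 *	static struct vnodeopv_entry_desc myfs_vnodeop_entries[] = {
 *		{ &vop_default_desc, (vop_t *) vop_defaultop },
 *		{ &vop_lookup_desc, (vop_t *) myfs_lookup },
 *		{ NULL, NULL }
 *	};
 *	static struct vnodeopv_desc myfs_vnodeop_opv_desc =
 *		{ &myfs_vnodeop_p, myfs_vnodeop_entries };
 *	VNODEOP_SET(myfs_vnodeop_opv_desc);
 */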

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Used to make a defined VOP fall back to the default VOP.
 */
int
vop_defaultop(struct vop_generic_args *ap)
{

	return (VOCALL(default_vnodeop_p, ap->a_desc->vdesc_offset, ap));
}
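
/*
 * For example, a filesystem that maps vop_default_desc to vop_defaultop in
 * its own table has every VOP it does not implement re-dispatched through
 * the default table above, so an unimplemented VOP_IOCTL() lands in
 * vop_enotty and returns ENOTTY rather than EOPNOTSUPP.
 */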

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * Documentation for the operations they implement, where it exists, can be
 * found in the corresponding VOP_<SOMETHING>(9) manpage (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  Typically this is done for a BIO_READ strategy call.
 *	B_INVAL is usually assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("", ap->a_vp);
	vprint("", ap->a_bp->b_vp);
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}

/*
 * vop_stdpathconf:
 *
 *	Standard implementation of POSIX pathconf, to get information about
 *	limits for a filesystem.
 *	Individual filesystems should override this for the case where they
 *	have smaller limits.
 */
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		return (0);
	case _PC_MAX_CANON:
		*ap->a_retval = MAX_CANON;
		return (0);
	case _PC_MAX_INPUT:
		*ap->a_retval = MAX_INPUT;
		return (0);
	case _PC_PIPE_BUF:
		*ap->a_retval = PIPE_BUF;
		return (0);
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 1;
		return (0);
	case _PC_VDISABLE:
		*ap->a_retval = _POSIX_VDISABLE;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}
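
/*
 * An illustrative sketch of such an override (the myfs_* names are
 * hypothetical): handle the one limit that differs, and defer everything
 * else to vop_stdpathconf().
 *
 *	static int
 *	myfs_pathconf(struct vop_pathconf_args *ap)
 *	{
 *		if (ap->a_name == _PC_LINK_MAX) {
 *			*ap->a_retval = MYFS_LINK_MAX;
 *			return (0);
 *		}
 *		return (vop_stdpathconf(ap));
 *	}
 */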

/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

#ifndef	DEBUG_LOCKS
	return (lockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp), ap->a_td));
#else
	return (debuglockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp),
	    ap->a_td, "vop_stdlock", vp->filename, vp->line));
#endif
}

/* See above. */
int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp),
	    ap->a_td));
}

/* See above. */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	return (lockstatus(ap->a_vp->v_vnlock, ap->a_td));
}
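
/*
 * These operate on the vnode's v_vnlock via lockmgr(9).  The usual
 * caller-side pairing (used later in this file as well) is:
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
 *	... operate on vp ...
 *	VOP_UNLOCK(vp, 0, td);
 */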

/* Mark the vnode inactive */
int
vop_stdinactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	VOP_UNLOCK(ap->a_vp, 0, ap->a_td);
	return (0);
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}
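
/*
 * Worked example of the mask test above: with a_events == POLLIN, no bits
 * outside POLLSTANDARD are set, so both routines report the vnode ready at
 * once (regular files never block in poll).  If a bit outside POLLSTANDARD
 * is requested, vop_stdpoll records the poll via vn_pollrecord() so the
 * caller can sleep until the event fires, while vop_nopoll fails the
 * request with POLLNVAL.
 */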

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress.  So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_sharedlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
#ifdef DEBUG_VFS_LOCKS
		/*
		 * Normally, we use shared locks here, but that confuses
		 * the locking assertions.
		 */
		vnflags = LK_EXCLUSIVE;
		break;
#endif
		/* FALLTHROUGH: grant only a shared lock in non-debug builds */
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
#ifndef	DEBUG_LOCKS
	return (lockmgr(vp->v_vnlock, vnflags, VI_MTX(vp), ap->a_td));
#else
	return (debuglockmgr(vp->v_vnlock, vnflags, VI_MTX(vp), ap->a_td,
	    "vop_sharedlock", vp->filename, vp->line));
#endif
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress.  So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_nolock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
#ifdef notyet
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
	return (lockmgr(vp->v_vnlock, vnflags, VI_MTX(vp), ap->a_td));
#else /* for now */
	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		VI_UNLOCK(ap->a_vp);
	return (0);
#endif
}

/*
 * Do the inverse of vop_nolock, handling the interlock in a compatible way.
 */
int
vop_nounlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{

	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		VI_UNLOCK(ap->a_vp);
	return (0);
}

/*
 * Return whether or not the node is in use.
 */
int
vop_noislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	return (0);
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{

	*(ap->a_mpp) = ap->a_vp->v_mount;
	return (0);
}

/* Create the VM system backing object for this vnode */
int
vop_stdcreatevobject(ap)
	struct vop_createvobject_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct ucred *cred = ap->a_cred;
	struct thread *td = ap->a_td;
	struct vattr vat;
	vm_object_t object;
	int error = 0;

	GIANT_REQUIRED;

	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
		return (0);

retry:
	if ((object = vp->v_object) == NULL) {
		if (vp->v_type == VREG || vp->v_type == VDIR) {
			if ((error = VOP_GETATTR(vp, &vat, cred, td)) != 0)
				goto retn;
			object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
		} else if (devsw(vp->v_rdev) != NULL) {
			/*
			 * This simply allocates the biggest object possible
			 * for a disk vnode.  This should be fixed, but doesn't
			 * cause any problems (yet).
			 */
			object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0);
		} else {
			goto retn;
		}
		/*
		 * Dereference the reference we just created.  This assumes
		 * that the object is associated with the vp.
		 */
		object->ref_count--;
		vrele(vp);
	} else {
		if (object->flags & OBJ_DEAD) {
			VOP_UNLOCK(vp, 0, td);
			tsleep(object, PVM, "vodead", 0);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
			goto retry;
		}
	}

	KASSERT(vp->v_object != NULL, ("vfs_object_create: NULL object"));
	vp->v_vflag |= VV_OBJBUF;

retn:
	return (error);
}

/* Destroy the VM system object associated with this vnode */
int
vop_stddestroyvobject(ap)
	struct vop_destroyvobject_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	vm_object_t obj = vp->v_object;

	GIANT_REQUIRED;

	if (vp->v_object == NULL)
		return (0);

	if (obj->ref_count == 0) {
		/*
		 * vclean() may be called twice.  The first time
		 * removes the primary reference to the object,
		 * the second time goes one further and is a
		 * special-case to terminate the object.
		 *
		 * Don't double-terminate the object.
		 */
		if ((obj->flags & OBJ_DEAD) == 0)
			vm_object_terminate(obj);
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
	}
	return (0);
}

/*
 * Return the underlying VM object.  This routine may be called with or
 * without the vnode interlock held.  If called without, the returned
 * object is not guaranteed to be valid.  The syncer typically gets the
 * object without holding the interlock in order to quickly test whether
 * it might be dirty before going heavy-weight.  vm_objects use zalloc
 * and thus stable storage, so this is safe.
 */
int
vop_stdgetvobject(ap)
	struct vop_getvobject_args /* {
		struct vnode *a_vp;
		struct vm_object **a_objpp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vm_object **objpp = ap->a_objpp;

	if (objpp)
		*objpp = vp->v_object;
	return (vp->v_object ? 0 : EINVAL);
}
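
/*
 * Note that a caller may pass a NULL objpp simply to test whether a backing
 * object exists at all, e.g.:
 *
 *	if (VOP_GETVOBJECT(vp, NULL) == 0)
 *		... vp has a VM object ...
 */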

/*
 * Default bmap implementation: translate the logical block number a_bn into
 * the corresponding physical (DEV_BSIZE) block number on the underlying
 * device, assuming a direct 1:1 layout of one filesystem block per logical
 * block.  No read-ahead or read-behind run lengths are reported.  See
 * VOP_BMAP(9).
 */
int
vop_stdbmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_vpp != NULL)
		*ap->a_vpp = ap->a_vp;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}
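
/*
 * Worked example of the arithmetic above: with f_iosize == 8192 and
 * DEV_BSIZE == 512, btodb(8192) == 16, so logical block 5 maps to physical
 * block 5 * 16 == 80 on the underlying device.
 */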

/*
 * Default getpages: hand the request to the generic vnode pager code, which
 * implements paging in through the buffer cache.  See VOP_GETPAGES(9).
 */
int
vop_stdgetpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_reqpage);
}

/*
 * Default putpages: hand the request to the generic vnode pager code.
 * See VOP_PUTPAGES(9).
 */
int
vop_stdputpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals);
}

/*
 * Default vfs operations: used to fill the vfs function table so that
 * unimplemented entries get reasonable default return values.
 */
int
vfs_stdroot (mp, vpp)
	struct mount *mp;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp, td)
	struct mount *mp;
	struct statfs *sbp;
	struct thread *td;
{
	return (EOPNOTSUPP);
}

int
vfs_stdvptofh (vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdstart (mp, flags, td)
	struct mount *mp;
	int flags;
	struct thread *td;
{
	return (0);
}

int
vfs_stdquotactl (mp, cmds, uid, arg, td)
	struct mount *mp;
	int cmds;
	uid_t uid;
	caddr_t arg;
	struct thread *td;
{
	return (EOPNOTSUPP);
}

int
vfs_stdsync (mp, waitfor, cred, td)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct thread *td;
{
	return (0);
}

int
vfs_stdvget (mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, vpp)
	struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{
	return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{
	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{
	return (0);
}

int
vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname, td)
	struct mount *mp;
	int cmd;
	struct vnode *filename_vp;
	int attrnamespace;
	const char *attrname;
	struct thread *td;
{
	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp, 0, td);
	return (EOPNOTSUPP);
}
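
/*
 * A filesystem points any struct vfsops entries it does not implement at
 * these defaults; e.g. a filesystem without quota support can set its
 * vfs_quotactl member to vfs_stdquotactl and have the call fail with
 * EOPNOTSUPP, while the no-op defaults (vfs_stdstart, vfs_stdsync,
 * vfs_stdinit, vfs_stduninit) simply succeed.
 */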

/* end of vfs default ops */