/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/6.3/sys/kern/vfs_default.c 173886 2007-11-24 19:45:58Z cvs2svn $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/event.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/poll.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_nostrategy(struct vop_strategy_args *);

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 */

struct vop_vector default_vnodeops = {
	.vop_default =		NULL,
	.vop_bypass =		VOP_EOPNOTSUPP,

	.vop_advlock =		VOP_EINVAL,
	.vop_bmap =		vop_stdbmap,
	.vop_close =		VOP_NULL,
	.vop_fsync =		VOP_NULL,
	.vop_getpages =		vop_stdgetpages,
	.vop_getwritemount =	vop_stdgetwritemount,
	.vop_inactive =		VOP_NULL,
	.vop_ioctl =		VOP_ENOTTY,
	.vop_kqfilter =		vop_stdkqfilter,
	.vop_islocked =		vop_stdislocked,
	.vop_lease =		VOP_NULL,
	.vop_lock =		vop_stdlock,
	.vop_lookup =		vop_nolookup,
	.vop_open =		VOP_NULL,
	.vop_pathconf =		VOP_EINVAL,
	.vop_poll =		vop_nopoll,
	.vop_putpages =		vop_stdputpages,
	.vop_readlink =		VOP_EINVAL,
	.vop_revoke =		VOP_PANIC,
	.vop_strategy =		vop_nostrategy,
	.vop_unlock =		vop_stdunlock,
};
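
/*
 * Example (hypothetical "foofs", for illustration only): a filesystem
 * normally chains to this table through its own .vop_default entry, so
 * that any VOP it does not implement falls through to the defaults
 * above:
 *
 *	static struct vop_vector foofs_vnodeops = {
 *		.vop_default =	&default_vnodeops,
 *		.vop_lookup =	foofs_lookup,
 *		.vop_read =	foofs_read,
 *	};
 */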

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Helper function used by some filesystems to panic on VOPs that must
 * never be called.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * Documentation for the operations they implement, where it exists, can be
 * found in the VOP_<SOMETHING>(9) manual page (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine; this is typically done for a BIO_READ strategy call.
 *	Prior to a write, B_INVAL is assumed to already be clear and should
 *	not be cleared manually unless you just made the buffer invalid.
 *	BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("vnode", ap->a_vp);
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}
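
/*
 * Hypothetical caller sketch (not taken from this file) illustrating the
 * convention above: the error state is cleared before the buffer is
 * handed to the strategy routine for a read.
 *
 *	bp->b_iocmd = BIO_READ;
 *	bp->b_ioflags &= ~BIO_ERROR;
 *	VOP_STRATEGY(vp, bp);
 *	error = bufwait(bp);
 */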

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, returning system-wide limits
 * for a filesystem.  Filesystems with smaller limits should override this
 * on a per-filesystem basis.
 */
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
		case _PC_LINK_MAX:
			*ap->a_retval = LINK_MAX;
			return (0);
		case _PC_MAX_CANON:
			*ap->a_retval = MAX_CANON;
			return (0);
		case _PC_MAX_INPUT:
			*ap->a_retval = MAX_INPUT;
			return (0);
		case _PC_PIPE_BUF:
			*ap->a_retval = PIPE_BUF;
			return (0);
		case _PC_CHOWN_RESTRICTED:
			*ap->a_retval = 1;
			return (0);
		case _PC_VDISABLE:
			*ap->a_retval = _POSIX_VDISABLE;
			return (0);
		default:
			return (EINVAL);
	}
	/* NOTREACHED */
}
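
/*
 * Example (hypothetical, for illustration only): a filesystem with a
 * smaller link limit would handle that name itself and defer everything
 * else to the standard implementation; FOOFS_LINK_MAX is a made-up
 * constant for the example.
 *
 *	static int
 *	foofs_pathconf(struct vop_pathconf_args *ap)
 *	{
 *
 *		if (ap->a_name == _PC_LINK_MAX) {
 *			*ap->a_retval = FOOFS_LINK_MAX;
 *			return (0);
 *		}
 *		return (vop_stdpathconf(ap));
 *	}
 */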

/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp), ap->a_td));
}

/* See above. */
int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp),
	    ap->a_td));
}

/* See above. */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	return (lockstatus(ap->a_vp->v_vnlock, ap->a_td));
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 * Stay in sync with kern_conf.c::no_poll().
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{
	struct mount *mp;

	/*
	 * XXX Since this is called unlocked we may be recycled while
	 * attempting to ref the mount.  If this is the case our mountpoint
	 * will be set to NULL.  We only have to prevent this call from
	 * returning with a ref to an incorrect mountpoint.  It is not
	 * harmful to return with a ref to our previous mountpoint.
	 */
	mp = ap->a_vp->v_mount;
	if (mp != NULL) {
		vfs_ref(mp);
		if (mp != ap->a_vp->v_mount) {
			vfs_rel(mp);
			mp = NULL;
		}
	}
	*(ap->a_mpp) = mp;
	return (0);
}

/* XXX Needs good comment and VOP_BMAP(9) manpage */
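/*
 * Default bmap: report the vnode's own buffer object as the backing
 * object and convert the logical block number to a device block number
 * by scaling with the filesystem's I/O size (expressed in DEV_BSIZE
 * units via btodb()).  No read-ahead or read-behind runs are reported.
 */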
int
vop_stdbmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct bufobj **a_bop;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_bop != NULL)
		*ap->a_bop = &ap->a_vp->v_bufobj;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}

int
vop_stdfsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct buf *bp;
	struct bufobj *bo;
	struct buf *nbp;
	int error = 0;
	int maxretry = 1000;	/* large, arbitrarily chosen */

	VI_LOCK(vp);
loop1:
	/*
	 * MARK/SCAN initialization to avoid infinite loops.
	 */
	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
		bp->b_vflags &= ~BV_SCANNED;
		bp->b_error = 0;
	}

	/*
	 * Flush all dirty buffers associated with a vnode.
	 */
loop2:
	TAILQ_FOREACH_SAFE(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs, nbp) {
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
			continue;
		VI_UNLOCK(vp);
		KASSERT(bp->b_bufobj == &vp->v_bufobj,
		    ("bp %p wrong b_bufobj %p should be %p",
		    bp, bp->b_bufobj, &vp->v_bufobj));
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("fsync: not dirty");
		if ((vp->v_object != NULL) && (bp->b_flags & B_CLUSTEROK)) {
			vfs_bio_awrite(bp);
		} else {
			bremfree(bp);
			bawrite(bp);
		}
		VI_LOCK(vp);
		goto loop2;
	}

	/*
	 * If synchronous, the caller expects us to completely resolve all
	 * dirty buffers for this vnode.  Wait for in-progress I/O to
	 * complete (which could include background bitmap writes), then
	 * retry if dirty blocks still exist.
	 */
	if (ap->a_waitfor == MNT_WAIT) {
		bo = &vp->v_bufobj;
		bufobj_wwait(bo, 0, 0);
		if (bo->bo_dirty.bv_cnt > 0) {
			/*
			 * If we are unable to write any of these buffers
			 * then we fail now rather than trying endlessly
			 * to write them out.
			 */
			TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
				if ((error = bp->b_error) == 0)
					continue;
			if (error == 0 && --maxretry >= 0)
				goto loop1;
			error = EAGAIN;
		}
	}
	VI_UNLOCK(vp);
	if (error == EAGAIN)
		vprint("fsync: giving up on dirty", vp);

	return (error);
}

/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
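/*
 * Default getpages: hand the request to the generic vnode pager, which
 * does the paging I/O through the filesystem's own VOP_BMAP() and
 * strategy routines, falling back to VOP_READ() when the blocks cannot
 * be mapped.
 */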
int
vop_stdgetpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_reqpage);
}

int
vop_stdkqfilter(struct vop_kqfilter_args *ap)
{
	return vfs_kqfilter(ap);
}

/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
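/*
 * Default putpages: hand the request to the generic vnode pager, which
 * writes the pages out through VOP_WRITE().
 */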
int
vop_stdputpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals);
}

/*
 * VFS default ops, used to fill the VFS function table with reasonable
 * default return values.
 */
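
/*
 * Example (hypothetical "foofs", for illustration only): a filesystem
 * may point the vfsops entries it does not care about at the defaults
 * below, e.g.:
 *
 *	static struct vfsops foofs_vfsops = {
 *		.vfs_mount =	foofs_mount,
 *		.vfs_unmount =	foofs_unmount,
 *		.vfs_root =	foofs_root,
 *		.vfs_statfs =	foofs_statfs,
 *		.vfs_sync =	vfs_stdnosync,
 *		.vfs_vget =	vfs_stdvget,
 *	};
 *	VFS_SET(foofs_vfsops, foofs, 0);
 */
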
int
vfs_stdroot (mp, flags, vpp, td)
	struct mount *mp;
	int flags;
	struct vnode **vpp;
	struct thread *td;
{

	return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp, td)
	struct mount *mp;
	struct statfs *sbp;
	struct thread *td;
{

	return (EOPNOTSUPP);
}

int
vfs_stdvptofh (vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdquotactl (mp, cmds, uid, arg, td)
	struct mount *mp;
	int cmds;
	uid_t uid;
	caddr_t arg;
	struct thread *td;
{

	return (EOPNOTSUPP);
}

int
vfs_stdsync(mp, waitfor, td)
	struct mount *mp;
	int waitfor;
	struct thread *td;
{
	struct vnode *vp, *mvp;
	int error, lockreq, allerror = 0;

	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
	MNT_ILOCK(mp);
loop:
	MNT_VNODE_FOREACH(vp, mp, mvp) {

		VI_LOCK(vp);
		if (vp->v_bufobj.bo_dirty.bv_cnt == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		MNT_IUNLOCK(mp);

		if ((error = vget(vp, lockreq, td)) != 0) {
			MNT_ILOCK(mp);
			if (error == ENOENT) {
				MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
				goto loop;
			}
			continue;
		}
		error = VOP_FSYNC(vp, waitfor, td);
		if (error)
			allerror = error;

		VOP_UNLOCK(vp, 0, td);
		vrele(vp);
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);
	return (allerror);
}

int
vfs_stdnosync (mp, waitfor, td)
	struct mount *mp;
	int waitfor;
	struct thread *td;
{

	return (0);
}

int
vfs_stdvget (mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, vpp)
	struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname, td)
	struct mount *mp;
	int cmd;
	struct vnode *filename_vp;
	int attrnamespace;
	const char *attrname;
	struct thread *td;
{

	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp, 0, td);
	return (EOPNOTSUPP);
}

int
vfs_stdsysctl(mp, op, req)
	struct mount *mp;
	fsctlop_t op;
	struct sysctl_req *req;
{

	return (EOPNOTSUPP);
}

/* end of vfs default ops */