/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/9.2/sys/kern/vfs_default.c 248029 2013-03-08 08:09:26Z kib $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/event.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/poll.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_norename(struct vop_rename_args *);
static int	vop_nostrategy(struct vop_strategy_args *);
static int	get_next_dirent(struct vnode *vp, struct dirent **dpp,
		    char *dirbuf, int dirbuflen, off_t *off,
		    char **cpos, int *len, int *eofflag,
		    struct thread *td);
static int	dirent_exists(struct vnode *vp, const char *dirname,
		    struct thread *td);

#define	DIRENT_MINSIZE (sizeof(struct dirent) - (MAXNAMLEN+1) + 4)

static int vop_stdis_text(struct vop_is_text_args *ap);
static int vop_stdset_text(struct vop_set_text_args *ap);
static int vop_stdunset_text(struct vop_unset_text_args *ap);
static int vop_stdget_writecount(struct vop_get_writecount_args *ap);
static int vop_stdadd_writecount(struct vop_add_writecount_args *ap);

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 *
 * Note that every filesystem has to implement either vop_access
 * or vop_accessx; failing to do so will result in an immediate crash
 * due to stack overflow, as vop_stdaccess() calls vop_stdaccessx(),
 * which calls vop_stdaccess() etc.
 */

struct vop_vector default_vnodeops = {
	.vop_default = NULL,
	.vop_bypass = VOP_EOPNOTSUPP,

	.vop_access = vop_stdaccess,
	.vop_accessx = vop_stdaccessx,
	.vop_advise = vop_stdadvise,
	.vop_advlock = vop_stdadvlock,
	.vop_advlockasync = vop_stdadvlockasync,
	.vop_advlockpurge = vop_stdadvlockpurge,
	.vop_allocate = vop_stdallocate,
	.vop_bmap = vop_stdbmap,
	.vop_close = VOP_NULL,
	.vop_fsync = VOP_NULL,
	.vop_getpages = vop_stdgetpages,
	.vop_getwritemount = vop_stdgetwritemount,
	.vop_inactive = VOP_NULL,
	.vop_ioctl = VOP_ENOTTY,
	.vop_kqfilter = vop_stdkqfilter,
	.vop_islocked = vop_stdislocked,
	.vop_lock1 = vop_stdlock,
	.vop_lookup = vop_nolookup,
	.vop_open = VOP_NULL,
	.vop_pathconf = VOP_EINVAL,
	.vop_poll = vop_nopoll,
	.vop_putpages = vop_stdputpages,
	.vop_readlink = VOP_EINVAL,
	.vop_rename = vop_norename,
	.vop_revoke = VOP_PANIC,
	.vop_strategy = vop_nostrategy,
	.vop_unlock = vop_stdunlock,
	.vop_vptocnp = vop_stdvptocnp,
	.vop_vptofh = vop_stdvptofh,
	.vop_unp_bind = vop_stdunp_bind,
	.vop_unp_connect = vop_stdunp_connect,
	.vop_unp_detach = vop_stdunp_detach,
	.vop_is_text = vop_stdis_text,
	.vop_set_text = vop_stdset_text,
	.vop_unset_text = vop_stdunset_text,
	.vop_get_writecount = vop_stdget_writecount,
	.vop_add_writecount = vop_stdadd_writecount,
};
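
/*
 * Illustrative sketch (hypothetical "myfs", not part of this file): a
 * filesystem normally supplies only the operations it implements and
 * points vop_default at default_vnodeops, so everything else falls
 * through to the table above.
 *
 *	static struct vop_vector myfs_vnodeops = {
 *		.vop_default =	&default_vnodeops,
 *		.vop_lookup =	myfs_lookup,
 *		.vop_read =	myfs_read,
 *		.vop_readdir =	myfs_readdir,
 *		.vop_reclaim =	myfs_reclaim,
 *	};
 */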

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_enoent(struct vop_generic_args *ap)
{

	return (ENOENT);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * Where documentation for an operation exists, it is in the
 * VOP_<SOMETHING>(9) manpage (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * vop_norename:
 *
 * Handle unlock and reference counting for arguments of vop_rename
 * for filesystems that do not implement the rename operation.
 */
static int
vop_norename(struct vop_rename_args *ap)
{

	vop_rename_fail(ap);
	return (EOPNOTSUPP);
}

/*
 * vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine; typically this is done for a BIO_READ strategy call.
 *	B_INVAL is assumed to already be clear prior to a write and should
 *	not be cleared manually unless you just made the buffer invalid.
 *	BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("vnode", ap->a_vp);
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}

static int
get_next_dirent(struct vnode *vp, struct dirent **dpp, char *dirbuf,
    int dirbuflen, off_t *off, char **cpos, int *len,
    int *eofflag, struct thread *td)
{
	int error, reclen;
	struct uio uio;
	struct iovec iov;
	struct dirent *dp;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	if (*len == 0) {
		iov.iov_base = dirbuf;
		iov.iov_len = dirbuflen;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = *off;
		uio.uio_resid = dirbuflen;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_READ;
		uio.uio_td = td;

		*eofflag = 0;

#ifdef MAC
		error = mac_vnode_check_readdir(td->td_ucred, vp);
		if (error == 0)
#endif
			error = VOP_READDIR(vp, &uio, td->td_ucred, eofflag,
			    NULL, NULL);
		if (error)
			return (error);

		*off = uio.uio_offset;

		*cpos = dirbuf;
		*len = (dirbuflen - uio.uio_resid);

		if (*len == 0)
			return (ENOENT);
	}

	dp = (struct dirent *)(*cpos);
	reclen = dp->d_reclen;
	*dpp = dp;

	/* Check for a malformed directory entry. */
	if (reclen < DIRENT_MINSIZE)
		return (EINVAL);

	*cpos += reclen;
	*len -= reclen;

	return (0);
}

/*
 * Check if a named file exists in a given directory vnode.
 */
static int
dirent_exists(struct vnode *vp, const char *dirname, struct thread *td)
{
	char *dirbuf, *cpos;
	int error, eofflag, dirbuflen, len, found;
	off_t off;
	struct dirent *dp;
	struct vattr va;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	found = 0;

	error = VOP_GETATTR(vp, &va, td->td_ucred);
	if (error)
		return (found);

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	off = 0;
	len = 0;
	do {
		error = get_next_dirent(vp, &dp, dirbuf, dirbuflen, &off,
		    &cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if ((dp->d_type != DT_WHT) &&
		    !strcmp(dp->d_name, dirname)) {
			found = 1;
			goto out;
		}
	} while (len > 0 || !eofflag);

out:
	free(dirbuf, M_TEMP);
	return (found);
}

int
vop_stdaccess(struct vop_access_args *ap)
{

	KASSERT((ap->a_accmode & ~(VEXEC | VWRITE | VREAD | VADMIN |
	    VAPPEND)) == 0, ("invalid bit in accmode"));

	return (VOP_ACCESSX(ap->a_vp, ap->a_accmode, ap->a_cred, ap->a_td));
}

int
vop_stdaccessx(struct vop_accessx_args *ap)
{
	int error;
	accmode_t accmode = ap->a_accmode;

	error = vfs_unixify_accmode(&accmode);
	if (error != 0)
		return (error);

	if (accmode == 0)
		return (0);

	return (VOP_ACCESS(ap->a_vp, accmode, ap->a_cred, ap->a_td));
}
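
/*
 * Illustrative sketch (hypothetical "myfs" and "struct myfs_node"):
 * implementing either VOP_ACCESS() or VOP_ACCESSX() terminates the
 * mutual recursion warned about above, since the std version of each
 * simply forwards to the other.  The mode/uid/gid fields stand in for
 * attributes the filesystem would fetch itself.
 *
 *	static int
 *	myfs_access(struct vop_access_args *ap)
 *	{
 *		struct myfs_node *np = ap->a_vp->v_data;
 *
 *		// Reached both directly and via vop_stdaccessx().
 *		return (vaccess(ap->a_vp->v_type, np->mode, np->uid,
 *		    np->gid, ap->a_accmode, ap->a_cred, NULL));
 *	}
 */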

/*
 * Advisory record locking support
 */
int
vop_stdadvlock(struct vop_advlock_args *ap)
{
	struct vnode *vp;
	struct ucred *cred;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	cred = curthread->td_ucred;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(vp, &vattr, cred);
	VOP_UNLOCK(vp, 0);
	if (error)
		return (error);

	return (lf_advlock(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockasync(struct vop_advlockasync_args *ap)
{
	struct vnode *vp;
	struct ucred *cred;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	cred = curthread->td_ucred;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(vp, &vattr, cred);
	VOP_UNLOCK(vp, 0);
	if (error)
		return (error);

	return (lf_advlockasync(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockpurge(struct vop_advlockpurge_args *ap)
{
	struct vnode *vp;

	vp = ap->a_vp;
	lf_purgelocks(vp, &vp->v_lockf);
	return (0);
}
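
/*
 * For reference, these handlers back userland byte-range locking; a
 * sketch with the standard fcntl(2) API:
 *
 *	struct flock fl = { .l_start = 0, .l_len = 0,	// whole file
 *	    .l_type = F_WRLCK, .l_whence = SEEK_SET };
 *	fcntl(fd, F_SETLKW, &fl);	// may sleep; serviced by lf_advlock()
 *
 * The VOP_GETATTR() calls above fetch va_size because lf_advlock()
 * needs the file size to resolve SEEK_END-relative lock ranges.
 */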

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override it per filesystem when the filesystem has smaller limits.
 */
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
	case _PC_NAME_MAX:
		*ap->a_retval = NAME_MAX;
		return (0);
	case _PC_PATH_MAX:
		*ap->a_retval = PATH_MAX;
		return (0);
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		return (0);
	case _PC_MAX_CANON:
		*ap->a_retval = MAX_CANON;
		return (0);
	case _PC_MAX_INPUT:
		*ap->a_retval = MAX_INPUT;
		return (0);
	case _PC_PIPE_BUF:
		*ap->a_retval = PIPE_BUF;
		return (0);
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 1;
		return (0);
	case _PC_VDISABLE:
		*ap->a_retval = _POSIX_VDISABLE;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}
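
/*
 * A filesystem with smaller limits typically overrides only the names
 * it cares about and falls back here for the rest; hypothetical sketch:
 *
 *	static int
 *	myfs_pathconf(struct vop_pathconf_args *ap)
 *	{
 *		if (ap->a_name == _PC_NAME_MAX) {
 *			*ap->a_retval = MYFS_MAXNAMLEN;	// fs-specific limit
 *			return (0);
 *		}
 *		return (vop_stdpathconf(ap));
 *	}
 */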

/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(ap)
	struct vop_lock1_args /* {
		struct vnode *a_vp;
		int a_flags;
		char *file;
		int line;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (_lockmgr_args(vp->v_vnlock, ap->a_flags, VI_MTX(vp),
	    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, ap->a_file,
	    ap->a_line));
}

/* See above. */
int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp)));
}

/* See above. */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	return (lockstatus(ap->a_vp->v_vnlock));
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{

	return (poll_no_poll(ap->a_events));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{
	struct mount *mp;

	/*
	 * XXX Since this is called unlocked we may be recycled while
	 * attempting to ref the mount.  If this is the case our mountpoint
	 * will be set to NULL.  We only have to prevent this call from
	 * returning with a ref to an incorrect mountpoint.  It is not
	 * harmful to return with a ref to our previous mountpoint.
	 */
	mp = ap->a_vp->v_mount;
	if (mp != NULL) {
		vfs_ref(mp);
		if (mp != ap->a_vp->v_mount) {
			vfs_rel(mp);
			mp = NULL;
		}
	}
	*(ap->a_mpp) = mp;
	return (0);
}

/*
 * Default bmap: map a logical file block to a disk block address by
 * direct scaling with the filesystem's I/O size, and advertise no
 * read-ahead or read-behind clustering.  See VOP_BMAP(9).
 */
int
vop_stdbmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct bufobj **a_bop;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_bop != NULL)
		*ap->a_bop = &ap->a_vp->v_bufobj;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}
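
/*
 * Worked example for the mapping above: with an f_iosize of 16384 and
 * DEV_BSIZE of 512, btodb(16384) == 32, so logical block 10 yields
 * *a_bnp == 320.  The zero run lengths report that no clustering is
 * available.
 */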

int
vop_stdfsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct buf *bp;
	struct bufobj *bo;
	struct buf *nbp;
	int error = 0;
	int maxretry = 1000;	/* large, arbitrarily chosen */

	bo = &vp->v_bufobj;
	BO_LOCK(bo);
loop1:
	/*
	 * MARK/SCAN initialization to avoid infinite loops.
	 */
	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
		bp->b_vflags &= ~BV_SCANNED;
		bp->b_error = 0;
	}

	/*
	 * Flush all dirty buffers associated with a vnode.
	 */
loop2:
	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
			if (ap->a_waitfor != MNT_WAIT)
				continue;
			if (BUF_LOCK(bp,
			    LK_EXCLUSIVE | LK_INTERLOCK | LK_SLEEPFAIL,
			    BO_MTX(bo)) != 0) {
				BO_LOCK(bo);
				goto loop1;
			}
			BO_LOCK(bo);
		}
		BO_UNLOCK(bo);
		KASSERT(bp->b_bufobj == bo,
		    ("bp %p wrong b_bufobj %p should be %p",
		    bp, bp->b_bufobj, bo));
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("fsync: not dirty");
		if ((vp->v_object != NULL) && (bp->b_flags & B_CLUSTEROK)) {
			vfs_bio_awrite(bp);
		} else {
			bremfree(bp);
			bawrite(bp);
		}
		BO_LOCK(bo);
		goto loop2;
	}

	/*
	 * If synchronous the caller expects us to completely resolve all
	 * dirty buffers in the system.  Wait for in-progress I/O to
	 * complete (which could include background bitmap writes), then
	 * retry if dirty blocks still exist.
	 */
	if (ap->a_waitfor == MNT_WAIT) {
		bufobj_wwait(bo, 0, 0);
		if (bo->bo_dirty.bv_cnt > 0) {
			/*
			 * If we are unable to write any of these buffers
			 * then we fail now rather than trying endlessly
			 * to write them out.
			 */
			TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
				if ((error = bp->b_error) == 0)
					continue;
			if (error == 0 && --maxretry >= 0)
				goto loop1;
			error = EAGAIN;
		}
	}
	BO_UNLOCK(bo);
	if (error == EAGAIN)
		vprint("fsync: giving up on dirty", vp);

	return (error);
}

/*
 * Default getpages: delegate to the generic vnode pager, which performs
 * the I/O through VOP_BMAP() and the buffer cache.  See VOP_GETPAGES(9).
 */
int
vop_stdgetpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_reqpage);
}

int
vop_stdkqfilter(struct vop_kqfilter_args *ap)
{
	return vfs_kqfilter(ap);
}

/*
 * Default putpages: delegate to the generic vnode pager, which writes
 * the pages back through VOP_WRITE().  See VOP_PUTPAGES(9).
 */
int
vop_stdputpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals);
}

int
vop_stdvptofh(struct vop_vptofh_args *ap)
{
	return (EOPNOTSUPP);
}

int
vop_stdvptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct ucred *cred = ap->a_cred;
	char *buf = ap->a_buf;
	int *buflen = ap->a_buflen;
	char *dirbuf, *cpos;
	int i, error, eofflag, dirbuflen, flags, locked, len, covered;
	off_t off;
	ino_t fileno;
	struct vattr va;
	struct nameidata nd;
	struct thread *td;
	struct dirent *dp;
	struct vnode *mvp;

	i = *buflen;
	error = 0;
	covered = 0;
	td = curthread;

	if (vp->v_type != VDIR)
		return (ENOENT);

	error = VOP_GETATTR(vp, &va, cred);
	if (error)
		return (error);

	VREF(vp);
	locked = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp, 0);
	NDINIT_ATVP(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
	    "..", vp, td);
	flags = FREAD;
	error = vn_open_cred(&nd, &flags, 0, VN_OPEN_NOAUDIT, cred, NULL);
	if (error) {
		vn_lock(vp, locked | LK_RETRY);
		return (error);
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);

	mvp = *dvp = nd.ni_vp;

	if (vp->v_mount != (*dvp)->v_mount &&
	    ((*dvp)->v_vflag & VV_ROOT) &&
	    ((*dvp)->v_mount->mnt_flag & MNT_UNION)) {
		*dvp = (*dvp)->v_mount->mnt_vnodecovered;
		VREF(mvp);
		VOP_UNLOCK(mvp, 0);
		vn_close(mvp, FREAD, cred, td);
		VREF(*dvp);
		vn_lock(*dvp, LK_EXCLUSIVE | LK_RETRY);
		covered = 1;
	}

	fileno = va.va_fileid;

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	if ((*dvp)->v_type != VDIR) {
		error = ENOENT;
		goto out;
	}

	off = 0;
	len = 0;
	do {
		/* call VOP_READDIR of parent */
		error = get_next_dirent(*dvp, &dp, dirbuf, dirbuflen, &off,
		    &cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if ((dp->d_type != DT_WHT) &&
		    (dp->d_fileno == fileno)) {
			if (covered) {
				VOP_UNLOCK(*dvp, 0);
				vn_lock(mvp, LK_EXCLUSIVE | LK_RETRY);
				if (dirent_exists(mvp, dp->d_name, td)) {
					error = ENOENT;
					VOP_UNLOCK(mvp, 0);
					vn_lock(*dvp, LK_EXCLUSIVE | LK_RETRY);
					goto out;
				}
				VOP_UNLOCK(mvp, 0);
				vn_lock(*dvp, LK_EXCLUSIVE | LK_RETRY);
			}
			i -= dp->d_namlen;

			if (i < 0) {
				error = ENOMEM;
				goto out;
			}
			if (dp->d_namlen == 1 && dp->d_name[0] == '.') {
				error = ENOENT;
			} else {
				bcopy(dp->d_name, buf + i, dp->d_namlen);
				error = 0;
			}
			goto out;
		}
	} while (len > 0 || !eofflag);
	error = ENOENT;

out:
	free(dirbuf, M_TEMP);
	if (!error) {
		*buflen = i;
		vref(*dvp);
	}
	if (covered) {
		vput(*dvp);
		vrele(mvp);
	} else {
		VOP_UNLOCK(mvp, 0);
		vn_close(mvp, FREAD, cred, td);
	}
	vn_lock(vp, locked | LK_RETRY);
	return (error);
}
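
/*
 * Illustrative use: vn_fullpath(9) and the name cache fall back on
 * VOP_VPTOCNP() to map a vnode to a (parent vnode, name) pair, filling
 * the name in from the tail of the caller's buffer.  One step of the
 * walk looks roughly like:
 *
 *	char buf[MAXPATHLEN];
 *	int buflen = MAXPATHLEN;
 *
 *	error = VOP_VPTOCNP(vp, &dvp, cred, buf, &buflen);
 *	// On success the component name starts at buf + buflen and dvp
 *	// is a referenced parent; repeat on dvp to build the full path
 *	// from right to left.
 */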

int
vop_stdallocate(struct vop_allocate_args *ap)
{
#ifdef __notyet__
	struct statfs sfs;
#endif
	struct iovec aiov;
	struct vattr vattr, *vap;
	struct uio auio;
	off_t fsize, len, cur, offset;
	uint8_t *buf;
	struct thread *td;
	struct vnode *vp;
	size_t iosize;
	int error;

	buf = NULL;
	error = 0;
	td = curthread;
	vap = &vattr;
	vp = ap->a_vp;
	len = *ap->a_len;
	offset = *ap->a_offset;

	error = VOP_GETATTR(vp, vap, td->td_ucred);
	if (error != 0)
		goto out;
	fsize = vap->va_size;
	iosize = vap->va_blocksize;
	if (iosize == 0)
		iosize = BLKDEV_IOSIZE;
	if (iosize > MAXPHYS)
		iosize = MAXPHYS;
	buf = malloc(iosize, M_TEMP, M_WAITOK);

#ifdef __notyet__
	/*
	 * Check if the filesystem sets f_maxfilesize; if not use
	 * VOP_SETATTR to perform the check.
	 */
	error = VFS_STATFS(vp->v_mount, &sfs, td);
	if (error != 0)
		goto out;
	if (sfs.f_maxfilesize) {
		if (offset > sfs.f_maxfilesize || len > sfs.f_maxfilesize ||
		    offset + len > sfs.f_maxfilesize) {
			error = EFBIG;
			goto out;
		}
	} else
#endif
	if (offset + len > vap->va_size) {
		/*
		 * Test offset + len against the filesystem's maxfilesize.
		 */
		VATTR_NULL(vap);
		vap->va_size = offset + len;
		error = VOP_SETATTR(vp, vap, td->td_ucred);
		if (error != 0)
			goto out;
		VATTR_NULL(vap);
		vap->va_size = fsize;
		error = VOP_SETATTR(vp, vap, td->td_ucred);
		if (error != 0)
			goto out;
	}

	for (;;) {
		/*
		 * Read and write back anything below the nominal file
		 * size.  There's currently no way outside the filesystem
		 * to know whether this area is sparse or not.
		 */
		cur = iosize;
		if ((offset % iosize) != 0)
			cur -= (offset % iosize);
		if (cur > len)
			cur = len;
		if (offset < fsize) {
			aiov.iov_base = buf;
			aiov.iov_len = cur;
			auio.uio_iov = &aiov;
			auio.uio_iovcnt = 1;
			auio.uio_offset = offset;
			auio.uio_resid = cur;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_td = td;
			error = VOP_READ(vp, &auio, 0, td->td_ucred);
			if (error != 0)
				break;
			if (auio.uio_resid > 0) {
				bzero(buf + cur - auio.uio_resid,
				    auio.uio_resid);
			}
		} else {
			bzero(buf, cur);
		}

		aiov.iov_base = buf;
		aiov.iov_len = cur;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = offset;
		auio.uio_resid = cur;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_td = td;

		error = VOP_WRITE(vp, &auio, 0, td->td_ucred);
		if (error != 0)
			break;

		len -= cur;
		offset += cur;
		if (len == 0)
			break;
		if (should_yield())
			break;
	}

out:
	*ap->a_len = len;
	*ap->a_offset = offset;
	free(buf, M_TEMP);
	return (error);
}
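
/*
 * For reference, this is the backend of posix_fallocate(2); a userland
 * sketch:
 *
 *	int error = posix_fallocate(fd, 0, 1 << 20);	// reserve 1 MB
 *	if (error != 0)
 *		errc(1, error, "posix_fallocate");
 *
 * Lacking filesystem support for true reservation, the default above
 * forces the blocks to be allocated by reading and rewriting every
 * block in the range, zero-filling anything beyond the old EOF.
 */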

int
vop_stdadvise(struct vop_advise_args *ap)
{
	struct vnode *vp;
	off_t start, end;
	int error, vfslocked;

	vp = ap->a_vp;
	switch (ap->a_advice) {
	case POSIX_FADV_WILLNEED:
		/*
		 * Do nothing for now.  Filesystems should provide a
		 * custom method which starts an asynchronous read of
		 * the requested region.
		 */
		error = 0;
		break;
	case POSIX_FADV_DONTNEED:
		/*
		 * Flush any open FS buffers and then remove pages
		 * from the backing VM object.  Using vinvalbuf() here
		 * is a bit heavy-handed as it flushes all buffers for
		 * the given vnode, not just the buffers covering the
		 * requested range.
		 */
		error = 0;
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		if (vp->v_iflag & VI_DOOMED) {
			VOP_UNLOCK(vp, 0);
			VFS_UNLOCK_GIANT(vfslocked);
			break;
		}
		vinvalbuf(vp, V_CLEANONLY, 0, 0);
		if (vp->v_object != NULL) {
			start = trunc_page(ap->a_start);
			end = round_page(ap->a_end);
			VM_OBJECT_LOCK(vp->v_object);
			vm_object_page_cache(vp->v_object, OFF_TO_IDX(start),
			    OFF_TO_IDX(end));
			VM_OBJECT_UNLOCK(vp->v_object);
		}
		VOP_UNLOCK(vp, 0);
		VFS_UNLOCK_GIANT(vfslocked);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}
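
/*
 * For reference, this is reached from posix_fadvise(2); a userland
 * sketch:
 *
 *	// Hint that the whole file will not be accessed again soon.
 *	posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);
 *
 * Only POSIX_FADV_WILLNEED and POSIX_FADV_DONTNEED reach VOP_ADVISE();
 * the remaining advice values are recorded at the file descriptor
 * layer instead.
 */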

int
vop_stdunp_bind(struct vop_unp_bind_args *ap)
{

	ap->a_vp->v_socket = ap->a_socket;
	return (0);
}

int
vop_stdunp_connect(struct vop_unp_connect_args *ap)
{

	*ap->a_socket = ap->a_vp->v_socket;
	return (0);
}

int
vop_stdunp_detach(struct vop_unp_detach_args *ap)
{

	ap->a_vp->v_socket = NULL;
	return (0);
}
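
/*
 * These three maintain the vnode<->socket binding for UNIX-domain
 * sockets; a simplified sketch of the call sites in uipc_usrreq.c:
 *
 *	VOP_UNP_BIND(vp, so);		// bind(2): publish socket on vp
 *	VOP_UNP_CONNECT(vp, &so);	// connect(2): look up peer socket
 *	VOP_UNP_DETACH(vp);		// socket torn down: clear binding
 */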

static int
vop_stdis_text(struct vop_is_text_args *ap)
{

	return ((ap->a_vp->v_vflag & VV_TEXT) != 0);
}

static int
vop_stdset_text(struct vop_set_text_args *ap)
{

	ap->a_vp->v_vflag |= VV_TEXT;
	return (0);
}

static int
vop_stdunset_text(struct vop_unset_text_args *ap)
{

	ap->a_vp->v_vflag &= ~VV_TEXT;
	return (0);
}

static int
vop_stdget_writecount(struct vop_get_writecount_args *ap)
{

	*ap->a_writecount = ap->a_vp->v_writecount;
	return (0);
}

static int
vop_stdadd_writecount(struct vop_add_writecount_args *ap)
{

	ap->a_vp->v_writecount += ap->a_inc;
	return (0);
}

/*
 * VFS default ops,
 * used to fill the VFS function table with reasonable default return values.
 */
int
vfs_stdroot (mp, flags, vpp)
	struct mount *mp;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp)
	struct mount *mp;
	struct statfs *sbp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdquotactl (mp, cmds, uid, arg)
	struct mount *mp;
	int cmds;
	uid_t uid;
	void *arg;
{

	return (EOPNOTSUPP);
}

int
vfs_stdsync(mp, waitfor)
	struct mount *mp;
	int waitfor;
{
	struct vnode *vp, *mvp;
	struct thread *td;
	int error, lockreq, allerror = 0;

	td = curthread;
	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
loop:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vp->v_bufobj.bo_dirty.bv_cnt == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp, lockreq, td)) != 0) {
			if (error == ENOENT) {
				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
				goto loop;
			}
			continue;
		}
		error = VOP_FSYNC(vp, waitfor, td);
		if (error)
			allerror = error;
		vput(vp);
	}
	return (allerror);
}

int
vfs_stdnosync (mp, waitfor)
	struct mount *mp;
	int waitfor;
{

	return (0);
}

int
vfs_stdvget (mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, flags, vpp)
	struct mount *mp;
	struct fid *fhp;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname)
	struct mount *mp;
	int cmd;
	struct vnode *filename_vp;
	int attrnamespace;
	const char *attrname;
{

	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp, 0);
	return (EOPNOTSUPP);
}

int
vfs_stdsysctl(mp, op, req)
	struct mount *mp;
	fsctlop_t op;
	struct sysctl_req *req;
{

	return (EOPNOTSUPP);
}

/* end of vfs default ops */
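
/*
 * Illustrative sketch (hypothetical "myfs"): a filesystem's vfsops
 * table names only the entry points it implements; vfs_register() is
 * expected to fill slots left NULL with the vfs_std*() defaults above.
 *
 *	static struct vfsops myfs_vfsops = {
 *		.vfs_mount =	myfs_mount,
 *		.vfs_unmount =	myfs_unmount,
 *		.vfs_root =	myfs_root,
 *		.vfs_statfs =	myfs_statfs,
 *		// .vfs_sync omitted: a vfs_std* default is installed
 *	};
 */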