/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * John Heidemann of the UCLA Ficus project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)null_vnops.c	8.6 (Berkeley) 5/27/95
 *
 * Ancestors:
 *	@(#)lofs_vnops.c	1.2 (Berkeley) 6/18/92
 *	...and...
 *	@(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
 *
 * $FreeBSD: releng/9.0/sys/fs/nullfs/null_vnops.c 218965 2011-02-23 09:22:33Z brucec $
 */

/*
 * Null Layer
 *
 * (See mount_nullfs(8) for more information.)
 *
 * The null layer duplicates a portion of the filesystem
 * name space under a new name.  In this respect, it is
 * similar to the loopback filesystem.  It differs from
 * the loopback fs in two respects: it is implemented using
 * a stackable-layers technique, and its "null-node"s stack above
 * all lower-layer vnodes, not just over directory vnodes.
 *
 * The null layer has two purposes.  First, it serves as a demonstration
 * of layering by providing a layer which does nothing.  (It actually
 * does everything the loopback filesystem does, which is slightly
 * more than nothing.)  Second, the null layer can serve as a prototype
 * layer.  Since it provides all the necessary layer framework,
 * new filesystem layers can be created very easily by starting
 * with a null layer.
 *
 * The remainder of this comment examines the null layer as a basis
 * for constructing new layers.
 *
 *
 *
 * INSTANTIATING NEW NULL LAYERS
 *
 * New null layers are created with mount_nullfs(8).
 * Mount_nullfs(8) takes two arguments, the pathname
 * of the lower vfs (target-pn) and the pathname where the null
 * layer will appear in the namespace (alias-pn).  After
 * the null layer is put into place, the contents
 * of the target-pn subtree will be aliased under alias-pn.
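 *
 * For example, using the (purely illustrative) paths from the
 * walkthrough further below:
 *
 *	mount_nullfs /usr/include /dev/layer/null
 *
 * mounts a null layer with /usr/include as target-pn and
 * /dev/layer/null as alias-pn.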
 *
 *
 * OPERATION OF A NULL LAYER
 *
 * The null layer is the minimum filesystem layer,
 * simply bypassing all possible operations to the lower layer
 * for processing there.  The majority of its activity centers
 * on the bypass routine, through which nearly all vnode operations
 * pass.
 *
 * The bypass routine accepts arbitrary vnode operations for
 * handling by the lower layer.  It begins by examining vnode
 * operation arguments and replacing any null-nodes by their
 * lower-layer equivalents.  It then invokes the operation
 * on the lower layer.  Finally, it restores the null-nodes
 * in the arguments and, if a vnode is returned by the operation,
 * stacks a null-node on top of the returned vnode.
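 *
 * A condensed sketch of that flow (purely illustrative pseudo-code;
 * the names loosely follow the real code in null_bypass() below):
 *
 *	old_vp = *vp_p;				(remember the null-node)
 *	*vp_p = NULLVPTOLOWERVP(old_vp);	(map down to the lower vnode)
 *	error = VCALL(ap);			(invoke op on the lower layer)
 *	*vp_p = old_vp;				(restore the original argument)
 *	null_nodeget(mp, lowervp, vpp);		(stack a null-node on any
 *						 vnode the op returned)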
 *
 * Although bypass handles most operations, vop_getattr, vop_lock,
 * vop_unlock, vop_inactive, vop_reclaim, and vop_print are not
 * bypassed.  Vop_getattr must change the fsid being returned.
 * Vop_lock and vop_unlock must handle any locking for the
 * current vnode as well as pass the lock request down.
 * Vop_inactive and vop_reclaim are not bypassed so that
 * they can handle freeing null-layer specific data.  Vop_print
 * is not bypassed to avoid excessive debugging information.
 * Also, certain vnode operations (create, mknod, remove, link, rename,
 * mkdir, rmdir, and symlink) change the locking state within
 * the operation.  Ideally these operations should not change the
 * lock state; instead, they should be changed to let the caller of the
 * function unlock them.  Otherwise, all intermediate vnode layers
 * (such as union, umapfs, etc.) must intercept these functions to do
 * the necessary locking at their layer.
 *
 *
 * INSTANTIATING VNODE STACKS
 *
 * Mounting associates the null layer with a lower layer,
 * in effect stacking two VFSes.  Vnode stacks are instead
 * created on demand as files are accessed.
 *
 * The initial mount creates a single vnode stack for the
 * root of the new null layer.  All other vnode stacks
 * are created as a result of vnode operations on
 * this or other null vnode stacks.
 *
 * New vnode stacks come into existence as a result of
 * an operation which returns a vnode.
 * The bypass routine stacks a null-node above the new
 * vnode before returning it to the caller.
 *
 * For example, imagine mounting a null layer with
 *	"mount_nullfs /usr/include /dev/layer/null".
 * Changing directory to /dev/layer/null will assign
 * the root null-node (which was created when the null layer was mounted).
 * Now consider opening "sys".  A vop_lookup would be
 * done on the root null-node.  This operation would bypass through
 * to the lower layer which would return a vnode representing
 * the UFS "sys".  Null_bypass then builds a null-node
 * aliasing the UFS "sys" and returns this to the caller.
 * Later operations on the null-node "sys" will repeat this
 * process when constructing other vnode stacks.
 *
 *
 * CREATING OTHER FILE SYSTEM LAYERS
 *
 * One of the easiest ways to construct new filesystem layers is to make
 * a copy of the null layer, rename all files and variables, and
 * then begin modifying the copy.  Sed can be used to easily rename
 * all variables.
 *
 * The umap layer is an example of a layer descended from the
 * null layer.
 *
 *
 * INVOKING OPERATIONS ON LOWER LAYERS
 *
 * There are two techniques to invoke operations on a lower layer
 * when the operation cannot be completely bypassed.  Each method
 * is appropriate in different situations.  In both cases,
 * it is the responsibility of the aliasing layer to make
 * the operation arguments "correct" for the lower layer
 * by mapping vnode arguments to the lower layer.
 *
 * The first approach is to call the aliasing layer's bypass routine.
 * This method is most suitable when you wish to invoke the operation
 * currently being handled on the lower layer.  It has the advantage
 * that the bypass routine already must do argument mapping.
 * An example of this is null_getattr in the null layer.
 *
 * A second approach is to directly invoke vnode operations on
 * the lower layer with the VOP_OPERATIONNAME interface.
 * The advantage of this method is that it is easy to invoke
 * arbitrary operations on the lower layer.  The disadvantage
 * is that vnode arguments must be manually mapped.
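 *
 * A minimal sketch of the second approach (illustrative only; see
 * null_vptofh() below for a real instance of this pattern):
 *
 *	lvp = NULLVPTOLOWERVP(vp);	(manual argument mapping)
 *	error = VOP_VPTOFH(lvp, fhp);	(direct lower-layer invocation)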
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <fs/nullfs/null.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vnode_pager.h>

static int null_bug_bypass = 0;   /* for debugging: enables bypass printf'ing */
SYSCTL_INT(_debug, OID_AUTO, nullfs_bug_bypass, CTLFLAG_RW,
	&null_bug_bypass, 0, "");
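
/*
 * The debug knob above can be flipped at run time with a standard
 * sysctl(8) invocation, e.g. (illustrative):
 *
 *	sysctl debug.nullfs_bug_bypass=1
 */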

/*
 * This is the 10-Apr-92 bypass routine.
 * This version has been optimized for speed, throwing away some
 * safety checks.  It should still always work, but it's not as
 * robust to programmer errors.
 *
 * In general, we map all vnodes going down and unmap them on the way back.
 * As an exception to this, vnodes can be marked "unmapped" by setting
 * the Nth bit in the operation's vdesc_flags.
 *
 * Also, some BSD vnode operations have the side effect of vrele'ing
 * their arguments.  With stacking, the reference counts are held
 * by the upper node, not the lower one, so we must handle these
 * side effects here.  This is not of concern in Sun-derived systems
 * since there are no such side effects.
 *
 * This makes the following assumptions:
 * - only one returned vpp
 * - no INOUT vpp's (Sun's vop_open has one of these)
 * - the vnode operation vector of the first vnode should be used
 *   to determine what implementation of the op should be invoked
 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
 *   problems on rmdir'ing mount points and renaming?)
 */
int
null_bypass(struct vop_generic_args *ap)
{
	struct vnode **this_vp_p;
	int error;
	struct vnode *old_vps[VDESC_MAX_VPS];
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i;

	if (null_bug_bypass)
		printf ("null_bypass: %s\n", descp->vdesc_name);

#ifdef DIAGNOSTIC
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic ("null_bypass: no vp's in map");
#endif

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
		    VOPARG_OFFSETTO(struct vnode **,
			descp->vdesc_vp_offsets[i], ap);
		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.  (We must always map first vp or vclean fails.)
		 */
		if (i && (*this_vp_p == NULLVP ||
		    (*this_vp_p)->v_op != &null_vnodeops)) {
			old_vps[i] = NULLVP;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = NULLVPTOLOWERVP(*this_vp_p);
			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & VDESC_VP0_WILLRELE)
				VREF(*this_vp_p);
		}
	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	if (vps_p[0] && *vps_p[0])
		error = VCALL(ap);
	else {
		printf("null_bypass: no map for %s\n", descp->vdesc_name);
		error = EINVAL;
	}

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
#if 0
			if (reles & VDESC_VP0_WILLUNLOCK)
				VOP_UNLOCK(*(vps_p[i]), 0);
#endif
			if (reles & VDESC_VP0_WILLRELE)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp.
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		/*
		 * XXX - even though some ops return vp's through vpp,
		 * several ops actually vrele them before returning.
		 * We must avoid these ops.
		 * (This should go away when these ops are regularized.)
		 */
		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
			goto out;
		vppp = VOPARG_OFFSETTO(struct vnode ***,
		    descp->vdesc_vpp_offset, ap);
		if (*vppp)
			error = null_nodeget(old_vps[0]->v_mount, **vppp, *vppp);
	}

out:
	return (error);
}

/*
 * We have to carry on the locking protocol on the null layer vnodes
 * as we progress through the tree.  We also have to enforce read-only
 * if this layer is mounted read-only.
 */
static int
null_lookup(struct vop_lookup_args *ap)
{
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp = ap->a_dvp;
	int flags = cnp->cn_flags;
	struct vnode *vp, *ldvp, *lvp;
	int error;

	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);
	/*
	 * Although it is possible to call null_bypass(), we'll do
	 * a direct call to reduce overhead.
	 */
	ldvp = NULLVPTOLOWERVP(dvp);
	vp = lvp = NULL;
	error = VOP_LOOKUP(ldvp, &lvp, cnp);
	if (error == EJUSTRETURN && (flags & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
		error = EROFS;

	if ((error == 0 || error == EJUSTRETURN) && lvp != NULL) {
		if (ldvp == lvp) {
			*ap->a_vpp = dvp;
			VREF(dvp);
			vrele(lvp);
		} else {
			error = null_nodeget(dvp->v_mount, lvp, &vp);
			if (error)
				vput(lvp);
			else
				*ap->a_vpp = vp;
		}
	}
	return (error);
}

static int
null_open(struct vop_open_args *ap)
{
	int retval;
	struct vnode *vp, *ldvp;

	vp = ap->a_vp;
	ldvp = NULLVPTOLOWERVP(vp);
	retval = null_bypass(&ap->a_gen);
	if (retval == 0)
		vp->v_object = ldvp->v_object;
	return (retval);
}

/*
 * Setattr call.  Disallow write attempts if the layer is mounted read-only.
 */
static int
null_setattr(struct vop_setattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;

	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			if (vap->va_flags != VNOVAL)
				return (EOPNOTSUPP);
			return (0);
		case VREG:
		case VLNK:
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
		}
	}

	return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * We handle getattr only to change the fsid.
 */
static int
null_getattr(struct vop_getattr_args *ap)
{
	int error;

	if ((error = null_bypass((struct vop_generic_args *)ap)) != 0)
		return (error);

	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}

/*
 * Handle this to disallow write access if mounted read-only.
 */
static int
null_access(struct vop_access_args *ap)
{
	struct vnode *vp = ap->a_vp;
	accmode_t accmode = ap->a_accmode;

	/*
	 * Disallow write attempts on read-only layers,
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the filesystem.
	 */
	if (accmode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}
	return (null_bypass((struct vop_generic_args *)ap));
}

static int
null_accessx(struct vop_accessx_args *ap)
{
	struct vnode *vp = ap->a_vp;
	accmode_t accmode = ap->a_accmode;

	/*
	 * Disallow write attempts on read-only layers,
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the filesystem.
	 */
	if (accmode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}
	return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * Increasing the refcount of the lower vnode is needed at least for the
 * case when the lower FS is NFS, so that a sillyrename can be done if
 * the file is in use.
 * Unfortunately v_usecount is incremented in many places in
 * the kernel and, as such, there may be races that result in
 * the NFS client doing an extraneous silly rename, but that seems
 * preferable to not doing a silly rename when it is needed.
 */
static int
null_remove(struct vop_remove_args *ap)
{
	int retval, vreleit;
	struct vnode *lvp;

	if (vrefcnt(ap->a_vp) > 1) {
		lvp = NULLVPTOLOWERVP(ap->a_vp);
		VREF(lvp);
		vreleit = 1;
	} else
		vreleit = 0;
	retval = null_bypass(&ap->a_gen);
	if (vreleit != 0)
		vrele(lvp);
	return (retval);
}

/*
 * We handle this to disallow moving files from the null FS into the
 * lower FS.  We don't know why this is disallowed; possibly it should
 * be permitted.
 */
static int
null_rename(struct vop_rename_args *ap)
{
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *tvp = ap->a_tvp;

	/* Check for cross-device rename. */
	if ((fvp->v_mount != tdvp->v_mount) ||
	    (tvp && (fvp->v_mount != tvp->v_mount))) {
		if (tdvp == tvp)
			vrele(tdvp);
		else
			vput(tdvp);
		if (tvp)
			vput(tvp);
		vrele(fdvp);
		vrele(fvp);
		return (EXDEV);
	}

	return (null_bypass((struct vop_generic_args *)ap));
}

/*
 * We need to process our own vnode lock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_lock(struct vop_lock1_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	struct null_node *nn;
	struct vnode *lvp;
	int error;

	if ((flags & LK_INTERLOCK) == 0) {
		VI_LOCK(vp);
		ap->a_flags = flags |= LK_INTERLOCK;
	}
	nn = VTONULL(vp);
	/*
	 * If we're still active we must ask the lower layer to
	 * lock as ffs has special lock considerations in its
	 * vop lock.
	 */
	if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
		VI_LOCK_FLAGS(lvp, MTX_DUPOK);
		VI_UNLOCK(vp);
		/*
		 * We have to hold the vnode here to solve a potential
		 * reclaim race.  If we're forcibly vgone'd while we
		 * still have refs, a thread could be sleeping inside
		 * the lowervp's vop_lock routine.  When we vgone we will
		 * drop our last ref to the lowervp, which would allow it
		 * to be reclaimed.  The lowervp could then be recycled,
		 * in which case it is not legal to be sleeping in its VOP.
		 * We prevent it from being recycled by holding the vnode
		 * here.
		 */
		vholdl(lvp);
		error = VOP_LOCK(lvp, flags);

		/*
		 * We might have slept to get the lock and someone might
		 * have cleaned our vnode already, switching the vnode lock
		 * from the one in lowervp to v_lock in our own vnode
		 * structure.  Handle this case by reacquiring the correct
		 * lock in the requested mode.
		 */
		if (VTONULL(vp) == NULL && error == 0) {
			ap->a_flags &= ~(LK_TYPE_MASK | LK_INTERLOCK);
			switch (flags & LK_TYPE_MASK) {
			case LK_SHARED:
				ap->a_flags |= LK_SHARED;
				break;
			case LK_UPGRADE:
			case LK_EXCLUSIVE:
				ap->a_flags |= LK_EXCLUSIVE;
				break;
			default:
				panic("Unsupported lock request %d\n",
				    ap->a_flags);
			}
			VOP_UNLOCK(lvp, 0);
			error = vop_stdlock(ap);
		}
		vdrop(lvp);
	} else
		error = vop_stdlock(ap);

	return (error);
}

/*
 * We need to process our own vnode unlock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_unlock(struct vop_unlock_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	int mtxlkflag = 0;
	struct null_node *nn;
	struct vnode *lvp;
	int error;

	if ((flags & LK_INTERLOCK) != 0)
		mtxlkflag = 1;
	else if (mtx_owned(VI_MTX(vp)) == 0) {
		VI_LOCK(vp);
		mtxlkflag = 2;
	}
	nn = VTONULL(vp);
	if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
		VI_LOCK_FLAGS(lvp, MTX_DUPOK);
		flags |= LK_INTERLOCK;
		vholdl(lvp);
		VI_UNLOCK(vp);
		error = VOP_UNLOCK(lvp, flags);
		vdrop(lvp);
		if (mtxlkflag == 0)
			VI_LOCK(vp);
	} else {
		if (mtxlkflag == 2)
			VI_UNLOCK(vp);
		error = vop_stdunlock(ap);
	}

	return (error);
}

/*
 * There is no way to tell that someone issued a remove/rmdir operation
 * on the underlying filesystem.  For now we just have to release lowervp
 * as soon as possible.
 *
 * Note, we can't release any resources nor remove the vnode from the hash
 * before appropriate VXLOCK stuff is done because another process can find
 * this vnode in the hash during inactivation and may be sitting in vget()
 * waiting for null_inactive to unlock the vnode.  Thus we will do all of
 * that in VOP_RECLAIM.
 */
static int
null_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct thread *td = ap->a_td;

	vp->v_object = NULL;

	/*
	 * If this is the last reference, then free up the vnode
	 * so as not to tie up the lower vnodes.
	 */
	vrecycle(vp, td);

	return (0);
}

/*
 * Now, the VXLOCK is in force and we're free to destroy the null vnode.
 */
static int
null_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct null_node *xp = VTONULL(vp);
	struct vnode *lowervp = xp->null_lowervp;

	if (lowervp)
		null_hashrem(xp);
	/*
	 * Use the interlock to protect the clearing of v_data to
	 * prevent faults in null_lock().
	 */
	lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL);
	VI_LOCK(vp);
	vp->v_data = NULL;
	vp->v_object = NULL;
	vp->v_vnlock = &vp->v_lock;
	VI_UNLOCK(vp);
	if (lowervp)
		vput(lowervp);
	else
		panic("null_reclaim: reclaiming a node with no lowervp");
	free(xp, M_NULLFSNODE);

	return (0);
}

static int
null_print(struct vop_print_args *ap)
{
	struct vnode *vp = ap->a_vp;

	printf("\tvp=%p, lowervp=%p\n", vp, NULLVPTOLOWERVP(vp));
	return (0);
}

/* ARGSUSED */
static int
null_getwritemount(struct vop_getwritemount_args *ap)
{
	struct null_node *xp;
	struct vnode *lowervp;
	struct vnode *vp;

	vp = ap->a_vp;
	VI_LOCK(vp);
	xp = VTONULL(vp);
	if (xp && (lowervp = xp->null_lowervp)) {
		VI_LOCK_FLAGS(lowervp, MTX_DUPOK);
		VI_UNLOCK(vp);
		vholdl(lowervp);
		VI_UNLOCK(lowervp);
		VOP_GETWRITEMOUNT(lowervp, ap->a_mpp);
		vdrop(lowervp);
	} else {
		VI_UNLOCK(vp);
		*(ap->a_mpp) = NULL;
	}
	return (0);
}

static int
null_vptofh(struct vop_vptofh_args *ap)
{
	struct vnode *lvp;

	lvp = NULLVPTOLOWERVP(ap->a_vp);
	return VOP_VPTOFH(lvp, ap->a_fhp);
}

static int
null_vptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct vnode *lvp, *ldvp;
	struct ucred *cred = ap->a_cred;
	int error, locked;

	if (vp->v_type == VDIR)
		return (vop_stdvptocnp(ap));

	locked = VOP_ISLOCKED(vp);
	lvp = NULLVPTOLOWERVP(vp);
	vhold(lvp);
	VOP_UNLOCK(vp, 0); /* vp is held by vn_vptocnp_locked that called us */
	ldvp = lvp;
	error = vn_vptocnp(&ldvp, cred, ap->a_buf, ap->a_buflen);
	vdrop(lvp);
	if (error != 0) {
		vn_lock(vp, locked | LK_RETRY);
		return (ENOENT);
	}

	/*
	 * Exclusive lock is required by the insmntque1 call in
	 * null_nodeget().
	 */
	error = vn_lock(ldvp, LK_EXCLUSIVE);
	if (error != 0) {
		vn_lock(vp, locked | LK_RETRY);
		vdrop(ldvp);
		return (ENOENT);
	}
	vref(ldvp);
	vdrop(ldvp);
	error = null_nodeget(vp->v_mount, ldvp, dvp);
	if (error == 0) {
#ifdef DIAGNOSTIC
		NULLVPTOLOWERVP(*dvp);
#endif
		vhold(*dvp);
		vput(*dvp);
	} else
		vput(ldvp);

	vn_lock(vp, locked | LK_RETRY);
	return (error);
}

/*
 * Global vfs data structures
 */
struct vop_vector null_vnodeops = {
	.vop_bypass =		null_bypass,
	.vop_access =		null_access,
	.vop_accessx =		null_accessx,
	.vop_advlockpurge =	vop_stdadvlockpurge,
	.vop_bmap =		VOP_EOPNOTSUPP,
	.vop_getattr =		null_getattr,
	.vop_getwritemount =	null_getwritemount,
	.vop_inactive =		null_inactive,
	.vop_islocked =		vop_stdislocked,
	.vop_lock1 =		null_lock,
	.vop_lookup =		null_lookup,
	.vop_open =		null_open,
	.vop_print =		null_print,
	.vop_reclaim =		null_reclaim,
	.vop_remove =		null_remove,
	.vop_rename =		null_rename,
	.vop_setattr =		null_setattr,
	.vop_strategy =		VOP_EOPNOTSUPP,
	.vop_unlock =		null_unlock,
	.vop_vptocnp =		null_vptocnp,
	.vop_vptofh =		null_vptofh,
};