1 /*-
2 * Copyright (c) 1992, 1993, 1994, 1995 Jan-Simon Pendry.
3 * Copyright (c) 1992, 1993, 1994, 1995
4 * The Regents of the University of California. All rights reserved.
5 *
6 * This code is derived from software contributed to Berkeley by
7 * Jan-Simon Pendry.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * @(#)union_vnops.c 8.32 (Berkeley) 6/23/95
34 * $FreeBSD$
35 */
36
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/fcntl.h>
40 #include <sys/stat.h>
41 #include <sys/kernel.h>
42 #include <sys/vnode.h>
43 #include <sys/mount.h>
44 #include <sys/namei.h>
45 #include <sys/malloc.h>
46 #include <sys/bio.h>
47 #include <sys/buf.h>
48 #include <sys/lock.h>
49 #include <sys/sysctl.h>
50 #include <sys/unistd.h>
51 #include <sys/acl.h>
52 #include <sys/event.h>
53 #include <sys/extattr.h>
54 #include <sys/mac.h>
55 #include <fs/unionfs/union.h>
56
57 #include <vm/vm.h>
58 #include <vm/vnode_pager.h>
59
60 #include <vm/vm_page.h>
61 #include <vm/vm_object.h>
62
/* Debug trace switch consumed by the UDEBUG() macro; nonzero enables traces. */
int uniondebug = 0;

#if UDEBUG_ENABLED
/* Tracing compiled in: expose the knob read-write so it can be toggled live. */
SYSCTL_INT(_vfs, OID_AUTO, uniondebug, CTLFLAG_RW, &uniondebug, 0, "");
#else
/* Tracing compiled out: expose the knob read-only for inspection. */
SYSCTL_INT(_vfs, OID_AUTO, uniondebug, CTLFLAG_RD, &uniondebug, 0, "");
#endif
70
/* Forward declarations for the unionfs vnode operation handlers below. */
static int union_access(struct vop_access_args *ap);
static int union_aclcheck(struct vop_aclcheck_args *ap);
static int union_advlock(struct vop_advlock_args *ap);
static int union_close(struct vop_close_args *ap);
static int union_closeextattr(struct vop_closeextattr_args *ap);
static int union_create(struct vop_create_args *ap);
static int union_createvobject(struct vop_createvobject_args *ap);
static int union_deleteextattr(struct vop_deleteextattr_args *ap);
static int union_destroyvobject(struct vop_destroyvobject_args *ap);
static int union_fsync(struct vop_fsync_args *ap);
static int union_getacl(struct vop_getacl_args *ap);
static int union_getattr(struct vop_getattr_args *ap);
static int union_getextattr(struct vop_getextattr_args *ap);
static int union_getvobject(struct vop_getvobject_args *ap);
static int union_inactive(struct vop_inactive_args *ap);
static int union_ioctl(struct vop_ioctl_args *ap);
static int union_lease(struct vop_lease_args *ap);
static int union_link(struct vop_link_args *ap);
static int union_listextattr(struct vop_listextattr_args *ap);
static int union_lookup(struct vop_lookup_args *ap);
static int union_lookup1(struct vnode *udvp, struct vnode **dvp,
			      struct vnode **vpp,
			      struct componentname *cnp);
static int union_mkdir(struct vop_mkdir_args *ap);
static int union_mknod(struct vop_mknod_args *ap);
static int union_open(struct vop_open_args *ap);
static int union_openextattr(struct vop_openextattr_args *ap);
static int union_pathconf(struct vop_pathconf_args *ap);
static int union_print(struct vop_print_args *ap);
static int union_read(struct vop_read_args *ap);
static int union_readdir(struct vop_readdir_args *ap);
static int union_readlink(struct vop_readlink_args *ap);
static int union_getwritemount(struct vop_getwritemount_args *ap);
static int union_reclaim(struct vop_reclaim_args *ap);
static int union_remove(struct vop_remove_args *ap);
static int union_rename(struct vop_rename_args *ap);
static int union_revoke(struct vop_revoke_args *ap);
static int union_rmdir(struct vop_rmdir_args *ap);
static int union_poll(struct vop_poll_args *ap);
static int union_setacl(struct vop_setacl_args *ap);
static int union_setattr(struct vop_setattr_args *ap);
static int union_setlabel(struct vop_setlabel_args *ap);
static int union_setextattr(struct vop_setextattr_args *ap);
static int union_strategy(struct vop_strategy_args *ap);
static int union_symlink(struct vop_symlink_args *ap);
static int union_whiteout(struct vop_whiteout_args *ap);
/*
 * NOTE(review): union_write() is declared (and defined) with
 * struct vop_read_args rather than vop_write_args.  The members it
 * uses (a_vp, a_uio, a_ioflag, a_cred) are spelled the same in both
 * argument structures -- confirm the layouts against sys/vnode.h
 * before changing this declaration, and change the definition in
 * lock-step if you do.
 */
static int union_write(struct vop_read_args *ap);
118
/*
 * union_lock_upper:
 *
 *	Return the union node's upper vnode, referenced and exclusively
 *	locked, or NULL if there is no upper layer.  LK_CANRECURSE permits
 *	recursive acquisition of the upper vnode's lock.  Callers release
 *	the result with union_unlock_upper().
 */
static __inline
struct vnode *
union_lock_upper(struct union_node *un, struct thread *td)
{
	struct vnode *uppervp;

	if ((uppervp = un->un_uppervp) != NULL) {
		/* Take our own reference before locking. */
		VREF(uppervp);
		vn_lock(uppervp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY, td);
	}
	KASSERT((uppervp == NULL || vrefcnt(uppervp) > 0), ("uppervp usecount is 0"));
	return(uppervp);
}
132
/*
 * union_unlock_upper:
 *
 *	Release a vnode obtained from union_lock_upper().  vput() drops
 *	both the lock and the reference taken there; td is unused.
 */
static __inline
void
union_unlock_upper(struct vnode *uppervp, struct thread *td)
{
	vput(uppervp);
}
139
140 static __inline
141 struct vnode *
142 union_lock_other(struct union_node *un, struct thread *td)
143 {
144 struct vnode *vp;
145
146 if (un->un_uppervp != NULL) {
147 vp = union_lock_upper(un, td);
148 } else if ((vp = un->un_lowervp) != NULL) {
149 VREF(vp);
150 vn_lock(vp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY, td);
151 }
152 return(vp);
153 }
154
/*
 * union_unlock_other:
 *
 *	Release a vnode obtained from union_lock_other().  vput() drops
 *	both the lock and the reference; td is unused.
 */
static __inline
void
union_unlock_other(struct vnode *vp, struct thread *td)
{
	vput(vp);
}
161
162 /*
163 * union_lookup:
164 *
165 * udvp must be exclusively locked on call and will remain
166 * exclusively locked on return. This is the mount point
167 * for our filesystem.
168 *
169 * dvp Our base directory, locked and referenced.
170 * The passed dvp will be dereferenced and unlocked on return
171 * and a new dvp will be returned which is locked and
172 * referenced in the same variable.
173 *
 *	vpp is filled in with the result if no error occurred,
175 * locked and ref'd.
176 *
177 * If an error is returned, *vpp is set to NULLVP. If no
178 * error occurs, *vpp is returned with a reference and an
179 * exclusive lock.
180 */
181
static int
union_lookup1(udvp, pdvp, vpp, cnp)
	struct vnode *udvp;
	struct vnode **pdvp;
	struct vnode **vpp;
	struct componentname *cnp;
{
	int error;
	struct thread *td = cnp->cn_thread;
	struct vnode *dvp = *pdvp;
	struct vnode *tdvp;
	struct mount *mp;

	/*
	 * If stepping up the directory tree, check for going
	 * back across the mount point, in which case do what
	 * lookup would do by stepping back down the mount
	 * hierarchy.
	 */
	if (cnp->cn_flags & ISDOTDOT) {
		while ((dvp != udvp) && (dvp->v_vflag & VV_ROOT)) {
			/*
			 * Don't do the NOCROSSMOUNT check
			 * at this level.  By definition,
			 * union fs deals with namespaces, not
			 * filesystems.
			 */
			tdvp = dvp;
			dvp = dvp->v_mount->mnt_vnodecovered;
			/* Gain a reference on the covered vnode before dropping ours. */
			VREF(dvp);
			vput(tdvp);
			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td);
		}
	}

	/*
	 * Set return dvp to be the upperdvp 'parent directory'.
	 */
	*pdvp = dvp;

	/*
	 * If the VOP_LOOKUP() call generates an error, tdvp is invalid and
	 * no changes will have been made to dvp, so we are set to return.
	 */

	error = VOP_LOOKUP(dvp, &tdvp, cnp);
	if (error) {
		UDEBUG(("dvp %p error %d flags %lx\n", dvp, error, cnp->cn_flags));
		*vpp = NULL;
		return (error);
	}

	/*
	 * The parent directory will have been unlocked, unless lookup
	 * found the last component or if dvp == tdvp (tdvp must be locked).
	 *
	 * We want our dvp to remain locked and ref'd.  We also want tdvp
	 * to remain locked and ref'd.
	 */
	UDEBUG(("parentdir %p result %p flag %lx\n", dvp, tdvp, cnp->cn_flags));

	/* Relock the parent if VOP_LOOKUP() left it unlocked. */
	if (dvp != tdvp && (cnp->cn_flags & ISLASTCN) == 0)
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td);

	/*
	 * Lastly check if the current node is a mount point in
	 * which case walk up the mount hierarchy making sure not to
	 * bump into the root of the mount tree (ie. dvp != udvp).
	 *
	 * We use dvp as a temporary variable here, it is no longer related
	 * to the dvp above.  However, we have to ensure that both *pdvp and
	 * tdvp are locked on return.
	 */

	dvp = tdvp;
	while (
	    dvp != udvp &&
	    (dvp->v_type == VDIR) &&
	    (mp = dvp->v_mountedhere)
	) {
		int relock_pdvp = 0;

		/* vfs_busy() failure means an unmount raced us; re-test. */
		if (vfs_busy(mp, 0, 0, td))
			continue;

		if (dvp == *pdvp)
			relock_pdvp = 1;
		vput(dvp);
		dvp = NULL;
		/* Descend into the root vnode of the covering mount. */
		error = VFS_ROOT(mp, &dvp, td);

		vfs_unbusy(mp, td);

		/* Restore the parent lock dropped by the vput() above. */
		if (relock_pdvp)
			vn_lock(*pdvp, LK_EXCLUSIVE | LK_RETRY, td);

		if (error) {
			*vpp = NULL;
			return (error);
		}
	}
	*vpp = dvp;
	return (0);
}
286
static int
union_lookup(ap)
	struct vop_lookup_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	int error;
	int uerror, lerror;
	struct vnode *uppervp, *lowervp;
	struct vnode *upperdvp, *lowerdvp;
	struct vnode *dvp = ap->a_dvp;		/* starting dir */
	struct union_node *dun = VTOUNION(dvp);	/* associated union node */
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_thread;
	int lockparent = cnp->cn_flags & LOCKPARENT;
	struct union_mount *um = MOUNTTOUNIONMOUNT(dvp->v_mount);
	struct ucred *saved_cred = NULL;
	int iswhiteout;
	struct vattr va;

	*ap->a_vpp = NULLVP;

	/*
	 * Disallow write attempts to the filesystem mounted read-only.
	 */
	if ((cnp->cn_flags & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) {
		return (EROFS);
	}

	/*
	 * For any lookups we do, always return with the parent locked.
	 */
	cnp->cn_flags |= LOCKPARENT;

	lowerdvp = dun->un_lowervp;
	uppervp = NULLVP;
	lowervp = NULLVP;
	iswhiteout = 0;

	uerror = ENOENT;
	lerror = ENOENT;

	/*
	 * Get a private lock on uppervp and a reference, effectively
	 * taking it out of the union_node's control.
	 *
	 * We must lock upperdvp while holding our lock on dvp
	 * to avoid a deadlock.
	 */
	upperdvp = union_lock_upper(dun, td);

	/*
	 * Do the lookup in the upper level.
	 * If that level consumes additional pathnames,
	 * then assume that something special is going
	 * on and just return that vnode.
	 */
	if (upperdvp != NULLVP) {
		/*
		 * We do not have to worry about the DOTDOT case, we've
		 * already unlocked dvp.
		 */
		UDEBUG(("A %p\n", upperdvp));

		/*
		 * Do the lookup.   We must supply a locked and referenced
		 * upperdvp to the function and will get a new locked and
		 * referenced upperdvp back, with the old having been
		 * dereferenced.
		 *
		 * If an error is returned, uppervp will be NULLVP.  If no
		 * error occurs, uppervp will be the locked and referenced
		 * return vnode, or possibly NULL, depending on what is being
		 * requested.  It is possible that the returned uppervp
		 * will be the same as upperdvp.
		 */
		uerror = union_lookup1(um->um_uppervp, &upperdvp, &uppervp, cnp);
		UDEBUG((
		    "uerror %d upperdvp %p %d/%d, uppervp %p ref=%d/lck=%d\n",
		    uerror,
		    upperdvp,
		    vrefcnt(upperdvp),
		    VOP_ISLOCKED(upperdvp, NULL),
		    uppervp,
		    (uppervp ? vrefcnt(uppervp) : -99),
		    (uppervp ? VOP_ISLOCKED(uppervp, NULL) : -99)
		));

		/*
		 * Disallow write attempts to the filesystem mounted read-only.
		 */
		if (uerror == EJUSTRETURN && (cnp->cn_flags & ISLASTCN) &&
		    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
		    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME)) {
			error = EROFS;
			goto out;
		}

		/*
		 * Special case: If cn_consume != 0 then skip out.  The result
		 * of the lookup is transferred to our return variable.  If
		 * an error occurred we have to throw away the results.
		 */

		if (cnp->cn_consume != 0) {
			if ((error = uerror) == 0) {
				*ap->a_vpp = uppervp;
				uppervp = NULL;
			}
			goto out;
		}

		/*
		 * Calculate whiteout, fall through.
		 */

		if (uerror == ENOENT || uerror == EJUSTRETURN) {
			if (cnp->cn_flags & ISWHITEOUT) {
				iswhiteout = 1;
			} else if (lowerdvp != NULLVP) {
				int terror;

				/*
				 * An opaque upper directory hides everything
				 * in the lower layer beneath it.
				 */
				terror = VOP_GETATTR(upperdvp, &va,
					cnp->cn_cred, cnp->cn_thread);
				if (terror == 0 && (va.va_flags & OPAQUE))
					iswhiteout = 1;
			}
		}
	}

	/*
	 * In a similar way to the upper layer, do the lookup
	 * in the lower layer.   This time, if there is some
	 * component magic going on, then vput whatever we got
	 * back from the upper layer and return the lower vnode
	 * instead.
	 */

	if (lowerdvp != NULLVP && !iswhiteout) {
		int nameiop;

		UDEBUG(("B %p\n", lowerdvp));

		/*
		 * Force only LOOKUPs on the lower node, since
		 * we won't be making changes to it anyway.
		 */
		nameiop = cnp->cn_nameiop;
		cnp->cn_nameiop = LOOKUP;
		if (um->um_op == UNMNT_BELOW) {
			/* Lower lookups in a below-mount use the mount's creds. */
			saved_cred = cnp->cn_cred;
			cnp->cn_cred = um->um_cred;
		}

		/*
		 * We shouldn't have to worry about locking interactions
		 * between the lower layer and our union layer (w.r.t.
		 * `..' processing) because we don't futz with lowervp
		 * locks in the union-node instantiation code path.
		 *
		 * union_lookup1() requires lowervp to be locked on entry,
		 * and it will be unlocked on return.  The ref count will
		 * not change.  On return lowervp doesn't represent anything
		 * to us so we NULL it out.
		 */
		VREF(lowerdvp);
		vn_lock(lowerdvp, LK_EXCLUSIVE | LK_RETRY, td);
		lerror = union_lookup1(um->um_lowervp, &lowerdvp, &lowervp, cnp);
		if (lowerdvp == lowervp)
			vrele(lowerdvp);
		else
			vput(lowerdvp);
		lowerdvp = NULL;	/* lowerdvp invalid after vput */

		/* Restore the nameiop state changed above. */
		if (um->um_op == UNMNT_BELOW)
			cnp->cn_cred = saved_cred;
		cnp->cn_nameiop = nameiop;

		if (cnp->cn_consume != 0 || lerror == EACCES) {
			if ((error = lerror) == 0) {
				*ap->a_vpp = lowervp;
				lowervp = NULL;
			}
			goto out;
		}
	} else {
		UDEBUG(("C %p\n", lowerdvp));
		if ((cnp->cn_flags & ISDOTDOT) && dun->un_pvp != NULLVP) {
			if ((lowervp = LOWERVP(dun->un_pvp)) != NULL) {
				VREF(lowervp);
				vn_lock(lowervp, LK_EXCLUSIVE | LK_RETRY, td);
				lerror = 0;
			}
		}
	}

	/*
	 * Ok.  Now we have uerror, uppervp, upperdvp, lerror, and lowervp.
	 *
	 *	1. If both layers returned an error, select the upper layer.
	 *
	 *	2. If the upper layer failed and the bottom layer succeeded,
	 *	   two subcases occur:
	 *
	 *	a.	The bottom vnode is not a directory, in which case
	 *		just return a new union vnode referencing an
	 *		empty top layer and the existing bottom layer.
	 *
	 *	b.	The bottom vnode is a directory, in which case
	 *		create a new directory in the top layer and
	 *		and fall through to case 3.
	 *
	 *	3. If the top layer succeeded, then return a new union
	 *	   vnode referencing whatever the new top layer and
	 *	   whatever the bottom layer returned.
	 */

	/* case 1. */
	if ((uerror != 0) && (lerror != 0)) {
		error = uerror;
		goto out;
	}

	/* case 2. */
	if (uerror != 0 /* && (lerror == 0) */ ) {
		if (lowervp->v_type == VDIR) { /* case 2b. */
			KASSERT(uppervp == NULL, ("uppervp unexpectedly non-NULL"));
			/*
			 * Oops, uppervp has a problem, we may have to shadow.
			 */
			uerror = union_mkshadow(um, upperdvp, cnp, &uppervp);
			if (uerror) {
				error = uerror;
				goto out;
			}
		}
	}

	/*
	 * Must call union_allocvp() with both the upper and lower vnodes
	 * referenced and the upper vnode locked.   ap->a_vpp is returned
	 * referenced and locked.  lowervp, uppervp, and upperdvp are
	 * absorbed by union_allocvp() whether it succeeds or fails.
	 *
	 * upperdvp is the parent directory of uppervp which may be
	 * different, depending on the path, from dvp->un_uppervp.  That's
	 * why it is a separate argument.  Note that it must be unlocked.
	 *
	 * dvp must be locked on entry to the call and will be locked on
	 * return.
	 */

	if (uppervp && uppervp != upperdvp)
		VOP_UNLOCK(uppervp, 0, td);
	if (lowervp)
		VOP_UNLOCK(lowervp, 0, td);
	if (upperdvp)
		VOP_UNLOCK(upperdvp, 0, td);

	error = union_allocvp(ap->a_vpp, dvp->v_mount, dvp, upperdvp, cnp,
			      uppervp, lowervp, 1);

	UDEBUG(("Create %p = %p %p refs=%d\n", *ap->a_vpp, uppervp, lowervp, (*ap->a_vpp) ? vrefcnt(*ap->a_vpp) : -99));

	uppervp = NULL;
	upperdvp = NULL;
	lowervp = NULL;

	/*
	 *	Termination Code
	 *
	 *	- put away any extra junk laying around.  Note that lowervp
	 *	  (if not NULL) will never be the same as *ap->a_vp and
	 *	  neither will uppervp, because when we set that state we
	 *	  NULL-out lowervp or uppervp.  On the otherhand, upperdvp
	 *	  may match uppervp or *ap->a_vpp.
	 *
	 *	- relock/unlock dvp if appropriate.
	 */

out:
	if (upperdvp) {
		if (upperdvp == uppervp || upperdvp == *ap->a_vpp)
			vrele(upperdvp);
		else
			vput(upperdvp);
	}

	if (uppervp)
		vput(uppervp);

	if (lowervp)
		vput(lowervp);

	/*
	 * Restore LOCKPARENT state
	 */

	if (!lockparent)
		cnp->cn_flags &= ~LOCKPARENT;

	UDEBUG(("Out %d vpp %p/%d lower %p upper %p\n", error, *ap->a_vpp,
		((*ap->a_vpp) ? vrefcnt(*ap->a_vpp) : -99),
		lowervp, uppervp));

	if (error == 0 || error == EJUSTRETURN) {
		/*
		 * dvp lock state, determine whether to relock dvp.
		 * We are expected to unlock dvp unless:
		 *
		 *	- there was an error (other than EJUSTRETURN), or
		 *	- we hit the last component and lockparent is true
		 */
		if (*ap->a_vpp != dvp) {
			if (!lockparent || (cnp->cn_flags & ISLASTCN) == 0)
				VOP_UNLOCK(dvp, 0, td);
		}

		if (cnp->cn_namelen == 1 &&
		    cnp->cn_nameptr[0] == '.' &&
		    *ap->a_vpp != dvp) {
#ifdef	DIAGNOSTIC
			vprint("union_lookup: vp", *ap->a_vpp);
			vprint("union_lookup: dvp", dvp);
#endif
			panic("union_lookup returning . (%p) != startdir (%p)",
			    *ap->a_vpp, dvp);
		}
	}

	return (error);
}
624
/*
 * union_create:
 *
 *	a_dvp is locked on entry and remains locked on return.  a_vpp is returned
 *	locked if no error occurs, otherwise it is garbage.
 */

static int
union_create(ap)
	struct vop_create_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_thread;
	struct vnode *dvp;
	int error = EROFS;	/* no upper layer: nothing writable to create in */

	if ((dvp = union_lock_upper(dun, td)) != NULL) {
		struct vnode *vp;
		struct mount *mp;

		/* Creation always happens in the upper layer only. */
		error = VOP_CREATE(dvp, &vp, cnp, ap->a_vap);
		if (error == 0) {
			mp = ap->a_dvp->v_mount;
			VOP_UNLOCK(vp, 0, td);
			UDEBUG(("ALLOCVP-1 FROM %p REFS %d\n", vp, vrefcnt(vp)));
			/* Obtain the union vnode fronting the new upper vnode. */
			error = union_allocvp(ap->a_vpp, mp, NULLVP, NULLVP,
				cnp, vp, NULLVP, 1);
			UDEBUG(("ALLOCVP-2B FROM %p REFS %d\n", *ap->a_vpp, vrefcnt(vp)));
		}
		union_unlock_upper(dvp, td);
	}
	return (error);
}
664
665 static int
666 union_whiteout(ap)
667 struct vop_whiteout_args /* {
668 struct vnode *a_dvp;
669 struct componentname *a_cnp;
670 int a_flags;
671 } */ *ap;
672 {
673 struct union_node *un = VTOUNION(ap->a_dvp);
674 struct componentname *cnp = ap->a_cnp;
675 struct vnode *uppervp;
676 int error;
677
678 switch (ap->a_flags) {
679 case CREATE:
680 case DELETE:
681 uppervp = union_lock_upper(un, cnp->cn_thread);
682 if (uppervp != NULLVP) {
683 error = VOP_WHITEOUT(un->un_uppervp, cnp, ap->a_flags);
684 union_unlock_upper(uppervp, cnp->cn_thread);
685 } else
686 error = EOPNOTSUPP;
687 break;
688 case LOOKUP:
689 error = EOPNOTSUPP;
690 break;
691 default:
692 panic("union_whiteout: unknown op");
693 }
694 return (error);
695 }
696
697 /*
698 * union_mknod:
699 *
700 * a_dvp is locked on entry and should remain locked on return.
701 * a_vpp is garbagre whether an error occurs or not.
702 */
703
704 static int
705 union_mknod(ap)
706 struct vop_mknod_args /* {
707 struct vnode *a_dvp;
708 struct vnode **a_vpp;
709 struct componentname *a_cnp;
710 struct vattr *a_vap;
711 } */ *ap;
712 {
713 struct union_node *dun = VTOUNION(ap->a_dvp);
714 struct componentname *cnp = ap->a_cnp;
715 struct vnode *dvp;
716 int error = EROFS;
717
718 if ((dvp = union_lock_upper(dun, cnp->cn_thread)) != NULL) {
719 error = VOP_MKNOD(dvp, ap->a_vpp, cnp, ap->a_vap);
720 union_unlock_upper(dvp, cnp->cn_thread);
721 }
722 return (error);
723 }
724
/*
 * union_open:
 *
 *	run open VOP.  When opening the underlying vnode we have to mimic
 *	vn_open().  What we *really* need to do to avoid screwups if the
 *	open semantics change is to call vn_open().  For example, ufs blows
 *	up if you open a file but do not vmio it prior to writing.
 */

static int
union_open(ap)
	struct vop_open_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *tvp;
	int mode = ap->a_mode;
	struct ucred *cred = ap->a_cred;
	struct thread *td = ap->a_td;
	int error = 0;
	int tvpisupper = 1;

	/*
	 * If there is an existing upper vp then simply open that.
	 * The upper vp takes precedence over the lower vp.  When opening
	 * a lower vp for writing copy it to the uppervp and then open the
	 * uppervp.
	 *
	 * At the end of this section tvp will be left locked.
	 */
	if ((tvp = union_lock_upper(un, td)) == NULLVP) {
		/*
		 * If the lower vnode is being opened for writing, then
		 * copy the file contents to the upper vnode and open that,
		 * otherwise can simply open the lower vnode.
		 */
		tvp = un->un_lowervp;
		if ((ap->a_mode & FWRITE) && (tvp->v_type == VREG)) {
			/* With O_TRUNC there is no point copying old contents. */
			int docopy = !(mode & O_TRUNC);
			error = union_copyup(un, docopy, cred, td);
			/*
			 * NOTE: if union_copyup() failed, this may yield
			 * NULL; the cleanup path below checks for that.
			 */
			tvp = union_lock_upper(un, td);
		} else {
			/* Opening the lower layer read-only: count the open. */
			un->un_openl++;
			VREF(tvp);
			vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY, td);
			tvpisupper = 0;
		}
	}

	/*
	 * We are holding the correct vnode, open it.
	 */

	if (error == 0)
		error = VOP_OPEN(tvp, mode, cred, td, -1);

	/*
	 * This is absolutely necessary or UFS will blow up.
	 */
	if (error == 0 && vn_canvmio(tvp) == TRUE) {
		error = vfs_object_create(tvp, td, cred);
	}

	/*
	 * Release any locks held.
	 */
	if (tvpisupper) {
		if (tvp)
			union_unlock_upper(tvp, td);
	} else {
		vput(tvp);
	}
	return (error);
}
804
805 /*
806 * union_close:
807 *
808 * It is unclear whether a_vp is passed locked or unlocked. Whatever
809 * the case we do not change it.
810 */
811
812 static int
813 union_close(ap)
814 struct vop_close_args /* {
815 struct vnode *a_vp;
816 int a_fflag;
817 struct ucred *a_cred;
818 struct thread *a_td;
819 } */ *ap;
820 {
821 struct union_node *un = VTOUNION(ap->a_vp);
822 struct vnode *vp;
823
824 if ((vp = un->un_uppervp) == NULLVP) {
825 #ifdef UNION_DIAGNOSTIC
826 if (un->un_openl <= 0)
827 panic("union: un_openl cnt");
828 #endif
829 --un->un_openl;
830 vp = un->un_lowervp;
831 }
832 ap->a_vp = vp;
833 return (VCALL(vp, VOFFSET(vop_close), ap));
834 }
835
/*
 * Check access permission on the union vnode.
 * The access check being enforced is to check
 * against both the underlying vnode, and any
 * copied vnode.  This ensures that no additional
 * file permissions are given away simply because
 * the user caused an implicit file copy.
 */
static int
union_access(ap)
	struct vop_access_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct thread *td = ap->a_td;
	int error = EACCES;
	struct vnode *vp;

	/*
	 * Disallow write attempts on filesystems mounted read-only.
	 */
	if ((ap->a_mode & VWRITE) &&
	    (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)) {
		switch (ap->a_vp->v_type) {
		case VREG:
		case VDIR:
		case VLNK:
			return (EROFS);
		default:
			break;
		}
	}

	/* When an upper layer exists its access check is authoritative. */
	if ((vp = union_lock_upper(un, td)) != NULLVP) {
		ap->a_vp = vp;
		error = VCALL(vp, VOFFSET(vop_access), ap);
		union_unlock_upper(vp, td);
		return(error);
	}

	if ((vp = un->un_lowervp) != NULLVP) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
		ap->a_vp = vp;

		/*
		 * Remove VWRITE from a_mode if our mount point is RW, because
		 * we want to allow writes and lowervp may be read-only.
		 */
		if ((un->un_vnode->v_mount->mnt_flag & MNT_RDONLY) == 0)
			ap->a_mode &= ~VWRITE;

		error = VCALL(vp, VOFFSET(vop_access), ap);
		if (error == 0) {
			struct union_mount *um;

			um = MOUNTTOUNIONMOUNT(un->un_vnode->v_mount);

			/*
			 * For a below-mount, re-check with the mount's
			 * credentials as well.
			 */
			if (um->um_op == UNMNT_BELOW) {
				ap->a_cred = um->um_cred;
				error = VCALL(vp, VOFFSET(vop_access), ap);
			}
		}
		VOP_UNLOCK(vp, 0, td);
	}
	return(error);
}
907
/*
 * We handle getattr only to change the fsid and
 * track object sizes
 *
 * It's not clear whether VOP_GETATTR is to be
 * called with the vnode locked or not.  stat() calls
 * it with (vp) locked, and fstat() calls it with
 * (vp) unlocked.
 *
 * Because of this we cannot use our normal locking functions
 * if we do not intend to lock the main a_vp node.  At the moment
 * we are running without any specific locking at all, but beware
 * to any programmer that care must be taken if locking is added
 * to this function.
 */

static int
union_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;
	struct vattr *vap;
	struct vattr va;

	/*
	 * Some programs walk the filesystem hierarchy by counting
	 * links to directories to avoid stat'ing all the time.
	 * This means the link count on directories needs to be "correct".
	 * The only way to do that is to call getattr on both layers
	 * and fix up the link count.  The link count will not necessarily
	 * be accurate but will be large enough to defeat the tree walkers.
	 */

	vap = ap->a_vap;

	if ((vp = un->un_uppervp) != NULLVP) {
		/* Upper attributes go straight into the caller's vap. */
		error = VOP_GETATTR(vp, vap, ap->a_cred, ap->a_td);
		if (error)
			return (error);
		/* XXX isn't this dangerous without a lock? */
		union_newsize(ap->a_vp, vap->va_size, VNOVAL);
	}

	if (vp == NULLVP) {
		/* No upper layer: the lower attributes fill the caller's vap. */
		vp = un->un_lowervp;
	} else if (vp->v_type == VDIR && un->un_lowervp != NULLVP) {
		/* Both layers are directories: fetch lower attrs separately. */
		vp = un->un_lowervp;
		vap = &va;
	} else {
		vp = NULLVP;
	}

	if (vp != NULLVP) {
		error = VOP_GETATTR(vp, vap, ap->a_cred, ap->a_td);
		if (error)
			return (error);
		/* XXX isn't this dangerous without a lock? */
		union_newsize(ap->a_vp, VNOVAL, vap->va_size);
	}

	/* Merged directory: combine the layers' link counts. */
	if ((vap != ap->a_vap) && (vap->va_type == VDIR))
		ap->a_vap->va_nlink += vap->va_nlink;
	return (0);
}
979
static int
union_setattr(ap)
	struct vop_setattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct thread *td = ap->a_td;
	struct vattr *vap = ap->a_vap;
	struct vnode *uppervp;
	int error;

	/*
	 * Disallow write attempts on filesystems mounted read-only.
	 */
	if ((ap->a_vp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	     vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	     vap->va_mtime.tv_sec != VNOVAL ||
	     vap->va_mode != (mode_t)VNOVAL)) {
		return (EROFS);
	}

	/*
	 * Handle case of truncating lower object to zero size
	 * by creating a zero length upper object.  This is to
	 * handle the case of open with O_TRUNC and O_CREAT.
	 */
	if (un->un_uppervp == NULLVP && (un->un_lowervp->v_type == VREG)) {
		/* Only copy the old contents when not truncating to zero. */
		error = union_copyup(un, (ap->a_vap->va_size != 0),
			    ap->a_cred, ap->a_td);
		if (error)
			return (error);
	}

	/*
	 * Try to set attributes in upper layer,
	 * otherwise return read-only filesystem error.
	 */
	error = EROFS;
	if ((uppervp = union_lock_upper(un, td)) != NULLVP) {
		error = VOP_SETATTR(un->un_uppervp, ap->a_vap,
					ap->a_cred, ap->a_td);
		/* Keep the cached upper size in sync after a truncate/extend. */
		if ((error == 0) && (ap->a_vap->va_size != VNOVAL))
			union_newsize(ap->a_vp, ap->a_vap->va_size, VNOVAL);
		union_unlock_upper(uppervp, td);
	}
	return (error);
}
1032
1033 static int
1034 union_read(ap)
1035 struct vop_read_args /* {
1036 struct vnode *a_vp;
1037 struct uio *a_uio;
1038 int a_ioflag;
1039 struct ucred *a_cred;
1040 } */ *ap;
1041 {
1042 struct union_node *un = VTOUNION(ap->a_vp);
1043 struct thread *td = ap->a_uio->uio_td;
1044 struct vnode *uvp;
1045 int error;
1046
1047 uvp = union_lock_other(un, td);
1048 KASSERT(uvp != NULL, ("union_read: backing vnode missing!"));
1049
1050 error = VOP_READ(uvp, ap->a_uio, ap->a_ioflag, ap->a_cred);
1051 union_unlock_other(uvp, td);
1052
1053 /*
1054 * XXX
1055 * Perhaps the size of the underlying object has changed under
1056 * our feet. Take advantage of the offset information present
1057 * in the uio structure.
1058 */
1059 if (error == 0) {
1060 struct union_node *un = VTOUNION(ap->a_vp);
1061 off_t cur = ap->a_uio->uio_offset;
1062
1063 if (uvp == un->un_uppervp) {
1064 if (cur > un->un_uppersz)
1065 union_newsize(ap->a_vp, cur, VNOVAL);
1066 } else {
1067 if (cur > un->un_lowersz)
1068 union_newsize(ap->a_vp, VNOVAL, cur);
1069 }
1070 }
1071 return (error);
1072 }
1073
/*
 * union_write:
 *
 *	Write through to the upper layer vnode; a missing upper layer is
 *	a panic (writes are never directed at the lower layer).
 *
 *	XXX NOTE(review): declared with struct vop_read_args (matching the
 *	forward declaration above).  The members used here (a_vp, a_uio,
 *	a_ioflag, a_cred) are spelled the same in the write argument
 *	structure -- confirm the layouts against sys/vnode.h before
 *	changing either declaration.
 */
static int
union_write(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct thread *td = ap->a_uio->uio_td;
	struct vnode *uppervp;
	int error;

	if ((uppervp = union_lock_upper(un, td)) == NULLVP)
		panic("union: missing upper layer in write");

	error = VOP_WRITE(uppervp, ap->a_uio, ap->a_ioflag, ap->a_cred);

	/*
	 * The size of the underlying object may be changed by the
	 * write.
	 */
	if (error == 0) {
		off_t cur = ap->a_uio->uio_offset;

		/* Track growth of the upper object in the cached size. */
		if (cur > un->un_uppersz)
			union_newsize(ap->a_vp, cur, VNOVAL);
	}
	union_unlock_upper(uppervp, td);
	return (error);
}
1106
1107 static int
1108 union_lease(ap)
1109 struct vop_lease_args /* {
1110 struct vnode *a_vp;
1111 struct thread *a_td;
1112 struct ucred *a_cred;
1113 int a_flag;
1114 } */ *ap;
1115 {
1116 struct vnode *ovp = OTHERVP(ap->a_vp);
1117
1118 ap->a_vp = ovp;
1119 return (VCALL(ovp, VOFFSET(vop_lease), ap));
1120 }
1121
1122 static int
1123 union_ioctl(ap)
1124 struct vop_ioctl_args /* {
1125 struct vnode *a_vp;
1126 u_long a_command;
1127 caddr_t a_data;
1128 int a_fflag;
1129 struct ucred *a_cred;
1130 struct thread *a_td;
1131 } */ *ap;
1132 {
1133 struct vnode *ovp = OTHERVP(ap->a_vp);
1134
1135 ap->a_vp = ovp;
1136 return (VCALL(ovp, VOFFSET(vop_ioctl), ap));
1137 }
1138
1139 static int
1140 union_poll(ap)
1141 struct vop_poll_args /* {
1142 struct vnode *a_vp;
1143 int a_events;
1144 struct ucred *a_cred;
1145 struct thread *a_td;
1146 } */ *ap;
1147 {
1148 struct vnode *ovp = OTHERVP(ap->a_vp);
1149
1150 ap->a_vp = ovp;
1151 return (VCALL(ovp, VOFFSET(vop_poll), ap));
1152 }
1153
1154 static int
1155 union_revoke(ap)
1156 struct vop_revoke_args /* {
1157 struct vnode *a_vp;
1158 int a_flags;
1159 struct thread *a_td;
1160 } */ *ap;
1161 {
1162 struct vnode *vp = ap->a_vp;
1163
1164 if (UPPERVP(vp))
1165 VOP_REVOKE(UPPERVP(vp), ap->a_flags);
1166 if (LOWERVP(vp))
1167 VOP_REVOKE(LOWERVP(vp), ap->a_flags);
1168 vgone(vp);
1169 return (0);
1170 }
1171
1172 static int
1173 union_fsync(ap)
1174 struct vop_fsync_args /* {
1175 struct vnode *a_vp;
1176 struct ucred *a_cred;
1177 int a_waitfor;
1178 struct thread *a_td;
1179 } */ *ap;
1180 {
1181 int error = 0;
1182 struct thread *td = ap->a_td;
1183 struct vnode *targetvp;
1184 struct union_node *un = VTOUNION(ap->a_vp);
1185
1186 if ((targetvp = union_lock_other(un, td)) != NULLVP) {
1187 error = VOP_FSYNC(targetvp, ap->a_cred, ap->a_waitfor, td);
1188 union_unlock_other(targetvp, td);
1189 }
1190
1191 return (error);
1192 }
1193
1194 /*
1195 * union_remove:
1196 *
1197 * Remove the specified cnp. The dvp and vp are passed to us locked
1198 * and must remain locked on return.
1199 */
1200
static int
union_remove(ap)
	struct vop_remove_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_vp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_thread;
	struct vnode *uppervp;
	struct vnode *upperdvp;
	int error;

	/* Removal always happens in the upper layer; the parent must exist there. */
	if ((upperdvp = union_lock_upper(dun, td)) == NULLVP)
		panic("union remove: null upper vnode");

	if ((uppervp = union_lock_upper(un, td)) != NULLVP) {
		/*
		 * The file exists in the upper layer: remove it there.
		 * If removing it would expose a lower-layer copy,
		 * union_dowhiteout() requests a whiteout entry instead.
		 */
		if (union_dowhiteout(un, cnp->cn_cred, td))
			cnp->cn_flags |= DOWHITEOUT;
		if (cnp->cn_flags & DOWHITEOUT)		/* XXX fs corruption */
			error = EOPNOTSUPP;
		else
			error = VOP_REMOVE(upperdvp, uppervp, cnp);
		if (!error)
			union_removed_upper(un);
		union_unlock_upper(uppervp, td);
	} else {
		/*
		 * Lower-layer-only file: hide it by creating a whiteout
		 * in the upper-layer directory.
		 */
		error = union_mkwhiteout(
			MOUNTTOUNIONMOUNT(ap->a_dvp->v_mount),
			upperdvp, ap->a_cnp, un->un_path);
	}
	union_unlock_upper(upperdvp, td);
	return (error);
}
1238
1239 /*
1240 * union_link:
1241 *
1242 * tdvp and vp will be locked on entry.
1243 * tdvp and vp should remain locked on return.
1244 */
1245
static int
union_link(ap)
	struct vop_link_args /* {
		struct vnode *a_tdvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_thread;
	struct union_node *dun = VTOUNION(ap->a_tdvp);
	struct vnode *vp;
	struct vnode *tdvp;
	int error = 0;

	/*
	 * Determine the source vnode to link from.  If a_vp is not a
	 * union vnode (different vop vector), use it directly;
	 * otherwise link from the union node's upper vnode, copying
	 * the file up first if it only exists in the lower layer
	 * (hard links must live entirely in the upper layer).
	 */
	if (ap->a_tdvp->v_op != ap->a_vp->v_op) {
		vp = ap->a_vp;
	} else {
		struct union_node *tun = VTOUNION(ap->a_vp);

		if (tun->un_uppervp == NULLVP) {
#if 0
			if (dun->un_uppervp == tun->un_dirvp) {
				if (dun->un_flags & UN_ULOCK) {
					dun->un_flags &= ~UN_ULOCK;
					VOP_UNLOCK(dun->un_uppervp, 0, td);
				}
			}
#endif
			error = union_copyup(tun, 1, cnp->cn_cred, td);
#if 0
			if (dun->un_uppervp == tun->un_dirvp) {
				vn_lock(dun->un_uppervp,
					LK_EXCLUSIVE | LK_RETRY, td);
				dun->un_flags |= UN_ULOCK;
			}
#endif
			if (error)
				return (error);
		}
		/* Lock the upper source vnode for the VOP_LINK() call. */
		vp = tun->un_uppervp;
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	}

	/*
	 * Make sure upper is locked, then unlock the union directory we were
	 * called with to avoid a deadlock while we are calling VOP_LINK() on
	 * the upper (with tdvp locked and vp not locked).  Our ap->a_tdvp
	 * is expected to be locked on return.
	 */

	if ((tdvp = union_lock_upper(dun, td)) == NULLVP)
		return (EROFS);

	VOP_UNLOCK(ap->a_tdvp, 0, td);	/* unlock calling node */
	error = VOP_LINK(tdvp, vp, cnp); /* call link on upper */

	/*
	 * Unlock tun->un_uppervp if we locked it above.
	 */
	if (ap->a_tdvp->v_op == ap->a_vp->v_op)
		VOP_UNLOCK(vp, 0, td);
	/*
	 * We have to unlock tdvp prior to relocking our calling node in
	 * order to avoid a deadlock.  We also have to unlock ap->a_vp
	 * before relocking the directory, but then we have to relock
	 * ap->a_vp as our caller expects.
	 */
	VOP_UNLOCK(ap->a_vp, 0, td);
	union_unlock_upper(tdvp, td);
	vn_lock(ap->a_tdvp, LK_EXCLUSIVE | LK_RETRY, td);
	vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY, td);
	return (error);
}
1320
/*
 * union_rename:
 *
 *	Rename by substituting each union vnode argument with its
 *	upper-layer vnode, then handing the whole operation to the
 *	upper filesystem's VOP_RENAME().  Source objects living only
 *	in the lower layer are copied up (regular files) or rejected
 *	(directories); a DOWHITEOUT is requested when a lower copy
 *	would otherwise be exposed.  VOP_RENAME() consumes all four
 *	vnode references, so on success there is no cleanup here.
 */
static int
union_rename(ap)
	struct vop_rename_args /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap;
{
	int error;
	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *tvp = ap->a_tvp;

	/*
	 * Figure out what fdvp to pass to our upper or lower vnode.  If we
	 * replace the fdvp, release the original one and ref the new one.
	 */

	if (fdvp->v_op == union_vnodeop_p) {	/* always true */
		struct union_node *un = VTOUNION(fdvp);
		if (un->un_uppervp == NULLVP) {
			/*
			 * this should never happen in normal
			 * operation but might if there was
			 * a problem creating the top-level shadow
			 * directory.
			 */
			error = EXDEV;
			goto bad;
		}
		fdvp = un->un_uppervp;
		VREF(fdvp);
		vrele(ap->a_fdvp);
	}

	/*
	 * Figure out what fvp to pass to our upper or lower vnode.  If we
	 * replace the fvp, release the original one and ref the new one.
	 */

	if (fvp->v_op == union_vnodeop_p) {	/* always true */
		struct union_node *un = VTOUNION(fvp);
#if 0
		struct union_mount *um = MOUNTTOUNIONMOUNT(fvp->v_mount);
#endif

		if (un->un_uppervp == NULLVP) {
			switch(fvp->v_type) {
			case VREG:
				/* Copy the lower file up so the rename can
				 * proceed entirely within the upper layer. */
				vn_lock(un->un_vnode, LK_EXCLUSIVE | LK_RETRY, ap->a_fcnp->cn_thread);
				error = union_copyup(un, 1, ap->a_fcnp->cn_cred, ap->a_fcnp->cn_thread);
				VOP_UNLOCK(un->un_vnode, 0, ap->a_fcnp->cn_thread);
				if (error)
					goto bad;
				break;
			case VDIR:
				/*
				 * XXX not yet.
				 *
				 * There is only one way to rename a directory
				 * based in the lowervp, and that is to copy
				 * the entire directory hierarchy.  Otherwise
				 * it would not last across a reboot.
				 */
#if 0
				vrele(fvp);
				fvp = NULL;
				vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY, ap->a_fcnp->cn_thread);
				error = union_mkshadow(um, fdvp,
					    ap->a_fcnp, &un->un_uppervp);
				VOP_UNLOCK(fdvp, 0, ap->a_fcnp->cn_thread);
				if (un->un_uppervp)
					VOP_UNLOCK(un->un_uppervp, 0, ap->a_fcnp->cn_thread);
				if (error)
					goto bad;
				break;
#endif
			default:
				error = EXDEV;
				goto bad;
			}
		}

		/* A surviving lower copy must be hidden by a whiteout. */
		if (un->un_lowervp != NULLVP)
			ap->a_fcnp->cn_flags |= DOWHITEOUT;
		fvp = un->un_uppervp;
		VREF(fvp);
		vrele(ap->a_fvp);
	}

	/*
	 * Figure out what tdvp (destination directory) to pass to the
	 * lower level.  If we replace it with uppervp, we need to vput the
	 * old one.  The exclusive lock is transfered to what we will pass
	 * down in the VOP_RENAME() and we replace uppervp with a simple
	 * reference.
	 */

	if (tdvp->v_op == union_vnodeop_p) {
		struct union_node *un = VTOUNION(tdvp);

		if (un->un_uppervp == NULLVP) {
			/*
			 * This should never happen in normal
			 * operation but might if there was
			 * a problem creating the top-level shadow
			 * directory.
			 */
			error = EXDEV;
			goto bad;
		}

		/*
		 * New tdvp is a lock and reference on uppervp.
		 * Put away the old tdvp.
		 */
		tdvp = union_lock_upper(un, ap->a_tcnp->cn_thread);
		vput(ap->a_tdvp);
	}

	/*
	 * Figure out what tvp (destination file) to pass to the
	 * lower level.
	 *
	 * If the uppervp file does not exist, put away the (wrong)
	 * file and change tvp to NULL.
	 */

	if (tvp != NULLVP && tvp->v_op == union_vnodeop_p) {
		struct union_node *un = VTOUNION(tvp);

		tvp = union_lock_upper(un, ap->a_tcnp->cn_thread);
		vput(ap->a_tvp);
		/* note: tvp may be NULL */
	}

	/*
	 * VOP_RENAME() releases/vputs prior to returning, so we have no
	 * cleanup to do.
	 */

	return (VOP_RENAME(fdvp, fvp, ap->a_fcnp, tdvp, tvp, ap->a_tcnp));

	/*
	 * Error.  We still have to release / vput the various elements.
	 */

bad:
	vrele(fdvp);
	if (fvp)
		vrele(fvp);
	vput(tdvp);
	if (tvp != NULLVP) {
		if (tvp != tdvp)
			vput(tvp);
		else
			vrele(tvp);
	}
	return (error);
}
1485
1486 static int
1487 union_mkdir(ap)
1488 struct vop_mkdir_args /* {
1489 struct vnode *a_dvp;
1490 struct vnode **a_vpp;
1491 struct componentname *a_cnp;
1492 struct vattr *a_vap;
1493 } */ *ap;
1494 {
1495 struct union_node *dun = VTOUNION(ap->a_dvp);
1496 struct componentname *cnp = ap->a_cnp;
1497 struct thread *td = cnp->cn_thread;
1498 struct vnode *upperdvp;
1499 int error = EROFS;
1500
1501 if ((upperdvp = union_lock_upper(dun, td)) != NULLVP) {
1502 struct vnode *vp;
1503
1504 error = VOP_MKDIR(upperdvp, &vp, cnp, ap->a_vap);
1505 union_unlock_upper(upperdvp, td);
1506
1507 if (error == 0) {
1508 VOP_UNLOCK(vp, 0, td);
1509 UDEBUG(("ALLOCVP-2 FROM %p REFS %d\n", vp, vrefcnt(vp)));
1510 error = union_allocvp(ap->a_vpp, ap->a_dvp->v_mount,
1511 ap->a_dvp, NULLVP, cnp, vp, NULLVP, 1);
1512 UDEBUG(("ALLOCVP-2B FROM %p REFS %d\n", *ap->a_vpp, vrefcnt(vp)));
1513 }
1514 }
1515 return (error);
1516 }
1517
static int
union_rmdir(ap)
	struct vop_rmdir_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_vp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_thread;
	struct vnode *upperdvp;
	struct vnode *uppervp;
	int error;

	/* Removal happens in the upper layer; the parent must exist there. */
	if ((upperdvp = union_lock_upper(dun, td)) == NULLVP)
		panic("union rmdir: null upper vnode");

	if ((uppervp = union_lock_upper(un, td)) != NULLVP) {
		/*
		 * The directory exists in the upper layer: remove it there.
		 * If removal would expose a lower-layer copy,
		 * union_dowhiteout() requests a whiteout entry instead.
		 */
		if (union_dowhiteout(un, cnp->cn_cred, td))
			cnp->cn_flags |= DOWHITEOUT;
		if (cnp->cn_flags & DOWHITEOUT)		/* XXX fs corruption */
			error = EOPNOTSUPP;
		else
			error = VOP_RMDIR(upperdvp, uppervp, ap->a_cnp);
		if (!error)
			union_removed_upper(un);
		union_unlock_upper(uppervp, td);
	} else {
		/*
		 * Lower-layer-only directory: hide it with a whiteout
		 * in the upper-layer parent.
		 */
		error = union_mkwhiteout(
			MOUNTTOUNIONMOUNT(ap->a_dvp->v_mount),
			dun->un_uppervp, ap->a_cnp, un->un_path);
	}
	union_unlock_upper(upperdvp, td);
	return (error);
}
1555
1556 /*
1557 * union_symlink:
1558 *
1559 * dvp is locked on entry and remains locked on return. a_vpp is garbage
1560 * (unused).
1561 */
1562
1563 static int
1564 union_symlink(ap)
1565 struct vop_symlink_args /* {
1566 struct vnode *a_dvp;
1567 struct vnode **a_vpp;
1568 struct componentname *a_cnp;
1569 struct vattr *a_vap;
1570 char *a_target;
1571 } */ *ap;
1572 {
1573 struct union_node *dun = VTOUNION(ap->a_dvp);
1574 struct componentname *cnp = ap->a_cnp;
1575 struct thread *td = cnp->cn_thread;
1576 struct vnode *dvp;
1577 int error = EROFS;
1578
1579 if ((dvp = union_lock_upper(dun, td)) != NULLVP) {
1580 error = VOP_SYMLINK(dvp, ap->a_vpp, cnp, ap->a_vap,
1581 ap->a_target);
1582 union_unlock_upper(dvp, td);
1583 }
1584 return (error);
1585 }
1586
1587 /*
 * union_readdir() works in concert with getdirentries() and
1589 * readdir(3) to provide a list of entries in the unioned
1590 * directories. getdirentries() is responsible for walking
1591 * down the union stack. readdir(3) is responsible for
1592 * eliminating duplicate names from the returned data stream.
1593 */
1594 static int
1595 union_readdir(ap)
1596 struct vop_readdir_args /* {
1597 struct vnode *a_vp;
1598 struct uio *a_uio;
1599 struct ucred *a_cred;
1600 int *a_eofflag;
1601 u_long *a_cookies;
1602 int a_ncookies;
1603 } */ *ap;
1604 {
1605 struct union_node *un = VTOUNION(ap->a_vp);
1606 struct thread *td = ap->a_uio->uio_td;
1607 struct vnode *uvp;
1608 int error = 0;
1609
1610 if ((uvp = union_lock_upper(un, td)) != NULLVP) {
1611 ap->a_vp = uvp;
1612 error = VCALL(uvp, VOFFSET(vop_readdir), ap);
1613 union_unlock_upper(uvp, td);
1614 }
1615 return(error);
1616 }
1617
1618 static int
1619 union_readlink(ap)
1620 struct vop_readlink_args /* {
1621 struct vnode *a_vp;
1622 struct uio *a_uio;
1623 struct ucred *a_cred;
1624 } */ *ap;
1625 {
1626 int error;
1627 struct union_node *un = VTOUNION(ap->a_vp);
1628 struct uio *uio = ap->a_uio;
1629 struct thread *td = uio->uio_td;
1630 struct vnode *vp;
1631
1632 vp = union_lock_other(un, td);
1633 KASSERT(vp != NULL, ("union_readlink: backing vnode missing!"));
1634
1635 ap->a_vp = vp;
1636 error = VCALL(vp, VOFFSET(vop_readlink), ap);
1637 union_unlock_other(vp, td);
1638
1639 return (error);
1640 }
1641
1642 static int
1643 union_getwritemount(ap)
1644 struct vop_getwritemount_args /* {
1645 struct vnode *a_vp;
1646 struct mount **a_mpp;
1647 } */ *ap;
1648 {
1649 struct vnode *vp = ap->a_vp;
1650 struct vnode *uvp = UPPERVP(vp);
1651
1652 if (uvp == NULL) {
1653 VI_LOCK(vp);
1654 if (vp->v_iflag & VI_FREE) {
1655 VI_UNLOCK(vp);
1656 return (EOPNOTSUPP);
1657 }
1658 VI_UNLOCK(vp);
1659 return (EACCES);
1660 }
1661 return(VOP_GETWRITEMOUNT(uvp, ap->a_mpp));
1662 }
1663
1664 /*
1665 * union_inactive:
1666 *
1667 * Called with the vnode locked. We are expected to unlock the vnode.
1668 */
1669
static int
union_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct thread *td = ap->a_td;
	struct union_node *un = VTOUNION(vp);

	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our union_node is in the
	 * cache and reusable.
	 *
	 */

	/* Drop any cached readdir shadow directory state. */
	if (un->un_dircache != NULL)
		union_dircache_free(un);

#if 0
	if ((un->un_flags & UN_ULOCK) && un->un_uppervp) {
		un->un_flags &= ~UN_ULOCK;
		VOP_UNLOCK(un->un_uppervp, 0, td);
	}
#endif

	/* Per the inactive contract, the vnode is unlocked here. */
	VOP_UNLOCK(vp, 0, td);

	/*
	 * A node that is no longer cached has no further use; force
	 * it to reclaim now rather than lingering.
	 */
	if ((un->un_flags & UN_CACHED) == 0)
		vgone(vp);

	return (0);
}
1706
1707 static int
1708 union_reclaim(ap)
1709 struct vop_reclaim_args /* {
1710 struct vnode *a_vp;
1711 } */ *ap;
1712 {
1713 union_freevp(ap->a_vp);
1714
1715 return (0);
1716 }
1717
1718 /*
1719 * unionvp do not hold a VM object and there is no need to create one for
1720 * upper or lower vp because it is done in the union_open()
1721 */
1722 static int
1723 union_createvobject(ap)
1724 struct vop_createvobject_args /* {
1725 struct vnode *vp;
1726 struct ucred *cred;
1727 struct thread *td;
1728 } */ *ap;
1729 {
1730 struct vnode *vp = ap->a_vp;
1731
1732 vp->v_vflag |= VV_OBJBUF;
1733 return (0);
1734 }
1735
1736 /*
1737 * We have nothing to destroy and this operation shouldn't be bypassed.
1738 */
1739 static int
1740 union_destroyvobject(ap)
1741 struct vop_destroyvobject_args /* {
1742 struct vnode *vp;
1743 } */ *ap;
1744 {
1745 struct vnode *vp = ap->a_vp;
1746
1747 vp->v_vflag &= ~VV_OBJBUF;
1748 return (0);
1749 }
1750
1751 /*
1752 * Get VM object from the upper or lower vp
1753 */
1754 static int
1755 union_getvobject(ap)
1756 struct vop_getvobject_args /* {
1757 struct vnode *vp;
1758 struct vm_object **objpp;
1759 } */ *ap;
1760 {
1761 struct vnode *ovp = OTHERVP(ap->a_vp);
1762
1763 if (ovp == NULL)
1764 return EINVAL;
1765 return (VOP_GETVOBJECT(ovp, ap->a_objpp));
1766 }
1767
1768 static int
1769 union_print(ap)
1770 struct vop_print_args /* {
1771 struct vnode *a_vp;
1772 } */ *ap;
1773 {
1774 struct vnode *vp = ap->a_vp;
1775
1776 printf("\tvp=%p, uppervp=%p, lowervp=%p\n",
1777 vp, UPPERVP(vp), LOWERVP(vp));
1778 if (UPPERVP(vp) != NULLVP)
1779 vprint("union: upper", UPPERVP(vp));
1780 if (LOWERVP(vp) != NULLVP)
1781 vprint("union: lower", LOWERVP(vp));
1782
1783 return (0);
1784 }
1785
1786 static int
1787 union_pathconf(ap)
1788 struct vop_pathconf_args /* {
1789 struct vnode *a_vp;
1790 int a_name;
1791 int *a_retval;
1792 } */ *ap;
1793 {
1794 int error;
1795 struct thread *td = curthread; /* XXX */
1796 struct union_node *un = VTOUNION(ap->a_vp);
1797 struct vnode *vp;
1798
1799 vp = union_lock_other(un, td);
1800 KASSERT(vp != NULL, ("union_pathconf: backing vnode missing!"));
1801
1802 ap->a_vp = vp;
1803 error = VCALL(vp, VOFFSET(vop_pathconf), ap);
1804 union_unlock_other(vp, td);
1805
1806 return (error);
1807 }
1808
1809 static int
1810 union_advlock(ap)
1811 struct vop_advlock_args /* {
1812 struct vnode *a_vp;
1813 caddr_t a_id;
1814 int a_op;
1815 struct flock *a_fl;
1816 int a_flags;
1817 } */ *ap;
1818 {
1819 register struct vnode *ovp = OTHERVP(ap->a_vp);
1820
1821 ap->a_vp = ovp;
1822 return (VCALL(ovp, VOFFSET(vop_advlock), ap));
1823 }
1824
1825
/*
 * XXX - vop_strategy must be hand coded because it has no
 * vnode in its arguments.
 * YYY - and it is not coherent with anything
 *
 * This goes away with a merged VM/buffer cache.
 */
static int
union_strategy(ap)
	struct vop_strategy_args /* {
		struct vnode *a_vp;
		struct buf *a_bp;
	} */ *ap;
{
	struct buf *bp = ap->a_bp;
	/* I/O goes to the backing layer associated with the buffer's vnode. */
	struct vnode *othervp = OTHERVP(bp->b_vp);

	/* Sanity: the vop argument and the buffer must name the same vnode. */
	KASSERT(ap->a_vp == ap->a_bp->b_vp, ("%s(%p != %p)",
	    __func__, ap->a_vp, ap->a_bp->b_vp));
#ifdef DIAGNOSTIC
	if (othervp == NULLVP)
		panic("union_strategy: nil vp");
	/* The lower layer is never a write target for union vnodes. */
	if ((bp->b_iocmd == BIO_WRITE) &&
	    (othervp == LOWERVP(bp->b_vp)))
		panic("union_strategy: writing to lowervp");
#endif
	return (VOP_STRATEGY(othervp, bp));
}
1854
1855 static int
1856 union_getacl(ap)
1857 struct vop_getacl_args /* {
1858 struct vnode *a_vp;
1859 acl_type_t a_type;
1860 struct acl *a_aclp;
1861 struct ucred *a_cred;
1862 struct thread *a_td;
1863 } */ *ap;
1864 {
1865 int error;
1866 struct union_node *un = VTOUNION(ap->a_vp);
1867 struct vnode *vp;
1868
1869 vp = union_lock_other(un, ap->a_td);
1870 ap->a_vp = vp;
1871 error = VCALL(vp, VOFFSET(vop_getacl), ap);
1872 union_unlock_other(vp, ap->a_td);
1873
1874 return (error);
1875 }
1876
1877 static int
1878 union_setacl(ap)
1879 struct vop_setacl_args /* {
1880 struct vnode *a_vp;
1881 acl_type_t a_type;
1882 struct acl *a_aclp;
1883 struct ucred *a_cred;
1884 struct thread *a_td;
1885 } */ *ap;
1886 {
1887 int error;
1888 struct union_node *un = VTOUNION(ap->a_vp);
1889 struct vnode *vp;
1890
1891 vp = union_lock_other(un, ap->a_td);
1892 ap->a_vp = vp;
1893 error = VCALL(vp, VOFFSET(vop_setacl), ap);
1894 union_unlock_other(vp, ap->a_td);
1895
1896 return (error);
1897 }
1898
1899 static int
1900 union_aclcheck(ap)
1901 struct vop_aclcheck_args /* {
1902 struct vnode *a_vp;
1903 acl_type_t a_type;
1904 struct acl *a_aclp;
1905 struct ucred *a_cred;
1906 struct thread *a_td;
1907 } */ *ap;
1908 {
1909 struct vnode *ovp = OTHERVP(ap->a_vp);
1910
1911 ap->a_vp = ovp;
1912 return (VCALL(ovp, VOFFSET(vop_aclcheck), ap));
1913 }
1914
1915 static int
1916 union_closeextattr(ap)
1917 struct vop_closeextattr_args /* {
1918 struct vnode *a_vp;
1919 int a_commit;
1920 struct ucred *a_cred;
1921 struct thread *a_td;
1922 } */ *ap;
1923 {
1924 int error;
1925 struct union_node *un = VTOUNION(ap->a_vp);
1926 struct vnode *vp;
1927
1928 vp = union_lock_other(un, ap->a_td);
1929 ap->a_vp = vp;
1930 error = VCALL(vp, VOFFSET(vop_closeextattr), ap);
1931 union_unlock_other(vp, ap->a_td);
1932
1933 return (error);
1934 }
1935
1936 static int
1937 union_getextattr(ap)
1938 struct vop_getextattr_args /* {
1939 struct vnode *a_vp;
1940 int a_attrnamespace;
1941 const char *a_name;
1942 struct uio *a_uio;
1943 size_t *a_size;
1944 struct ucred *a_cred;
1945 struct thread *a_td;
1946 } */ *ap;
1947 {
1948 int error;
1949 struct union_node *un = VTOUNION(ap->a_vp);
1950 struct vnode *vp;
1951
1952 vp = union_lock_other(un, ap->a_td);
1953 ap->a_vp = vp;
1954 error = VCALL(vp, VOFFSET(vop_getextattr), ap);
1955 union_unlock_other(vp, ap->a_td);
1956
1957 return (error);
1958 }
1959
1960 static int
1961 union_listextattr(ap)
1962 struct vop_listextattr_args /* {
1963 struct vnode *a_vp;
1964 int a_attrnamespace;
1965 struct uio *a_uio;
1966 size_t *a_size;
1967 struct ucred *a_cred;
1968 struct thread *a_td;
1969 } */ *ap;
1970 {
1971 int error;
1972 struct union_node *un = VTOUNION(ap->a_vp);
1973 struct vnode *vp;
1974
1975 vp = union_lock_other(un, ap->a_td);
1976 ap->a_vp = vp;
1977 error = VCALL(vp, VOFFSET(vop_listextattr), ap);
1978 union_unlock_other(vp, ap->a_td);
1979
1980 return (error);
1981 }
1982
1983 static int
1984 union_openextattr(ap)
1985 struct vop_openextattr_args /* {
1986 struct vnode *a_vp;
1987 struct ucred *a_cred;
1988 struct thread *a_td;
1989 } */ *ap;
1990 {
1991 int error;
1992 struct union_node *un = VTOUNION(ap->a_vp);
1993 struct vnode *vp;
1994
1995 vp = union_lock_other(un, ap->a_td);
1996 ap->a_vp = vp;
1997 error = VCALL(vp, VOFFSET(vop_openextattr), ap);
1998 union_unlock_other(vp, ap->a_td);
1999
2000 return (error);
2001 }
2002
2003 static int
2004 union_deleteextattr(ap)
2005 struct vop_deleteextattr_args /* {
2006 struct vnode *a_vp;
2007 int a_attrnamespace;
2008 const char *a_name;
2009 struct ucred *a_cred;
2010 struct thread *a_td;
2011 } */ *ap;
2012 {
2013 int error;
2014 struct union_node *un = VTOUNION(ap->a_vp);
2015 struct vnode *vp;
2016
2017 vp = union_lock_other(un, ap->a_td);
2018 ap->a_vp = vp;
2019 error = VCALL(vp, VOFFSET(vop_deleteextattr), ap);
2020 union_unlock_other(vp, ap->a_td);
2021
2022 return (error);
2023 }
2024
2025 static int
2026 union_setextattr(ap)
2027 struct vop_setextattr_args /* {
2028 struct vnode *a_vp;
2029 int a_attrnamespace;
2030 const char *a_name;
2031 struct uio *a_uio;
2032 struct ucred *a_cred;
2033 struct thread *a_td;
2034 } */ *ap;
2035 {
2036 int error;
2037 struct union_node *un = VTOUNION(ap->a_vp);
2038 struct vnode *vp;
2039
2040 vp = union_lock_other(un, ap->a_td);
2041 ap->a_vp = vp;
2042 error = VCALL(vp, VOFFSET(vop_setextattr), ap);
2043 union_unlock_other(vp, ap->a_td);
2044
2045 return (error);
2046 }
2047
2048 static int
2049 union_setlabel(ap)
2050 struct vop_setlabel_args /* {
2051 struct vnode *a_vp;
2052 struct label *a_label;
2053 struct ucred *a_cred;
2054 struct thread *a_td;
2055 } */ *ap;
2056 {
2057 int error;
2058 struct union_node *un = VTOUNION(ap->a_vp);
2059 struct vnode *vp;
2060
2061 vp = union_lock_other(un, ap->a_td);
2062 ap->a_vp = vp;
2063 error = VCALL(vp, VOFFSET(vop_setlabel), ap);
2064 union_unlock_other(vp, ap->a_td);
2065
2066 return (error);
2067 }
2068
2069 /*
2070 * Global vfs data structures
2071 */
/* Filled in at vfs init time by VNODEOP_SET() below. */
vop_t **union_vnodeop_p;
/* Vnode operations vector for union vnodes; unlisted ops fall through
 * to vop_defaultop. */
static struct vnodeopv_entry_desc union_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_defaultop },
	{ &vop_access_desc,		(vop_t *) union_access },
	{ &vop_aclcheck_desc,		(vop_t *) union_aclcheck },
	{ &vop_advlock_desc,		(vop_t *) union_advlock },
	{ &vop_bmap_desc,		(vop_t *) vop_eopnotsupp },
	{ &vop_close_desc,		(vop_t *) union_close },
	{ &vop_closeextattr_desc,	(vop_t *) union_closeextattr },
	{ &vop_create_desc,		(vop_t *) union_create },
	{ &vop_createvobject_desc,	(vop_t *) union_createvobject },
	{ &vop_deleteextattr_desc,	(vop_t *) union_deleteextattr },
	{ &vop_destroyvobject_desc,	(vop_t *) union_destroyvobject },
	{ &vop_fsync_desc,		(vop_t *) union_fsync },
	{ &vop_getattr_desc,		(vop_t *) union_getattr },
	{ &vop_getacl_desc,		(vop_t *) union_getacl },
	{ &vop_getextattr_desc,		(vop_t *) union_getextattr },
	{ &vop_getvobject_desc,		(vop_t *) union_getvobject },
	{ &vop_inactive_desc,		(vop_t *) union_inactive },
	{ &vop_ioctl_desc,		(vop_t *) union_ioctl },
	{ &vop_lease_desc,		(vop_t *) union_lease },
	{ &vop_link_desc,		(vop_t *) union_link },
	{ &vop_listextattr_desc,	(vop_t *) union_listextattr },
	{ &vop_lookup_desc,		(vop_t *) union_lookup },
	{ &vop_mkdir_desc,		(vop_t *) union_mkdir },
	{ &vop_mknod_desc,		(vop_t *) union_mknod },
	{ &vop_open_desc,		(vop_t *) union_open },
	{ &vop_openextattr_desc,	(vop_t *) union_openextattr },
	{ &vop_pathconf_desc,		(vop_t *) union_pathconf },
	{ &vop_poll_desc,		(vop_t *) union_poll },
	{ &vop_print_desc,		(vop_t *) union_print },
	{ &vop_read_desc,		(vop_t *) union_read },
	{ &vop_readdir_desc,		(vop_t *) union_readdir },
	{ &vop_readlink_desc,		(vop_t *) union_readlink },
	{ &vop_getwritemount_desc,	(vop_t *) union_getwritemount },
	{ &vop_reclaim_desc,		(vop_t *) union_reclaim },
	{ &vop_remove_desc,		(vop_t *) union_remove },
	{ &vop_rename_desc,		(vop_t *) union_rename },
	{ &vop_revoke_desc,		(vop_t *) union_revoke },
	{ &vop_rmdir_desc,		(vop_t *) union_rmdir },
	{ &vop_setacl_desc,		(vop_t *) union_setacl },
	{ &vop_setattr_desc,		(vop_t *) union_setattr },
	{ &vop_setextattr_desc,		(vop_t *) union_setextattr },
	{ &vop_setlabel_desc,		(vop_t *) union_setlabel },
	{ &vop_strategy_desc,		(vop_t *) union_strategy },
	{ &vop_symlink_desc,		(vop_t *) union_symlink },
	{ &vop_whiteout_desc,		(vop_t *) union_whiteout },
	{ &vop_write_desc,		(vop_t *) union_write },
	{ NULL, NULL }
};
static struct vnodeopv_desc union_vnodeop_opv_desc =
	{ &union_vnodeop_p, union_vnodeop_entries };

VNODEOP_SET(union_vnodeop_opv_desc);
Cache object: 2198f8def0d6cbc8db5fd1a25d64f991
|