The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/fs/nullfs/null_subr.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 1992, 1993
    3  *      The Regents of the University of California.  All rights reserved.
    4  *
    5  * This code is derived from software donated to Berkeley by
    6  * Jan-Simon Pendry.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 4. Neither the name of the University nor the names of its contributors
   17  *    may be used to endorse or promote products derived from this software
   18  *    without specific prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   30  * SUCH DAMAGE.
   31  *
   32  *      @(#)null_subr.c 8.7 (Berkeley) 5/14/95
   33  *
   34  * $FreeBSD: releng/6.0/sys/fs/nullfs/null_subr.c 144904 2005-04-11 11:17:20Z jeff $
   35  */
   36 
   37 #include <sys/param.h>
   38 #include <sys/systm.h>
   39 #include <sys/kernel.h>
   40 #include <sys/lock.h>
   41 #include <sys/mutex.h>
   42 #include <sys/malloc.h>
   43 #include <sys/mount.h>
   44 #include <sys/proc.h>
   45 #include <sys/vnode.h>
   46 
   47 #include <fs/nullfs/null.h>
   48 
#define LOG2_SIZEVNODE 8		/* log2(sizeof struct vnode) */
#define NNULLNODECACHE 16		/* size hint passed to hashinit() */

/*
 * Null layer cache:
 * Each cache entry holds a reference to the lower vnode
 * along with a pointer to the alias vnode.  When an
 * entry is added the lower vnode is VREF'd.  When the
 * alias is removed the lower vnode is vrele'd.
 */

/*
 * Hash on the lower vnode's address: shift out the low bits (which
 * are the same for all vnodes, since a vnode is about 2^LOG2_SIZEVNODE
 * bytes) and mask the result into the table with null_node_hash.
 */
#define NULL_NHASH(vp) \
	(&null_node_hashtbl[(((uintptr_t)vp)>>LOG2_SIZEVNODE) & null_node_hash])

/* Table of null_node chains keyed by lower vnode; null_node_hash is the
 * mask returned by hashinit().  null_hashmtx guards both table and chains. */
static LIST_HEAD(null_node_hashhead, null_node) *null_node_hashtbl;
static u_long null_node_hash;
struct mtx null_hashmtx;

static MALLOC_DEFINE(M_NULLFSHASH, "NULLFS hash", "NULLFS hash table");
MALLOC_DEFINE(M_NULLFSNODE, "NULLFS node", "NULLFS vnode private part");

static struct vnode * null_hashget(struct mount *, struct vnode *);
static struct vnode * null_hashins(struct mount *, struct null_node *);
   72 
   73 /*
   74  * Initialise cache headers
   75  */
   76 int
   77 nullfs_init(vfsp)
   78         struct vfsconf *vfsp;
   79 {
   80 
   81         NULLFSDEBUG("nullfs_init\n");           /* printed during system boot */
   82         null_node_hashtbl = hashinit(NNULLNODECACHE, M_NULLFSHASH, &null_node_hash);
   83         mtx_init(&null_hashmtx, "nullhs", NULL, MTX_DEF);
   84         return (0);
   85 }
   86 
   87 int
   88 nullfs_uninit(vfsp)
   89         struct vfsconf *vfsp;
   90 {
   91 
   92         mtx_destroy(&null_hashmtx);
   93         free(null_node_hashtbl, M_NULLFSHASH);
   94         return (0);
   95 }
   96 
/*
 * Return a VREF'ed alias for lower vnode if already exists, else 0.
 * Lower vnode should be locked on entry and will be left locked on exit.
 */
static struct vnode *
null_hashget(mp, lowervp)
	struct mount *mp;
	struct vnode *lowervp;
{
	struct thread *td = curthread;	/* XXX */
	struct null_node_hashhead *hd;
	struct null_node *a;
	struct vnode *vp;

	/*
	 * Find hash base, and then search the (two-way) linked
	 * list looking for a null_node structure which is referencing
	 * the lower vnode.  If found, increment the null_node
	 * reference count (but NOT the lower vnode's VREF counter).
	 * A match must also be on the same mount, since several nullfs
	 * mounts may stack over the same lower vnode.
	 */
	hd = NULL_NHASH(lowervp);
loop:
	mtx_lock(&null_hashmtx);
	LIST_FOREACH(a, hd, null_hash) {
		if (a->null_lowervp == lowervp && NULLTOV(a)->v_mount == mp) {
			vp = NULLTOV(a);
			/*
			 * Take the vnode interlock before dropping the
			 * hash mutex, so the vnode cannot disappear
			 * between the two locks.
			 */
			VI_LOCK(vp);
			/*
			 * If the nullfs node is being recycled we have
			 * to wait until it finishes prior to scanning
			 * again.
			 */
			mtx_unlock(&null_hashmtx);
			if ((vp->v_iflag & VI_DOOMED) != 0) {
				/* Wait for recycling to finish. */
				VOP_LOCK(vp, LK_EXCLUSIVE|LK_INTERLOCK, td);
				VOP_UNLOCK(vp, 0, td);
				goto loop;
			}
			/*
			 * We need to clear the OWEINACT flag here as this
			 * may lead vget() to try to lock our vnode which
			 * is already locked via lowervp.
			 */
			vp->v_iflag &= ~VI_OWEINACT;
			/* vget() consumes the interlock and adds our ref. */
			vget(vp, LK_INTERLOCK, td);
			return (vp);
		}
	}
	mtx_unlock(&null_hashmtx);
	return (NULLVP);
}
  149 
/*
 * Act like null_hashget, but add passed null_node to hash if no existing
 * node found.
 *
 * Returns NULLVP when xp was inserted (caller's node is now the alias),
 * or a vget'ed existing alias when another thread won the race, in which
 * case xp is NOT inserted and the caller must dispose of it.
 */
static struct vnode *
null_hashins(mp, xp)
	struct mount *mp;
	struct null_node *xp;
{
	struct thread *td = curthread;	/* XXX */
	struct null_node_hashhead *hd;
	struct null_node *oxp;
	struct vnode *ovp;

	hd = NULL_NHASH(xp->null_lowervp);
loop:
	mtx_lock(&null_hashmtx);
	LIST_FOREACH(oxp, hd, null_hash) {
		if (oxp->null_lowervp == xp->null_lowervp &&
		    NULLTOV(oxp)->v_mount == mp) {
			ovp = NULLTOV(oxp);
			/* Interlock first, then drop the hash mutex (same
			 * ordering as null_hashget). */
			VI_LOCK(ovp);
			/*
			 * If the nullfs node is being recycled we have
			 * to wait until it finishes prior to scanning
			 * again.
			 */
			mtx_unlock(&null_hashmtx);
			if ((ovp->v_iflag & VI_DOOMED) != 0) {
				VOP_LOCK(ovp, LK_EXCLUSIVE|LK_INTERLOCK, td);
				VOP_UNLOCK(ovp, 0, td);
				goto loop;
			}
			ovp->v_iflag &= ~VI_OWEINACT; /* see hashget comment */
			vget(ovp, LK_INTERLOCK, td);
			return (ovp);
		}
	}
	/* No existing alias: publish xp while still holding the mutex. */
	LIST_INSERT_HEAD(hd, xp, null_hash);
	mtx_unlock(&null_hashmtx);
	return (NULLVP);
}
  192 
/*
 * Make a new or get existing nullfs node.
 * Vp is the alias vnode, lowervp is the lower vnode.
 *
 * The lowervp assumed to be locked and having "spare" reference. This routine
 * vrele lowervp if nullfs node was taken from hash. Otherwise it "transfers"
 * the caller's "spare" reference to created nullfs vnode.
 */
int
null_nodeget(mp, lowervp, vpp)
	struct mount *mp;
	struct vnode *lowervp;
	struct vnode **vpp;
{
	struct null_node *xp;
	struct vnode *vp;
	int error;

	/* Lookup the hash firstly */
	*vpp = null_hashget(mp, lowervp);
	if (*vpp != NULL) {
		/* Existing alias found; drop the caller's spare reference. */
		vrele(lowervp);
		return (0);
	}

	/*
	 * We do not serialize vnode creation, instead we will check for
	 * duplicates later, when adding new vnode to hash.
	 *
	 * Note that duplicate can only appear in hash if the lowervp is
	 * locked LK_SHARED.
	 */

	/*
	 * Do the MALLOC before the getnewvnode since doing so afterward
	 * might cause a bogus v_data pointer to get dereferenced
	 * elsewhere if MALLOC should block.
	 */
	MALLOC(xp, struct null_node *, sizeof(struct null_node),
	    M_NULLFSNODE, M_WAITOK);

	error = getnewvnode("null", mp, &null_vnodeops, &vp);
	if (error) {
		FREE(xp, M_NULLFSNODE);
		return (error);
	}

	/* Wire the alias and the lower vnode together; the alias shares
	 * the lower vnode's lock so locking either locks both. */
	xp->null_vnode = vp;
	xp->null_lowervp = lowervp;
	vp->v_type = lowervp->v_type;
	vp->v_data = xp;
	vp->v_vnlock = lowervp->v_vnlock;
	if (vp->v_vnlock == NULL)
		panic("null_nodeget: Passed a NULL vnlock.\n");
	/*
	 * Atomically insert our new node into the hash or vget existing 
	 * if someone else has beaten us to it.
	 */
	*vpp = null_hashins(mp, xp);
	if (*vpp != NULL) {
		/*
		 * Lost the race: another thread inserted an alias first.
		 * Drop the spare reference on lowervp, give our vnode back
		 * its private lock, and clear null_lowervp — presumably so
		 * the reclaim path triggered by vrele() leaves lowervp
		 * alone (NOTE(review): confirm against null_reclaim).
		 */
		vrele(lowervp);
		vp->v_vnlock = &vp->v_lock;
		xp->null_lowervp = NULL;
		vrele(vp);
		return (0);
	}
	/* We inserted; the caller's spare lowervp reference now belongs
	 * to the new alias vnode. */
	*vpp = vp;

	return (0);
}
  263 
  264 /*
  265  * Remove node from hash.
  266  */
  267 void
  268 null_hashrem(xp)
  269         struct null_node *xp;
  270 {
  271 
  272         mtx_lock(&null_hashmtx);
  273         LIST_REMOVE(xp, null_hash);
  274         mtx_unlock(&null_hashmtx);
  275 }
  276 
#ifdef DIAGNOSTIC

/*
 * With the kernel debugger compiled in, the barrier makes the failing
 * CPU spin so the debugger can be entered; without KDB it is 0 and the
 * checks fall straight through to panic().
 */
#ifdef KDB
#define null_checkvp_barrier	1
#else
#define null_checkvp_barrier	0
#endif

/*
 * Sanity-check a nullfs vnode: verify its private data points at a
 * live, referenced lower vnode.  Returns the lower vnode on success,
 * panics otherwise.  fil/lno identify the calling site (used only by
 * the disabled trace printf below).
 */
struct vnode *
null_checkvp(vp, fil, lno)
	struct vnode *vp;
	char *fil;
	int lno;
{
	struct null_node *a = VTONULL(vp);
#ifdef notyet
	/*
	 * Can't do this check because vop_reclaim runs
	 * with a funny vop vector.
	 */
	if (vp->v_op != null_vnodeop_p) {
		printf ("null_checkvp: on non-null-node\n");
		while (null_checkvp_barrier) /*WAIT*/ ;
		panic("null_checkvp");
	};
#endif
	if (a->null_lowervp == NULLVP) {
		/* Should never happen */
		int i; u_long *p;
		printf("vp = %p, ZERO ptr\n", (void *)vp);
		/*
		 * Dump the first 8 words of the null_node for post-mortem
		 * analysis.  NOTE(review): assumes sizeof(struct null_node)
		 * >= 8 * sizeof(u_long) — confirm against null.h.
		 */
		for (p = (u_long *) a, i = 0; i < 8; i++)
			printf(" %lx", p[i]);
		printf("\n");
		/* wait for debugger */
		while (null_checkvp_barrier) /*WAIT*/ ;
		panic("null_checkvp");
	}
	if (vrefcnt(a->null_lowervp) < 1) {
		int i; u_long *p;
		printf("vp = %p, unref'ed lowervp\n", (void *)vp);
		for (p = (u_long *) a, i = 0; i < 8; i++)
			printf(" %lx", p[i]);
		printf("\n");
		/* wait for debugger */
		while (null_checkvp_barrier) /*WAIT*/ ;
		panic ("null with unref'ed lowervp");
	};
#ifdef notyet
	printf("null %x/%d -> %x/%d [%s, %d]\n",
		NULLTOV(a), vrefcnt(NULLTOV(a)),
		a->null_lowervp, vrefcnt(a->null_lowervp),
		fil, lno);
#endif
	return a->null_lowervp;
}
#endif

Cache object: 152208ea851920a8f8bed60d20ab65ba


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.