FreeBSD/Linux Kernel Cross Reference
sys/fs/pseudofs/pseudofs_vncache.c


/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001 Dag-Erling Coïdan Smørgrav
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/12.0/sys/fs/pseudofs/pseudofs_vncache.c 326268 2017-11-27 15:15:37Z pfg $");

#include "opt_pseudofs.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <fs/pseudofs/pseudofs.h>
#include <fs/pseudofs/pseudofs_internal.h>

static MALLOC_DEFINE(M_PFSVNCACHE, "pfs_vncache", "pseudofs vnode cache");

static struct mtx pfs_vncache_mutex;
static struct pfs_vdata *pfs_vncache;
static eventhandler_tag pfs_exit_tag;
static void pfs_exit(void *arg, struct proc *p);
static void pfs_purge_locked(struct pfs_node *pn, bool force);

static SYSCTL_NODE(_vfs_pfs, OID_AUTO, vncache, CTLFLAG_RW, 0,
    "pseudofs vnode cache");

static int pfs_vncache_entries;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, entries, CTLFLAG_RD,
    &pfs_vncache_entries, 0,
    "number of entries in the vnode cache");

static int pfs_vncache_maxentries;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, maxentries, CTLFLAG_RD,
    &pfs_vncache_maxentries, 0,
    "highest number of entries in the vnode cache");

static int pfs_vncache_hits;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, hits, CTLFLAG_RD,
    &pfs_vncache_hits, 0,
    "number of cache hits since initialization");

static int pfs_vncache_misses;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, misses, CTLFLAG_RD,
    &pfs_vncache_misses, 0,
    "number of cache misses since initialization");

extern struct vop_vector pfs_vnodeops;  /* XXX -> .h file */

/*
 * Initialize vnode cache
 */
void
pfs_vncache_load(void)
{

        mtx_init(&pfs_vncache_mutex, "pfs_vncache", NULL, MTX_DEF);
        pfs_exit_tag = EVENTHANDLER_REGISTER(process_exit, pfs_exit, NULL,
            EVENTHANDLER_PRI_ANY);
}

/*
 * Tear down vnode cache
 */
void
pfs_vncache_unload(void)
{

        EVENTHANDLER_DEREGISTER(process_exit, pfs_exit_tag);
        mtx_lock(&pfs_vncache_mutex);
        pfs_purge_locked(NULL, true);
        mtx_unlock(&pfs_vncache_mutex);
        KASSERT(pfs_vncache_entries == 0,
            ("%d vncache entries remaining", pfs_vncache_entries));
        mtx_destroy(&pfs_vncache_mutex);
}

/*
 * Allocate a vnode
 */
int
pfs_vncache_alloc(struct mount *mp, struct vnode **vpp,
                  struct pfs_node *pn, pid_t pid)
{
        struct pfs_vdata *pvd, *pvd2;
        struct vnode *vp;
        int error;

        /*
         * See if the vnode is in the cache.
         * XXX linear search is not very efficient.
         */
retry:
        mtx_lock(&pfs_vncache_mutex);
        for (pvd = pfs_vncache; pvd; pvd = pvd->pvd_next) {
                if (pvd->pvd_pn == pn && pvd->pvd_pid == pid &&
                    pvd->pvd_vnode->v_mount == mp) {
                        vp = pvd->pvd_vnode;
                        VI_LOCK(vp);
                        mtx_unlock(&pfs_vncache_mutex);
                        if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread) == 0) {
                                ++pfs_vncache_hits;
                                *vpp = vp;
                                /*
                                 * Some callers cache_enter(vp) later, so
                                 * we have to make sure it's not in the
                                 * VFS cache so it doesn't get entered
                                 * twice.  A better solution would be to
                                 * make pfs_vncache_alloc() responsible
                                 * for entering the vnode in the VFS
                                 * cache.
                                 */
                                cache_purge(vp);
                                return (0);
                        }
                        goto retry;
                }
        }
        mtx_unlock(&pfs_vncache_mutex);

        /* nope, get a new one */
        pvd = malloc(sizeof *pvd, M_PFSVNCACHE, M_WAITOK);
        pvd->pvd_next = pvd->pvd_prev = NULL;
        error = getnewvnode("pseudofs", mp, &pfs_vnodeops, vpp);
        if (error) {
                free(pvd, M_PFSVNCACHE);
                return (error);
        }
        pvd->pvd_pn = pn;
        pvd->pvd_pid = pid;
        (*vpp)->v_data = pvd;
        switch (pn->pn_type) {
        case pfstype_root:
                (*vpp)->v_vflag = VV_ROOT;
#if 0
                printf("root vnode allocated\n");
#endif
                /* fall through */
        case pfstype_dir:
        case pfstype_this:
        case pfstype_parent:
        case pfstype_procdir:
                (*vpp)->v_type = VDIR;
                break;
        case pfstype_file:
                (*vpp)->v_type = VREG;
                break;
        case pfstype_symlink:
                (*vpp)->v_type = VLNK;
                break;
        case pfstype_none:
                KASSERT(0, ("pfs_vncache_alloc called for null node\n"));
        default:
                panic("%s has unexpected type: %d", pn->pn_name, pn->pn_type);
        }
        /*
         * Propagate flag through to vnode so users know it can change
         * if the process changes (i.e. execve)
         */
        if ((pn->pn_flags & PFS_PROCDEP) != 0)
                (*vpp)->v_vflag |= VV_PROCDEP;
        pvd->pvd_vnode = *vpp;
        vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
        VN_LOCK_AREC(*vpp);
        error = insmntque(*vpp, mp);
        if (error != 0) {
                free(pvd, M_PFSVNCACHE);
                *vpp = NULLVP;
                return (error);
        }
retry2:
        mtx_lock(&pfs_vncache_mutex);
        /*
         * Another thread may race with us, creating the entry we are
         * going to insert into the cache.  Recheck after
         * pfs_vncache_mutex is reacquired.
         */
        for (pvd2 = pfs_vncache; pvd2; pvd2 = pvd2->pvd_next) {
                if (pvd2->pvd_pn == pn && pvd2->pvd_pid == pid &&
                    pvd2->pvd_vnode->v_mount == mp) {
                        vp = pvd2->pvd_vnode;
                        VI_LOCK(vp);
                        mtx_unlock(&pfs_vncache_mutex);
                        if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread) == 0) {
                                ++pfs_vncache_hits;
                                vgone(*vpp);
                                vput(*vpp);
                                *vpp = vp;
                                cache_purge(vp);
                                return (0);
                        }
                        goto retry2;
                }
        }
        ++pfs_vncache_misses;
        if (++pfs_vncache_entries > pfs_vncache_maxentries)
                pfs_vncache_maxentries = pfs_vncache_entries;
        pvd->pvd_prev = NULL;
        pvd->pvd_next = pfs_vncache;
        if (pvd->pvd_next)
                pvd->pvd_next->pvd_prev = pvd;
        pfs_vncache = pvd;
        mtx_unlock(&pfs_vncache_mutex);
        return (0);
}
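
/*
 * Illustrative only: a minimal sketch (not part of this file) of how a
 * pseudofs consumer might hand out its root vnode through this cache.
 * The function name example_pfs_root() is hypothetical; real consumers
 * such as procfs and linprocfs reach pfs_vncache_alloc() through the
 * generic pseudofs VFS and vnode operations.  The block is disabled so
 * it cannot affect the build.
 */
#if 0
static int
example_pfs_root(struct mount *mp, struct vnode **vpp)
{
        struct pfs_info *pi;

        /* The mount's private data points at the filesystem's pfs_info. */
        pi = (struct pfs_info *)mp->mnt_data;

        /*
         * The cache keys on (node, pid, mount).  The root node is not
         * tied to any process, so NO_PID is used; on return *vpp is
         * exclusively locked, whether it came from a cache hit or from
         * a freshly allocated vnode.
         */
        return (pfs_vncache_alloc(mp, vpp, pi->pi_root, NO_PID));
}
#endif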

/*
 * Free a vnode
 */
int
pfs_vncache_free(struct vnode *vp)
{
        struct pfs_vdata *pvd;

        mtx_lock(&pfs_vncache_mutex);
        pvd = (struct pfs_vdata *)vp->v_data;
        KASSERT(pvd != NULL, ("pfs_vncache_free(): no vnode data\n"));
        if (pvd->pvd_next)
                pvd->pvd_next->pvd_prev = pvd->pvd_prev;
        if (pvd->pvd_prev) {
                pvd->pvd_prev->pvd_next = pvd->pvd_next;
                --pfs_vncache_entries;
        } else if (pfs_vncache == pvd) {
                pfs_vncache = pvd->pvd_next;
                --pfs_vncache_entries;
        }
        mtx_unlock(&pfs_vncache_mutex);

        free(pvd, M_PFSVNCACHE);
        vp->v_data = NULL;
        return (0);
}

/*
 * Purge the cache of dead entries
 *
 * This is extremely inefficient due to the fact that vgone() not only
 * indirectly modifies the vnode cache, but may also sleep.  We can
 * neither hold pfs_vncache_mutex across a vgone() call, nor make any
 * assumptions about the state of the cache after vgone() returns.  In
 * consequence, we must start over after every vgone() call, and keep
 * trying until we manage to traverse the entire cache.
 *
 * The only way to improve this situation is to change the data structure
 * used to implement the cache.  An illustrative sketch of one possible
 * replacement structure follows this function.
 */
static void
pfs_purge_locked(struct pfs_node *pn, bool force)
{
        struct pfs_vdata *pvd;
        struct vnode *vnp;

        mtx_assert(&pfs_vncache_mutex, MA_OWNED);
        pvd = pfs_vncache;
        while (pvd != NULL) {
                if (force || pvd->pvd_dead ||
                    (pn != NULL && pvd->pvd_pn == pn)) {
                        vnp = pvd->pvd_vnode;
                        vhold(vnp);
                        mtx_unlock(&pfs_vncache_mutex);
                        VOP_LOCK(vnp, LK_EXCLUSIVE);
                        vgone(vnp);
                        VOP_UNLOCK(vnp, 0);
                        mtx_lock(&pfs_vncache_mutex);
                        vdrop(vnp);
                        pvd = pfs_vncache;
                } else {
                        pvd = pvd->pvd_next;
                }
        }
}
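
/*
 * Illustrative only: one possible shape for the data structure change
 * suggested above.  Hashing cache entries by pid would let pfs_exit()
 * mark a dying process's entries by scanning a single bucket instead
 * of walking the whole list.  The names below (PFS_VNCACHE_HASH,
 * pvd_hash, example_pfs_mark_dead()) are hypothetical and assume an
 * SLIST_ENTRY member added to struct pfs_vdata; the block is disabled
 * so it cannot affect the build.
 */
#if 0
#define PFS_VNCACHE_HASHSIZE    64      /* must be a power of two */
#define PFS_VNCACHE_HASH(pid)   ((u_int)(pid) & (PFS_VNCACHE_HASHSIZE - 1))

static SLIST_HEAD(, pfs_vdata) pfs_vncache_hash[PFS_VNCACHE_HASHSIZE];

static void
example_pfs_mark_dead(pid_t pid)
{
        struct pfs_vdata *pvd;

        /* Only the bucket that can contain this pid needs to be walked. */
        mtx_assert(&pfs_vncache_mutex, MA_OWNED);
        SLIST_FOREACH(pvd, &pfs_vncache_hash[PFS_VNCACHE_HASH(pid)], pvd_hash)
                if (pvd->pvd_pid == pid)
                        pvd->pvd_dead = 1;
}
#endif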

void
pfs_purge(struct pfs_node *pn)
{

        mtx_lock(&pfs_vncache_mutex);
        pfs_purge_locked(pn, false);
        mtx_unlock(&pfs_vncache_mutex);
}

/*
 * Free all vnodes associated with a defunct process
 */
static void
pfs_exit(void *arg, struct proc *p)
{
        struct pfs_vdata *pvd;
        int dead;

        if (pfs_vncache == NULL)
                return;
        mtx_lock(&pfs_vncache_mutex);
        for (pvd = pfs_vncache, dead = 0; pvd != NULL; pvd = pvd->pvd_next)
                if (pvd->pvd_pid == p->p_pid)
                        dead = pvd->pvd_dead = 1;
        if (dead)
                pfs_purge_locked(NULL, false);
        mtx_unlock(&pfs_vncache_mutex);
}

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.